diff --git a/.ci/build-freebsd.sh b/.ci/build-freebsd.sh index 7f35412b58..66a7319380 100755 --- a/.ci/build-freebsd.sh +++ b/.ci/build-freebsd.sh @@ -1,26 +1,26 @@ #!/bin/sh -ex -# Pull all the submodules except some +# Pull all the submodules except llvm # Note: Tried to use git submodule status, but it takes over 20 seconds # shellcheck disable=SC2046 -git config --global --add safe.directory . -git submodule -q update --init --depth 1 $(awk '/path/ && !/llvm/ && !/opencv/ && !/libpng/ && !/libsdl-org/ && !/curl/ && !/zlib/ && !/libusb/ { print $3 }' .gitmodules) +git submodule -q update --init --depth 1 $(awk '/path/ && !/llvm/ { print $3 }' .gitmodules) + +# Prefer newer Clang than in base system (see also .ci/install-freebsd.sh) +# libc++ isn't in llvm* packages, so download manually +fetch https://github.com/llvm/llvm-project/releases/download/llvmorg-12.0.0/libcxx-12.0.0.src.tar.xz +tar xf libcxx-12.0.0.src.tar.xz +export CC=clang12 CXX=clang++12 +export CXXFLAGS="$CXXFLAGS -nostdinc++ -D_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS -isystem $PWD/libcxx-12.0.0.src/include" CONFIGURE_ARGS=" - -DWITH_LLVM=ON - -DUSE_SDL=OFF + -DWITH_LLVM=OFF -DUSE_PRECOMPILED_HEADERS=OFF -DUSE_NATIVE_INSTRUCTIONS=OFF -DUSE_SYSTEM_FFMPEG=ON -DUSE_SYSTEM_CURL=ON -DUSE_SYSTEM_LIBPNG=ON - -DUSE_SYSTEM_LIBUSB=ON - -DUSE_SYSTEM_OPENCV=ON " -# base Clang workaround (missing clang-scan-deps) -CONFIGURE_ARGS="$CONFIGURE_ARGS -DCMAKE_CXX_SCAN_FOR_MODULES=OFF" - # shellcheck disable=SC2086 cmake -B build -G Ninja $CONFIGURE_ARGS cmake --build build diff --git a/.ci/build-linux-aarch64.sh b/.ci/build-linux-aarch64.sh deleted file mode 100755 index 1fe640809c..0000000000 --- a/.ci/build-linux-aarch64.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/sh -ex - -cd rpcs3 || exit 1 - -shellcheck .ci/*.sh - -git config --global --add safe.directory '*' - -# Pull all the submodules except some -# shellcheck disable=SC2046 -git submodule -q update --init $(awk '/path/ && !/llvm/ && !/opencv/ && !/libsdl-org/ && !/curl/ && !/zlib/ { print $3 }' .gitmodules) - -mkdir build && cd build || exit 1 - -if [ "$COMPILER" = "gcc" ]; then - # These are set in the dockerfile - export CC="${GCC_BINARY}" - export CXX="${GXX_BINARY}" - export LINKER=gold -else - export CC="${CLANG_BINARY}" - export CXX="${CLANGXX_BINARY}" - export LINKER="${LLD_BINARY}" -fi - -export LINKER_FLAG="-fuse-ld=${LINKER}" - -cmake .. \ - -DCMAKE_INSTALL_PREFIX=/usr \ - -DUSE_NATIVE_INSTRUCTIONS=OFF \ - -DUSE_PRECOMPILED_HEADERS=OFF \ - -DCMAKE_EXE_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_MODULE_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_SHARED_LINKER_FLAGS="${LINKER_FLAG}" \ - -DUSE_SYSTEM_CURL=ON \ - -DUSE_SDL=ON \ - -DUSE_SYSTEM_SDL=ON \ - -DUSE_SYSTEM_FFMPEG=OFF \ - -DUSE_SYSTEM_OPENCV=ON \ - -DUSE_DISCORD_RPC=ON \ - -DOpenGL_GL_PREFERENCE=LEGACY \ - -DLLVM_DIR=/opt/llvm/lib/cmake/llvm \ - -DSTATIC_LINK_LLVM=ON \ - -DBUILD_RPCS3_TESTS="${RUN_UNIT_TESTS}" \ - -DRUN_RPCS3_TESTS="${RUN_UNIT_TESTS}" \ - -G Ninja - -ninja; build_status=$?; - -cd .. - -# If it compiled succesfully let's deploy. 
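Both the FreeBSD script above and the Linux scripts below drive submodule selection with the same awk filter over `.gitmodules`. A quick way to preview what a given filter will pull, without fetching anything (a sketch; it assumes the stock `.gitmodules` layout where each entry carries a `path = <dir>` line, so `$3` is the directory):

```sh
# Print the submodule paths the filter would hand to `git submodule update`.
# Each !/pattern/ clause drops the submodules whose path matches that name.
awk '/path/ && !/llvm/ { print $3 }' .gitmodules
```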
-if [ "$build_status" -eq 0 ]; then - .ci/deploy-linux.sh "aarch64" -fi diff --git a/.ci/build-linux.sh b/.ci/build-linux.sh index 13a9e802f3..3da5ff8184 100755 --- a/.ci/build-linux.sh +++ b/.ci/build-linux.sh @@ -1,66 +1,70 @@ #!/bin/sh -ex -cd rpcs3 || exit 1 +# Setup Qt variables +export QT_BASE_DIR=/opt/qt${QTVERMIN} +export PATH=$QT_BASE_DIR/bin:$PATH +export LD_LIBRARY_PATH=$QT_BASE_DIR/lib/x86_64-linux-gnu:$QT_BASE_DIR/lib -shellcheck .ci/*.sh +if [ -z "$CIRRUS_CI" ]; then + cd rpcs3 || exit 1 +fi -git config --global --add safe.directory '*' - -# Pull all the submodules except some +# Pull all the submodules except llvm, since it is built separately and we just download that build # Note: Tried to use git submodule status, but it takes over 20 seconds # shellcheck disable=SC2046 -git submodule -q update --init $(awk '/path/ && !/llvm/ && !/opencv/ && !/libsdl-org/ && !/curl/ && !/zlib/ { print $3 }' .gitmodules) +git submodule -q update --init $(awk '/path/ && !/llvm/ { print $3 }' .gitmodules) + +# Download pre-compiled llvm libs +curl -sLO https://github.com/RPCS3/llvm-mirror/releases/download/custom-build/llvmlibs-linux.tar.gz +mkdir llvmlibs +tar -xzf ./llvmlibs-linux.tar.gz -C llvmlibs mkdir build && cd build || exit 1 if [ "$COMPILER" = "gcc" ]; then # These are set in the dockerfile - export CC="${GCC_BINARY}" - export CXX="${GXX_BINARY}" + export CC=${GCC_BINARY} + export CXX=${GXX_BINARY} export LINKER=gold # We need to set the following variables for LTO to link properly - export AR=/usr/bin/gcc-ar-"$GCCVER" - export RANLIB=/usr/bin/gcc-ranlib-"$GCCVER" + export AR=/usr/bin/gcc-ar-$GCCVER + export RANLIB=/usr/bin/gcc-ranlib-$GCCVER export CFLAGS="-fuse-linker-plugin" else - export CC="${CLANG_BINARY}" - export CXX="${CLANGXX_BINARY}" + export CC=${CLANG_BINARY} + export CXX=${CLANGXX_BINARY} export LINKER=lld - export AR=/usr/bin/llvm-ar-"$LLVMVER" - export RANLIB=/usr/bin/llvm-ranlib-"$LLVMVER" + export AR=/usr/bin/llvm-ar-$LLVMVER + export RANLIB=/usr/bin/llvm-ranlib-$LLVMVER fi -export LINKER_FLAG="-fuse-ld=${LINKER}" +export CFLAGS="$CFLAGS -fuse-ld=${LINKER}" cmake .. \ -DCMAKE_INSTALL_PREFIX=/usr \ + -DBUILD_LLVM_SUBMODULE=OFF \ + -DLLVM_DIR=llvmlibs/lib/cmake/llvm/ \ -DUSE_NATIVE_INSTRUCTIONS=OFF \ -DUSE_PRECOMPILED_HEADERS=OFF \ -DCMAKE_C_FLAGS="$CFLAGS" \ -DCMAKE_CXX_FLAGS="$CFLAGS" \ - -DCMAKE_EXE_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_MODULE_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_SHARED_LINKER_FLAGS="${LINKER_FLAG}" \ -DCMAKE_AR="$AR" \ -DCMAKE_RANLIB="$RANLIB" \ -DUSE_SYSTEM_CURL=ON \ - -DUSE_SDL=ON \ - -DUSE_SYSTEM_SDL=ON \ - -DUSE_SYSTEM_FFMPEG=OFF \ - -DUSE_SYSTEM_OPENCV=ON \ - -DUSE_DISCORD_RPC=ON \ -DOpenGL_GL_PREFERENCE=LEGACY \ - -DLLVM_DIR=/opt/llvm/lib/cmake/llvm \ - -DSTATIC_LINK_LLVM=ON \ - -DBUILD_RPCS3_TESTS="${RUN_UNIT_TESTS}" \ - -DRUN_RPCS3_TESTS="${RUN_UNIT_TESTS}" \ -G Ninja ninja; build_status=$?; cd .. +shellcheck .ci/*.sh + # If it compiled succesfully let's deploy. -if [ "$build_status" -eq 0 ]; then - .ci/deploy-linux.sh "x86_64" +# Azure and Cirrus publish PRs as artifacts only. 
+{ [ "$CI_HAS_ARTIFACTS" = "true" ]; +} && SHOULD_DEPLOY="true" || SHOULD_DEPLOY="false" + +if [ "$build_status" -eq 0 ] && [ "$SHOULD_DEPLOY" = "true" ]; then + .ci/deploy-linux.sh fi diff --git a/.ci/build-mac-arm64.sh b/.ci/build-mac-arm64.sh deleted file mode 100755 index 3c9c864031..0000000000 --- a/.ci/build-mac-arm64.sh +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/sh -ex - -# shellcheck disable=SC2086 -export HOMEBREW_NO_AUTO_UPDATE=1 -export HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=1 -export HOMEBREW_NO_ENV_HINTS=1 -export HOMEBREW_NO_INSTALL_CLEANUP=1 - -/opt/homebrew/bin/brew install -f --overwrite --quiet nasm ninja p7zip ccache pipenv gnutls freetype googletest #create-dmg -/opt/homebrew/bin/brew install -f --quiet ffmpeg@5 -/opt/homebrew/bin/brew install --quiet "llvm@$LLVM_COMPILER_VER" glew cmake sdl3 vulkan-headers coreutils -/opt/homebrew/bin/brew link -f --quiet "llvm@$LLVM_COMPILER_VER" ffmpeg@5 - -# moltenvk based on commit for 1.3.0 release -wget https://raw.githubusercontent.com/Homebrew/homebrew-core/7255441cbcafabaa8950f67c7ec55ff499dbb2d3/Formula/m/molten-vk.rb -/opt/homebrew/bin/brew install -f --overwrite --formula --quiet ./molten-vk.rb -export CXX=clang++ -export CC=clang - -export BREW_PATH; -BREW_PATH="$(brew --prefix)" -export BREW_BIN="/opt/homebrew/bin" -export BREW_SBIN="/opt/homebrew/sbin" -export CMAKE_EXTRA_OPTS='-DLLVM_TARGETS_TO_BUILD=arm64' - -export WORKDIR; -WORKDIR="$(pwd)" - -# Get Qt -if [ ! -d "/tmp/Qt/$QT_VER" ]; then - mkdir -p "/tmp/Qt" - git clone https://github.com/engnr/qt-downloader.git - cd qt-downloader - git checkout f52efee0f18668c6d6de2dec0234b8c4bc54c597 - # nested Qt 6.9.1 URL workaround - # sed -i '' "s/'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/'qt{0}_{0}{1}{2}'.format(major, minor, patch), 'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/g" qt-downloader - # sed -i '' "s/'{}\/{}\/qt{}_{}\/'/'{0}\/{1}\/qt{2}_{3}\/qt{2}_{3}\/'/g" qt-downloader - # archived Qt 6.7.3 URL workaround - sed -i '' "s/official_releases/archive/g" qt-downloader - cd "/tmp/Qt" - arch -arm64 "$BREW_PATH/bin/pipenv" run pip3 uninstall py7zr requests semantic_version lxml - arch -arm64 "$BREW_PATH/bin/pipenv" run pip3 install py7zr requests semantic_version lxml --no-cache - mkdir -p "$QT_VER/macos" ; ln -s "macos" "$QT_VER/clang_64" - # sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.9.1 workaround - arch -arm64 "$BREW_PATH/bin/pipenv" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats # -o "$QT_VER/clang_64" -fi - -cd "$WORKDIR" -ditto "/tmp/Qt/$QT_VER" "qt-downloader/$QT_VER" - -export Qt6_DIR="$WORKDIR/qt-downloader/$QT_VER/clang_64/lib/cmake/Qt$QT_VER_MAIN" -export SDL3_DIR="$BREW_PATH/opt/sdl3/lib/cmake/SDL3" - -export PATH="$BREW_PATH/opt/llvm@$LLVM_COMPILER_VER/bin:$WORKDIR/qt-downloader/$QT_VER/clang_64/bin:$BREW_BIN:$BREW_SBIN:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/Library/Apple/usr/bin:$PATH" -export LDFLAGS="-L$BREW_PATH/lib $BREW_PATH/opt/ffmpeg@5/lib/libavcodec.dylib $BREW_PATH/opt/ffmpeg@5/lib/libavformat.dylib $BREW_PATH/opt/ffmpeg@5/lib/libavutil.dylib $BREW_PATH/opt/ffmpeg@5/lib/libswscale.dylib $BREW_PATH/opt/ffmpeg@5/lib/libswresample.dylib $BREW_PATH/opt/llvm@$LLVM_COMPILER_VER/lib/c++/libc++.1.dylib $BREW_PATH/lib/libSDL3.dylib $BREW_PATH/lib/libGLEW.dylib $BREW_PATH/opt/llvm@$LLVM_COMPILER_VER/lib/unwind/libunwind.1.dylib -Wl,-rpath,$BREW_PATH/lib" -export CPPFLAGS="-I$BREW_PATH/include 
-I$BREW_PATH/include -no-pie -D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" -export CFLAGS="-D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" -export LIBRARY_PATH="$BREW_PATH/lib" -export LD_LIBRARY_PATH="$BREW_PATH/lib" - -export VULKAN_SDK -VULKAN_SDK="$BREW_PATH/opt/molten-vk" -ln -s "$VULKAN_SDK/lib/libMoltenVK.dylib" "$VULKAN_SDK/lib/libvulkan.dylib" || true -export VK_ICD_FILENAMES="$VULKAN_SDK/share/vulkan/icd.d/MoltenVK_icd.json" - -export LLVM_DIR -LLVM_DIR="$BREW_PATH/opt/llvm@$LLVM_COMPILER_VER" -# exclude ffmpeg, LLVM, opencv, and sdl from submodule update -# shellcheck disable=SC2046 -git submodule -q update --init --depth=1 --jobs=8 $(awk '/path/ && !/ffmpeg/ && !/llvm/ && !/opencv/ && !/SDL/ { print $3 }' .gitmodules) - -# 3rdparty fixes -sed -i '' "s/extern const double NSAppKitVersionNumber;/const double NSAppKitVersionNumber = 1343;/g" 3rdparty/hidapi/hidapi/mac/hid.c - -mkdir build && cd build || exit 1 - -export MACOSX_DEPLOYMENT_TARGET=14.0 - -"$BREW_PATH/bin/cmake" .. \ - -DBUILD_RPCS3_TESTS="${RUN_UNIT_TESTS}" \ - -DRUN_RPCS3_TESTS="${RUN_UNIT_TESTS}" \ - -DUSE_SDL=ON \ - -DUSE_DISCORD_RPC=ON \ - -DUSE_VULKAN=ON \ - -DUSE_ALSA=OFF \ - -DUSE_PULSE=OFF \ - -DUSE_AUDIOUNIT=ON \ - -DUSE_SYSTEM_FFMPEG=ON \ - -DLLVM_CCACHE_BUILD=OFF \ - -DLLVM_BUILD_RUNTIME=OFF \ - -DLLVM_BUILD_TOOLS=OFF \ - -DLLVM_INCLUDE_DOCS=OFF \ - -DLLVM_INCLUDE_EXAMPLES=OFF \ - -DLLVM_INCLUDE_TESTS=OFF \ - -DLLVM_INCLUDE_TOOLS=OFF \ - -DLLVM_INCLUDE_UTILS=OFF \ - -DLLVM_USE_PERF=OFF \ - -DLLVM_ENABLE_Z3_SOLVER=OFF \ - -DUSE_NATIVE_INSTRUCTIONS=OFF \ - -DUSE_SYSTEM_MVK=ON \ - -DUSE_SYSTEM_FAUDIO=OFF \ - -DUSE_SYSTEM_SDL=ON \ - -DUSE_SYSTEM_OPENCV=ON \ - "$CMAKE_EXTRA_OPTS" \ - -DLLVM_TARGET_ARCH=arm64 \ - -DCMAKE_OSX_ARCHITECTURES=arm64 \ - -DCMAKE_IGNORE_PATH="$BREW_PATH/lib" \ - -DCMAKE_IGNORE_PREFIX_PATH=/opt/homebrew/opt \ - -DCMAKE_CXX_FLAGS="-D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" \ - -DCMAKE_POLICY_VERSION_MINIMUM=3.5 \ - -DCMAKE_OSX_SYSROOT="$(xcrun --sdk macosx --show-sdk-path)" \ - -G Ninja - -"$BREW_PATH/bin/ninja"; build_status=$?; - -cd .. - -# If it compiled succesfully let's deploy. 
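The deleted macOS scripts point `VULKAN_SDK` at Homebrew's molten-vk and alias `libMoltenVK.dylib` as `libvulkan.dylib`. A quick sanity check of that layout (hypothetical debugging commands, assuming the same Homebrew prefix the script uses):

```sh
# The symlink should exist and resolve to MoltenVK; otool -L prints the
# install names the dylib was linked against.
VULKAN_SDK="$(brew --prefix)/opt/molten-vk"
ls -l "$VULKAN_SDK/lib/libvulkan.dylib"
otool -L "$VULKAN_SDK/lib/libMoltenVK.dylib" | head -n 3
```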
-if [ "$build_status" -eq 0 ]; then - .ci/deploy-mac-arm64.sh -fi diff --git a/.ci/build-mac.sh b/.ci/build-mac.sh index 1e6bae01a1..5c2cf57e19 100755 --- a/.ci/build-mac.sh +++ b/.ci/build-mac.sh @@ -1,124 +1,27 @@ #!/bin/sh -ex -# shellcheck disable=SC2086 -export HOMEBREW_NO_AUTO_UPDATE=1 -export HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=1 -export HOMEBREW_NO_ENV_HINTS=1 -export HOMEBREW_NO_INSTALL_CLEANUP=1 +export CCACHE_SLOPPINESS=pch_defines,time_macros +export CMAKE_PREFIX_PATH=/usr/local/opt/qt5/ +export PATH="/usr/local/opt/ccache/libexec:$PATH" -#/usr/sbin/softwareupdate --install-rosetta --agree-to-license -arch -x86_64 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -arch -x86_64 /usr/local/bin/brew update -arch -x86_64 /usr/local/bin/brew install -f --overwrite --quiet python || arch -x86_64 /usr/local/bin/brew link --overwrite python -arch -x86_64 /usr/local/bin/brew install -f --overwrite --quiet nasm ninja p7zip ccache pipenv gnutls freetype #create-dmg -arch -x86_64 /usr/local/bin/brew install -f --quiet ffmpeg@5 -arch -x86_64 /usr/local/bin/brew install --quiet "llvm@$LLVM_COMPILER_VER" glew cmake sdl3 vulkan-headers coreutils -arch -x86_64 /usr/local/bin/brew link -f --quiet "llvm@$LLVM_COMPILER_VER" ffmpeg@5 +# Setup vulkan and gfx-rs/portability +curl -sLO https://github.com/gfx-rs/portability/releases/download/latest/gfx-portability-macos-latest.zip +unzip -: gfx-portability-macos-latest.zip +curl -sLO https://github.com/KhronosGroup/Vulkan-Headers/archive/sdk-1.1.106.0.zip +unzip -: sdk-*.zip +mkdir vulkan-sdk +ln -s "${PWD}"/Vulkan-Headers*/include vulkan-sdk/include +mkdir vulkan-sdk/lib +cp target/release/libportability.dylib vulkan-sdk/lib/libVulkan.dylib +# Let macdeployqt locate and install Vulkan library +install_name_tool -id "${PWD}"/vulkan-sdk/lib/libVulkan.dylib vulkan-sdk/lib/libVulkan.dylib +export VULKAN_SDK="${PWD}/vulkan-sdk" -# moltenvk based on commit for 1.3.0 release -wget https://raw.githubusercontent.com/Homebrew/homebrew-core/7255441cbcafabaa8950f67c7ec55ff499dbb2d3/Formula/m/molten-vk.rb -arch -x86_64 /usr/local/bin/brew install -f --overwrite --formula --quiet ./molten-vk.rb -export CXX=clang++ -export CC=clang - -export BREW_X64_PATH; -BREW_X64_PATH="$("/usr/local/bin/brew" --prefix)" -export BREW_BIN="/usr/local/bin" -export BREW_SBIN="/usr/local/sbin" -export CMAKE_EXTRA_OPTS='-DLLVM_TARGETS_TO_BUILD=X86' - -export WORKDIR; -WORKDIR="$(pwd)" - -# Get Qt -if [ ! 
-d "/tmp/Qt/$QT_VER" ]; then - mkdir -p "/tmp/Qt" - git clone https://github.com/engnr/qt-downloader.git - cd qt-downloader - git checkout f52efee0f18668c6d6de2dec0234b8c4bc54c597 - # nested Qt 6.9.1 URL workaround - # sed -i '' "s/'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/'qt{0}_{0}{1}{2}'.format(major, minor, patch), 'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/g" qt-downloader - # sed -i '' "s/'{}\/{}\/qt{}_{}\/'/'{0}\/{1}\/qt{2}_{3}\/qt{2}_{3}\/'/g" qt-downloader - # archived Qt 6.7.3 URL workaround - sed -i '' "s/official_releases/archive/g" qt-downloader - cd "/tmp/Qt" - arch -x86_64 "$BREW_X64_PATH/bin/pipenv" --python "$BREW_X64_PATH/bin/python3" run pip3 install py7zr requests semantic_version lxml - mkdir -p "$QT_VER/macos" ; ln -s "macos" "$QT_VER/clang_64" - # sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.9.1 workaround - arch -x86_64 "$BREW_X64_PATH/bin/pipenv" --python "$BREW_X64_PATH/bin/python3" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats # -o "$QT_VER/clang_64" -fi - -cd "$WORKDIR" -ditto "/tmp/Qt/$QT_VER" "qt-downloader/$QT_VER" - -export Qt6_DIR="$WORKDIR/qt-downloader/$QT_VER/clang_64/lib/cmake/Qt$QT_VER_MAIN" -export SDL3_DIR="$BREW_X64_PATH/opt/sdl3/lib/cmake/SDL3" - -export PATH="$BREW_X64_PATH/opt/llvm@$LLVM_COMPILER_VER/bin:$WORKDIR/qt-downloader/$QT_VER/clang_64/bin:$BREW_BIN:$BREW_SBIN:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/Library/Apple/usr/bin:$PATH" -export LDFLAGS="-L$BREW_X64_PATH/lib -Wl,-rpath,$BREW_X64_PATH/lib" -export CPPFLAGS="-I$BREW_X64_PATH/include -msse -msse2 -mcx16 -no-pie -D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" -export CFLAGS="-D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" -export LIBRARY_PATH="$BREW_X64_PATH/lib" -export LD_LIBRARY_PATH="$BREW_X64_PATH/lib" - -export VULKAN_SDK -VULKAN_SDK="$BREW_X64_PATH/opt/molten-vk" -ln -s "$VULKAN_SDK/lib/libMoltenVK.dylib" "$VULKAN_SDK/lib/libvulkan.dylib" -export VK_ICD_FILENAMES="$VULKAN_SDK/share/vulkan/icd.d/MoltenVK_icd.json" - -export LLVM_DIR -LLVM_DIR="BREW_X64_PATH/opt/llvm@$LLVM_COMPILER_VER" -# exclude ffmpeg, LLVM, opencv, and sdl from submodule update +# Pull all the submodules except llvm +# Note: Tried to use git submodule status, but it takes over 20 seconds # shellcheck disable=SC2046 -git submodule -q update --init --depth=1 --jobs=8 $(awk '/path/ && !/ffmpeg/ && !/llvm/ && !/opencv/ && !/SDL/ { print $3 }' .gitmodules) - -# 3rdparty fixes -sed -i '' "s/extern const double NSAppKitVersionNumber;/const double NSAppKitVersionNumber = 1343;/g" 3rdparty/hidapi/hidapi/mac/hid.c +git submodule -q update --init $(awk '/path/ && !/llvm/ { print $3 }' .gitmodules) mkdir build && cd build || exit 1 - -export MACOSX_DEPLOYMENT_TARGET=14.0 - -"$BREW_X64_PATH/bin/cmake" .. 
\ - -DBUILD_RPCS3_TESTS=OFF \ - -DRUN_RPCS3_TESTS=OFF \ - -DUSE_SDL=ON \ - -DUSE_DISCORD_RPC=ON \ - -DUSE_VULKAN=ON \ - -DUSE_ALSA=OFF \ - -DUSE_PULSE=OFF \ - -DUSE_AUDIOUNIT=ON \ - -DUSE_SYSTEM_FFMPEG=ON \ - -DLLVM_CCACHE_BUILD=OFF \ - -DLLVM_BUILD_RUNTIME=OFF \ - -DLLVM_BUILD_TOOLS=OFF \ - -DLLVM_INCLUDE_DOCS=OFF \ - -DLLVM_INCLUDE_EXAMPLES=OFF \ - -DLLVM_INCLUDE_TESTS=OFF \ - -DLLVM_INCLUDE_TOOLS=OFF \ - -DLLVM_INCLUDE_UTILS=OFF \ - -DLLVM_USE_PERF=OFF \ - -DLLVM_ENABLE_Z3_SOLVER=OFF \ - -DUSE_NATIVE_INSTRUCTIONS=OFF \ - -DUSE_SYSTEM_MVK=ON \ - -DUSE_SYSTEM_FAUDIO=OFF \ - -DUSE_SYSTEM_SDL=ON \ - -DUSE_SYSTEM_OPENCV=ON \ - "$CMAKE_EXTRA_OPTS" \ - -DLLVM_TARGET_ARCH=X86_64 \ - -DCMAKE_OSX_ARCHITECTURES=x86_64 \ - -DCMAKE_IGNORE_PATH="$BREW_X64_PATH/lib" \ - -DCMAKE_IGNORE_PREFIX_PATH=/usr/local/opt \ - -DCMAKE_CXX_FLAGS="-D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" \ - -DCMAKE_POLICY_VERSION_MINIMUM=3.5 \ - -DCMAKE_OSX_SYSROOT="$(xcrun --sdk macosx --show-sdk-path)" \ - -G Ninja - -"$BREW_X64_PATH/bin/ninja"; build_status=$?; - -cd .. - -# If it compiled succesfully let's deploy. -if [ "$build_status" -eq 0 ]; then - .ci/deploy-mac.sh -fi +cmake .. -DWITH_LLVM=OFF -DUSE_NATIVE_INSTRUCTIONS=OFF -G Ninja +ninja diff --git a/.ci/build-windows-clang.sh b/.ci/build-windows-clang.sh deleted file mode 100644 index 0880e7f5ed..0000000000 --- a/.ci/build-windows-clang.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/sh -ex - -git config --global --add safe.directory '*' - -# Pull all the submodules except some -# Note: Tried to use git submodule status, but it takes over 20 seconds -# shellcheck disable=SC2046 -git submodule -q update --init $(awk '/path/ && !/llvm/ && !/opencv/ && !/ffmpeg/ && !/curl/ && !/FAudio/ && !/zlib/ { print $3 }' .gitmodules) - -mkdir build && cd build || exit 1 - -export CC="clang" -export CXX="clang++" -export LINKER=lld -export LINKER_FLAG="-fuse-ld=${LINKER}" - -if [ -n "$LLVMVER" ]; then - export AR="llvm-ar-$LLVMVER" - export RANLIB="llvm-ranlib-$LLVMVER" -else - export AR="llvm-ar" - export RANLIB="llvm-ranlib" -fi - -cmake .. \ - -DCMAKE_PREFIX_PATH=/clang64 \ - -DCMAKE_INSTALL_PREFIX=/usr \ - -DUSE_NATIVE_INSTRUCTIONS=OFF \ - -DUSE_PRECOMPILED_HEADERS=OFF \ - -DCMAKE_C_FLAGS="$CFLAGS" \ - -DCMAKE_CXX_FLAGS="$CFLAGS" \ - -DCMAKE_EXE_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_MODULE_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_SHARED_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_AR="$AR" \ - -DCMAKE_RANLIB="$RANLIB" \ - -DUSE_SYSTEM_CURL=ON \ - -DUSE_FAUDIO=OFF \ - -DUSE_SDL=ON \ - -DUSE_SYSTEM_SDL=OFF \ - -DUSE_SYSTEM_FFMPEG=ON \ - -DUSE_SYSTEM_OPENCV=ON \ - -DUSE_SYSTEM_OPENAL=OFF \ - -DUSE_DISCORD_RPC=ON \ - -DOpenGL_GL_PREFERENCE=LEGACY \ - -DWITH_LLVM=ON \ - -DLLVM_DIR=/clang64/lib/cmake/llvm \ - -DVulkan_LIBRARY=/clang64/lib/libvulkan-1.dll.a \ - -DSTATIC_LINK_LLVM=ON \ - -DBUILD_RPCS3_TESTS=OFF \ - -DRUN_RPCS3_TESTS=OFF \ - -G Ninja - -ninja; build_status=$?; - -cd .. - -# If it compiled succesfully let's deploy. 
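The deleted Windows/Clang script selects versioned `llvm-ar`/`llvm-ranlib` binaries only when `$LLVMVER` is set. The same dispatch fits in one POSIX parameter expansion; a sketch, not the script's actual text:

```sh
# ${LLVMVER:+-$LLVMVER} expands to "-$LLVMVER" when LLVMVER is set and
# non-empty, and to nothing otherwise, so both cases collapse into one line.
AR="llvm-ar${LLVMVER:+-$LLVMVER}"
RANLIB="llvm-ranlib${LLVMVER:+-$LLVMVER}"
export AR RANLIB
```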
-if [ "$build_status" -eq 0 ]; then - .ci/deploy-windows-clang.sh "x86_64" -fi diff --git a/.ci/deploy-linux.sh b/.ci/deploy-linux.sh index f8c3d849c3..286e25b9f6 100755 --- a/.ci/deploy-linux.sh +++ b/.ci/deploy-linux.sh @@ -2,51 +2,45 @@ cd build || exit 1 -CPU_ARCH="${1:-x86_64}" - if [ "$DEPLOY_APPIMAGE" = "true" ]; then - DESTDIR=AppDir ninja install + DESTDIR=appdir ninja install + QT_APPIMAGE="linuxdeployqt.AppImage" - curl -fsSLo /usr/bin/linuxdeploy "https://github.com/linuxdeploy/linuxdeploy/releases/download/continuous/linuxdeploy-$CPU_ARCH.AppImage" - chmod +x /usr/bin/linuxdeploy - curl -fsSLo /usr/bin/linuxdeploy-plugin-qt "https://github.com/linuxdeploy/linuxdeploy-plugin-qt/releases/download/continuous/linuxdeploy-plugin-qt-$CPU_ARCH.AppImage" - chmod +x /usr/bin/linuxdeploy-plugin-qt - curl -fsSLo linuxdeploy-plugin-checkrt.sh https://github.com/darealshinji/linuxdeploy-plugin-checkrt/releases/download/continuous/linuxdeploy-plugin-checkrt.sh - chmod +x ./linuxdeploy-plugin-checkrt.sh + curl -sL "https://github.com/probonopd/linuxdeployqt/releases/download/continuous/linuxdeployqt-continuous-x86_64.AppImage" > "$QT_APPIMAGE" + chmod a+x "$QT_APPIMAGE" + "./$QT_APPIMAGE" --appimage-extract + ./squashfs-root/AppRun ./appdir/usr/share/applications/*.desktop -bundle-non-qt-libs + ls ./appdir/usr/lib/ + rm -r ./appdir/usr/share/doc + cp "$(readlink -f /lib/x86_64-linux-gnu/libnsl.so.1)" ./appdir/usr/lib/libnsl.so.1 + export PATH=/rpcs3/build/squashfs-root/usr/bin/:${PATH} - export EXTRA_PLATFORM_PLUGINS="libqwayland-egl.so;libqwayland-generic.so" - export EXTRA_QT_PLUGINS="svg;wayland-decoration-client;wayland-graphics-integration-client;wayland-shell-integration;waylandcompositor" + # Embed newer libstdc++ for distros that don't come with it (ubuntu 16.04) + mkdir -p appdir/usr/optional/ ; mkdir -p appdir/usr/optional/libstdc++/ + cp /usr/lib/x86_64-linux-gnu/libstdc++.so.6 ./appdir/usr/optional/libstdc++/ + rm ./appdir/AppRun + curl -sL https://github.com/RPCS3/AppImageKit-checkrt/releases/download/continuous2/AppRun-patched-x86_64 -o ./appdir/AppRun + chmod a+x ./appdir/AppRun + curl -sL https://github.com/RPCS3/AppImageKit-checkrt/releases/download/continuous2/exec-x86_64.so -o ./appdir/usr/optional/exec.so - APPIMAGE_EXTRACT_AND_RUN=1 linuxdeploy --appdir AppDir --plugin qt --plugin checkrt + # Compile checker binary for AppImageKit-checkrt - # Remove libwayland-client because it has platform-dependent exports and breaks other OSes - rm -f ./AppDir/usr/lib/libwayland-client.so* + # This may need updating if you update the compiler or rpcs3 uses newer c++ features + # See https://github.com/gcc-mirror/gcc/blob/master/libstdc%2B%2B-v3/config/abi/pre/gnu.ver + # for which definitions correlate to which CXXABI version. 
+ # Currently we target a minimum of GLIBCXX_3.4.29 and CXXABI_1.3.11 + printf "#include <sstream>\n#include <exception>\n#include <memory_resource>\nint main(){auto x = std::stringbuf();x.get_allocator();std::make_exception_ptr(0);std::pmr::get_default_resource();}" \ + | $CXX -x c++ -std=c++2a -o ./appdir/usr/optional/checker - - # Remove libvulkan because it causes issues with gamescope - rm -f ./AppDir/usr/lib/libvulkan.so* + # Package it up and send it off + ./squashfs-root/usr/bin/appimagetool "$APPDIR" + + ls - # Remove unused Qt6 libraries - rm -f ./AppDir/usr/lib/libQt6VirtualKeyboard.so* - rm -f ./AppDir/usr/plugins/platforminputcontexts/libqtvirtualkeyboardplugin.so* - - # Remove git directory containing local commit history file - rm -rf ./AppDir/usr/share/rpcs3/git - - curl -fsSLo /uruntime "https://github.com/VHSgunzo/uruntime/releases/download/v0.3.4/uruntime-appimage-dwarfs-$CPU_ARCH" - chmod +x /uruntime - /uruntime --appimage-mkdwarfs -f --set-owner 0 --set-group 0 --no-history --no-create-timestamp \ - --compression zstd:level=22 -S26 -B32 --header /uruntime -i AppDir -o RPCS3.AppImage - - APPIMAGE_SUFFIX="linux_${CPU_ARCH}" - if [ "$CPU_ARCH" = "x86_64" ]; then - # Preserve back compat. Previous versions never included the full arch. - APPIMAGE_SUFFIX="linux64" - fi - - COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ../rpcs3/rpcs3_version.cpp) + COMM_TAG="$(grep 'version{.*}' ../rpcs3/rpcs3_version.cpp | awk -F[\{,] '{printf "%d.%d.%d", $2, $3, $4}')" COMM_COUNT="$(git rev-list --count HEAD)" COMM_HASH="$(git rev-parse --short=8 HEAD)" - RPCS3_APPIMAGE="rpcs3-v${COMM_TAG}-${COMM_COUNT}-${COMM_HASH}_${APPIMAGE_SUFFIX}.AppImage" + RPCS3_APPIMAGE="rpcs3-v${COMM_TAG}-${COMM_COUNT}-${COMM_HASH}_linux64.AppImage" mv ./RPCS3*.AppImage "$RPCS3_APPIMAGE" @@ -58,4 +52,9 @@ if [ "$DEPLOY_APPIMAGE" = "true" ]; then FILESIZE=$(stat -c %s ./rpcs3*.AppImage) SHA256SUM=$(sha256sum ./rpcs3*.AppImage | awk '{ print $1 }') echo "${SHA256SUM};${FILESIZE}B" > "$RELEASE_MESSAGE" + +fi + +if [ "$DEPLOY_PPA" = "true" ]; then + export DEBFULLNAME="RPCS3 Build Bot" fi diff --git a/.ci/deploy-llvm.sh b/.ci/deploy-llvm.sh deleted file mode 100644 index 35e5d780af..0000000000 --- a/.ci/deploy-llvm.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh -ex - -# First let's print some info about our caches -"$(cygpath -u "$CCACHE_BIN_DIR")"/ccache.exe --show-stats -v - -# BUILD_blablabla is Azure specific, so we wrap it for portability -ARTIFACT_DIR="$BUILD_ARTIFACTSTAGINGDIRECTORY" -BUILD="llvmlibs_mt.7z" - -# Package artifacts -7z a -m0=LZMA2 -mx9 "$BUILD" ./build/lib/Release-x64/llvm_build - -# Generate sha256 hashes -# Write to file for GitHub releases -sha256sum "$BUILD" | awk '{ print $1 }' | tee "$BUILD.sha256" -echo "$(cat "$BUILD.sha256");$(stat -c %s "$BUILD")B" > GitHubReleaseMessage.txt - -# Move files to publishing directory -cp -- "$BUILD" "$ARTIFACT_DIR" -cp -- "$BUILD.sha256" "$ARTIFACT_DIR" diff --git a/.ci/deploy-mac-arm64.sh b/.ci/deploy-mac-arm64.sh deleted file mode 100755 index e7de472378..0000000000 --- a/.ci/deploy-mac-arm64.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/sh -ex - -# shellcheck disable=SC2086 -cd build || exit 1 - -# Gather explicit version number and number of commits -COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ../rpcs3/rpcs3_version.cpp) -COMM_COUNT=$(git rev-list --count HEAD) -COMM_HASH=$(git rev-parse --short=8 HEAD) - -AVVER="${COMM_TAG}-${COMM_COUNT}" - -# AVVER is used for GitHub releases, it is the version number.
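The deploy scripts derive `COMM_TAG` from `rpcs3/rpcs3_version.cpp` in two different ways, both visible in the hunk above. Side by side (the description of the source line's shape is an assumption for illustration):

```sh
# Both parse the brace-initialized version literal, roughly
# "...version{ MAJOR, MINOR, PATCH, ... }".
# Field-based: relies on the numbers landing in whitespace columns 5-7.
awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' rpcs3/rpcs3_version.cpp
# Delimiter-based: splits on '{' and ',' so column position doesn't matter.
grep 'version{.*}' rpcs3/rpcs3_version.cpp | awk -F'[{,]' '{ printf "%d.%d.%d", $2, $3, $4 }'
```

The delimiter-based form survives reformatting of the source line, which is presumably why it was preferred here.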
-echo "AVVER=$AVVER" >> ../.ci/ci-vars.env - -cd bin -mkdir "rpcs3.app/Contents/lib/" || true - -cp "$(realpath /opt/homebrew/opt/llvm@$LLVM_COMPILER_VER/lib/c++/libc++abi.1.0.dylib)" "rpcs3.app/Contents/Frameworks/libc++abi.1.dylib" -cp "$(realpath /opt/homebrew/lib/libsharpyuv.0.dylib)" "rpcs3.app/Contents/lib/libsharpyuv.0.dylib" -cp "$(realpath /opt/homebrew/lib/libintl.8.dylib)" "rpcs3.app/Contents/lib/libintl.8.dylib" - -rm -rf "rpcs3.app/Contents/Frameworks/QtPdf.framework" \ -"rpcs3.app/Contents/Frameworks/QtQml.framework" \ -"rpcs3.app/Contents/Frameworks/QtQmlModels.framework" \ -"rpcs3.app/Contents/Frameworks/QtQuick.framework" \ -"rpcs3.app/Contents/Frameworks/QtVirtualKeyboard.framework" \ -"rpcs3.app/Contents/Plugins/platforminputcontexts" \ -"rpcs3.app/Contents/Plugins/virtualkeyboard" \ -"rpcs3.app/Contents/Resources/git" - -../../.ci/optimize-mac.sh rpcs3.app - -# Hack -install_name_tool \ --delete_rpath /opt/homebrew/lib \ --delete_rpath /opt/homebrew/opt/llvm@$LLVM_COMPILER_VER/lib RPCS3.app/Contents/MacOS/rpcs3 -#-delete_rpath /opt/homebrew1/Cellar/sdl3/3.2.8/lib - -# Need to do this rename hack due to case insensitive filesystem -mv rpcs3.app RPCS3_.app -mv RPCS3_.app RPCS3.app - -# NOTE: "--deep" is deprecated -codesign --deep -fs - RPCS3.app - -echo "[InternetShortcut]" > Quickstart.url -echo "URL=https://rpcs3.net/quickstart" >> Quickstart.url -echo "IconIndex=0" >> Quickstart.url - -#DMG_FILEPATH="$BUILD_ARTIFACTSTAGINGDIRECTORY/rpcs3-v${COMM_TAG}-${COMM_COUNT}-${COMM_HASH}_macos_arm64.dmg" -#"$BREW_X64_PATH/bin/create-dmg" --volname RPCS3 \ -#--window-size 800 400 \ -#--icon-size 100 \ -#--icon rpcs3.app 200 190 \ -#--add-file Quickstart.url Quickstart.url 400 20 \ -#--hide-extension rpcs3.app \ -#--hide-extension Quickstart.url \ -#--app-drop-link 600 185 \ -#--skip-jenkins \ -#--format ULMO \ -#"$DMG_FILEPATH" \ -#RPCS3.app -#FILESIZE=$(stat -f %z "$DMG_FILEPATH") -#SHA256SUM=$(shasum -a 256 "$DMG_FILEPATH" | awk '{ print $1 }') - -ARCHIVE_FILEPATH="$BUILD_ARTIFACTSTAGINGDIRECTORY/rpcs3-v${COMM_TAG}-${COMM_COUNT}-${COMM_HASH}_macos_arm64.7z" -"$BREW_PATH/bin/7z" a -mx9 "$ARCHIVE_FILEPATH" RPCS3.app Quickstart.url -FILESIZE=$(stat -f %z "$ARCHIVE_FILEPATH") -SHA256SUM=$(shasum -a 256 "$ARCHIVE_FILEPATH" | awk '{ print $1 }') - -cd .. -echo "${SHA256SUM};${FILESIZE}B" > "$RELEASE_MESSAGE" -cd bin diff --git a/.ci/deploy-mac.sh b/.ci/deploy-mac.sh deleted file mode 100755 index c293358748..0000000000 --- a/.ci/deploy-mac.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/sh -ex - -# shellcheck disable=SC2086 -cd build || exit 1 - -# Gather explicit version number and number of commits -COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ../rpcs3/rpcs3_version.cpp) -COMM_COUNT=$(git rev-list --count HEAD) -COMM_HASH=$(git rev-parse --short=8 HEAD) - -AVVER="${COMM_TAG}-${COMM_COUNT}" - -# AVVER is used for GitHub releases, it is the version number. 
-echo "AVVER=$AVVER" >> ../.ci/ci-vars.env - -cd bin -mkdir "rpcs3.app/Contents/lib/" - -cp "/usr/local/opt/llvm@$LLVM_COMPILER_VER/lib/c++/libc++abi.1.0.dylib" "rpcs3.app/Contents/lib/libc++abi.1.dylib" -cp "$(realpath /usr/local/lib/libsharpyuv.0.dylib)" "rpcs3.app/Contents/lib/libsharpyuv.0.dylib" -cp "$(realpath /usr/local/lib/libintl.8.dylib)" "rpcs3.app/Contents/lib/libintl.8.dylib" - -rm -rf "rpcs3.app/Contents/Frameworks/QtPdf.framework" \ -"rpcs3.app/Contents/Frameworks/QtQml.framework" \ -"rpcs3.app/Contents/Frameworks/QtQmlModels.framework" \ -"rpcs3.app/Contents/Frameworks/QtQuick.framework" \ -"rpcs3.app/Contents/Frameworks/QtVirtualKeyboard.framework" \ -"rpcs3.app/Contents/Plugins/platforminputcontexts" \ -"rpcs3.app/Contents/Plugins/virtualkeyboard" \ -"rpcs3.app/Contents/Resources/git" - -../../.ci/optimize-mac.sh rpcs3.app - -# Need to do this rename hack due to case insensitive filesystem -mv rpcs3.app RPCS3_.app -mv RPCS3_.app RPCS3.app - -# Hack -install_name_tool \ --delete_rpath /usr/local/lib \ --delete_rpath /usr/local/opt/llvm@$LLVM_COMPILER_VER/lib RPCS3.app/Contents/MacOS/rpcs3 -#-delete_rpath /usr/local/Cellar/sdl3/3.2.8/lib - -# NOTE: "--deep" is deprecated -codesign --deep -fs - RPCS3.app - -echo "[InternetShortcut]" > Quickstart.url -echo "URL=https://rpcs3.net/quickstart" >> Quickstart.url -echo "IconIndex=0" >> Quickstart.url - -#DMG_FILEPATH="$BUILD_ARTIFACTSTAGINGDIRECTORY/rpcs3-v${COMM_TAG}-${COMM_COUNT}-${COMM_HASH}_macos.dmg" -#"$BREW_X64_PATH/bin/create-dmg" --volname RPCS3 \ -#--window-size 800 400 \ -#--icon-size 100 \ -#--icon rpcs3.app 200 190 \ -#--add-file Quickstart.url Quickstart.url 400 20 \ -#--hide-extension rpcs3.app \ -#--hide-extension Quickstart.url \ -#--app-drop-link 600 185 \ -#--skip-jenkins \ -#--format ULMO \ -#"$DMG_FILEPATH" \ -#RPCS3.app -#FILESIZE=$(stat -f %z "$DMG_FILEPATH") -#SHA256SUM=$(shasum -a 256 "$DMG_FILEPATH" | awk '{ print $1 }') - -ARCHIVE_FILEPATH="$BUILD_ARTIFACTSTAGINGDIRECTORY/rpcs3-v${COMM_TAG}-${COMM_COUNT}-${COMM_HASH}_macos.7z" -"$BREW_X64_PATH/bin/7z" a -mx9 "$ARCHIVE_FILEPATH" RPCS3.app Quickstart.url -FILESIZE=$(stat -f %z "$ARCHIVE_FILEPATH") -SHA256SUM=$(shasum -a 256 "$ARCHIVE_FILEPATH" | awk '{ print $1 }') - -cd .. -echo "${SHA256SUM};${FILESIZE}B" > "$RELEASE_MESSAGE" -cd bin diff --git a/.ci/deploy-windows-clang.sh b/.ci/deploy-windows-clang.sh deleted file mode 100644 index d45cb45acf..0000000000 --- a/.ci/deploy-windows-clang.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -ex - -# source ci-vars.env -# shellcheck disable=SC1091 -. 
.ci/ci-vars.env - -cd build || exit 1 - -CPU_ARCH="${1:-x86_64}" - -echo "Deploying rpcs3 windows clang $CPU_ARCH" - -# BUILD_blablabla is CI specific, so we wrap it for portability -ARTIFACT_DIR=$(cygpath -u "$BUILD_ARTIFACTSTAGINGDIRECTORY") -MSYS2_CLANG_BIN=$(cygpath -w /clang64/bin) -MSYS2_USR_BIN=$(cygpath -w /usr/bin) - -echo "Installing dependencies of: ./bin/rpcs3.exe (MSYS2 dir is '$MSYS2_CLANG_BIN', usr dir is '$MSYS2_USR_BIN')" -cmake -DMSYS2_CLANG_BIN="$MSYS2_CLANG_BIN" -DMSYS2_USR_BIN="$MSYS2_USR_BIN" -Dexe=./bin/rpcs3.exe -P ../buildfiles/cmake/CopyRuntimeDependencies.cmake - -# Prepare compatibility and SDL database for packaging -mkdir ./bin/config -mkdir ./bin/config/input_configs -curl -fsSL 'https://raw.githubusercontent.com/gabomdq/SDL_GameControllerDB/master/gamecontrollerdb.txt' 1> ./bin/config/input_configs/gamecontrollerdb.txt -curl -fsSL 'https://rpcs3.net/compatibility?api=v1&export' | iconv -t UTF-8 1> ./bin/GuiConfigs/compat_database.dat - -# Package artifacts -7z a -m0=LZMA2 -mx9 "$BUILD" ./bin/* - -# Generate sha256 hashes -# Write to file for GitHub releases -sha256sum "$BUILD" | awk '{ print $1 }' | tee "$BUILD.sha256" -echo "$(cat "$BUILD.sha256");$(stat -c %s "$BUILD")B" > GitHubReleaseMessage.txt - -# Move files to publishing directory -mkdir -p "$ARTIFACT_DIR" -cp -- "$BUILD" "$ARTIFACT_DIR" -cp -- "$BUILD.sha256" "$ARTIFACT_DIR" diff --git a/.ci/deploy-windows.sh b/.ci/deploy-windows.sh index b885831511..f269fe45d4 100755 --- a/.ci/deploy-windows.sh +++ b/.ci/deploy-windows.sh @@ -1,22 +1,15 @@ #!/bin/sh -ex -# First let's print some info about our caches -"$(cygpath -u "$CCACHE_BIN_DIR")"/ccache.exe --show-stats -v - -# BUILD_blablabla is CI specific, so we wrap it for portability +# BUILD_blablabla is Azure specific, so we wrap it for portability ARTIFACT_DIR="$BUILD_ARTIFACTSTAGINGDIRECTORY" # Remove unecessary files rm -f ./bin/rpcs3.exp ./bin/rpcs3.lib ./bin/rpcs3.pdb ./bin/vc_redist.x64.exe -# Prepare compatibility and SDL database for packaging -mkdir ./bin/config -mkdir ./bin/config/input_configs -curl -fsSL 'https://raw.githubusercontent.com/gabomdq/SDL_GameControllerDB/master/gamecontrollerdb.txt' 1> ./bin/config/input_configs/gamecontrollerdb.txt -curl -fsSL 'https://rpcs3.net/compatibility?api=v1&export' | iconv -t UTF-8 1> ./bin/GuiConfigs/compat_database.dat - -# Download SSL certificate (not needed with CURLSSLOPT_NATIVE_CA) -#curl -fsSL 'https://curl.haxx.se/ca/cacert.pem' 1> ./bin/cacert.pem +# Prepare compatibility database for packaging, as well as +# certificate for ssl (auto-updater) +curl -sL 'https://rpcs3.net/compatibility?api=v1&export' | iconv -t UTF-8 > ./bin/GuiConfigs/compat_database.dat +curl -sL 'https://curl.haxx.se/ca/cacert.pem' > ./bin/cacert.pem # Package artifacts 7z a -m0=LZMA2 -mx9 "$BUILD" ./bin/* diff --git a/.ci/docker.env b/.ci/docker.env index eb70b68c18..2b36fb34c0 100644 --- a/.ci/docker.env +++ b/.ci/docker.env @@ -1,4 +1,5 @@ -# Variables set by CI +# Variables set by Azure Pipelines +CI_HAS_ARTIFACTS BUILD_REASON BUILD_SOURCEVERSION BUILD_ARTIFACTSTAGINGDIRECTORY @@ -7,7 +8,6 @@ BUILD_SOURCEBRANCHNAME APPDIR ARTDIR RELEASE_MESSAGE -RUN_UNIT_TESTS # Variables for build matrix COMPILER DEPLOY_APPIMAGE diff --git a/.ci/export-azure-vars.sh b/.ci/export-azure-vars.sh new file mode 100755 index 0000000000..7ab0515200 --- /dev/null +++ b/.ci/export-azure-vars.sh @@ -0,0 +1,13 @@ +#!/bin/sh -e + +# Export variables for later stages of the Azure pipeline +# Values done in this manner will appear as environment 
variables +# in later stages. + +# From pure-sh-bible +# Setting 'IFS' tells 'read' where to split the string. +while IFS='=' read -r key val; do + # Skip over lines containing comments. + [ "${key##\#*}" ] || continue + echo "##vso[task.setvariable variable=$key]$val" +done < ".ci/azure-vars.env" diff --git a/.ci/export-cirrus-vars.sh b/.ci/export-cirrus-vars.sh new file mode 100644 index 0000000000..9269ea89aa --- /dev/null +++ b/.ci/export-cirrus-vars.sh @@ -0,0 +1,13 @@ +#!/bin/sh -e + +# Export variables for later stages of the Cirrus pipeline +# Values done in this manner will appear as environment variables +# in later stages. + +# From pure-sh-bible +# Setting 'IFS' tells 'read' where to split the string. +while IFS='=' read -r key val; do + # Skip over lines containing comments. + [ "${key##\#*}" ] || continue + export "$key"="$val" +done < ".ci/azure-vars.env" diff --git a/.ci/generate-qt-ts.sh b/.ci/generate-qt-ts.sh deleted file mode 100755 index a9fc139a93..0000000000 --- a/.ci/generate-qt-ts.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -ex - -mkdir -p ../translations - -LUPDATE_PATH=$(find /usr -name lupdate -type f 2>/dev/null | head -n 1) -if [ -z "$LUPDATE_PATH" ]; then - echo "Error: lupdate not found!" - exit 1 -else - echo "lupdate found at: $LUPDATE_PATH" - $LUPDATE_PATH -recursive . -ts ../translations/rpcs3_template.ts - sed -i 's|filename="\.\./|filename="./|g' ../translations/rpcs3_template.ts -fi \ No newline at end of file diff --git a/.ci/get_keys-windows.sh b/.ci/get_keys-windows.sh old mode 100755 new mode 100644 index 8384b4de5d..1f6efc379c --- a/.ci/get_keys-windows.sh +++ b/.ci/get_keys-windows.sh @@ -1,3 +1,4 @@ #!/bin/sh -ex -curl -fLo "./llvm.lock" "https://github.com/RPCS3/llvm-mirror/releases/download/custom-build-win-${LLVM_VER}/llvmlibs_mt.7z.sha256" +curl -L -o "./llvm.lock" "https://github.com/RPCS3/llvm-mirror/releases/download/custom-build-win/llvmlibs_mt.7z.sha256" +curl -L -o "./glslang.lock" "https://github.com/RPCS3/glslang/releases/download/custom-build-win/glslanglibs_mt.7z.sha256" diff --git a/.ci/github-upload.sh b/.ci/github-upload.sh index 8cc89314d5..53d3d09895 100755 --- a/.ci/github-upload.sh +++ b/.ci/github-upload.sh @@ -16,7 +16,7 @@ generate_post_data() EOF } -curl -fsS \ +curl -s \ -H "Authorization: token ${RPCS3_TOKEN}" \ -H "Accept: application/vnd.github.v3+json" \ --data "$(generate_post_data)" "https://api.github.com/repos/$UPLOAD_REPO_FULL_NAME/releases" >> release.json @@ -28,7 +28,7 @@ echo "${id:?}" upload_file() { - curl -fsS \ + curl -s \ -H "Authorization: token ${RPCS3_TOKEN}" \ -H "Accept: application/vnd.github.v3+json" \ -H "Content-Type: application/octet-stream" \ diff --git a/.ci/install-freebsd.sh b/.ci/install-freebsd.sh index 04efc6c0e9..55ed346a97 100755 --- a/.ci/install-freebsd.sh +++ b/.ci/install-freebsd.sh @@ -2,17 +2,17 @@ # NOTE: this script is run under root permissions # shellcheck shell=sh disable=SC2096 -# RPCS3 often needs recent Qt and Vulkan-Headers +# RPCS3 often needs recent Qt5 and Vulkan-Headers sed -i '' 's/quarterly/latest/' /etc/pkg/FreeBSD.conf export ASSUME_ALWAYS_YES=true pkg info # debug -# WITH_LLVM -pkg install "llvm$LLVM_COMPILER_VER" +# Prefer newer Clang than in base system (see also .ci/build-freebsd.sh) +pkg install llvm12 -# Mandatory dependencies (qtX-base is pulled via qtX-multimedia) -pkg install git ccache cmake ninja "qt$QT_VER_MAIN-multimedia" "qt$QT_VER_MAIN-svg" glew openal-soft ffmpeg +# Mandatory dependencies (qt5-dbus and qt5-gui are pulled via qt5-widgets) +pkg 
install git ccache cmake ninja qt5-qmake qt5-buildtools qt5-widgets qt5-concurrent glew openal-soft ffmpeg -# Optional dependencies (libevdev is pulled by qtX-base) -pkg install pkgconf alsa-lib pulseaudio sdl3 evdev-proto vulkan-headers vulkan-loader opencv +# Optional dependencies (libevdev is pulled by qt5-gui) +pkg install pkgconf alsa-lib pulseaudio sdl2 evdev-proto vulkan-headers vulkan-loader diff --git a/.ci/optimize-mac.sh b/.ci/optimize-mac.sh deleted file mode 100755 index 5fea7877f4..0000000000 --- a/.ci/optimize-mac.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -file_path=$(find "$1/Contents/MacOS" -type f -print0 | head -n 1) - -if [ -z "$file_path" ]; then - echo "No executable file found in $1/Contents/MacOS" >&2 - exit 1 -fi - - -target_architecture="$(lipo "$file_path" -archs)" - -if [ -z "$target_architecture" ]; then - exit 1 -fi - -# shellcheck disable=SC3045 -find "$1" -type f -print0 | while IFS= read -r -d '' file; do - echo Thinning "$file" -> "$target_architecture" - lipo "$file" -thin "$target_architecture" -output "$file" || true -done diff --git a/.ci/setup-llvm.sh b/.ci/setup-llvm.sh deleted file mode 100644 index a54901309e..0000000000 --- a/.ci/setup-llvm.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/sh -ex - -# Resource/dependency URLs -CCACHE_URL="https://github.com/ccache/ccache/releases/download/v4.11.2/ccache-4.11.2-windows-x86_64.zip" - -DEP_URLS=" \ - $CCACHE_URL" - -# CI doesn't make a cache dir if it doesn't exist, so we do it manually -[ -d "$DEPS_CACHE_DIR" ] || mkdir "$DEPS_CACHE_DIR" - -# Pull the llvm submodule -# shellcheck disable=SC2046 -git submodule -q update --init --depth=1 -- 3rdparty/llvm - -# Git bash doesn't have rev, so here it is -rev() -{ - echo "$1" | awk '{ for(i = length($0); i != 0; --i) { a = a substr($0, i, 1); } } END { print a }' -} - -# Usage: download_and_verify url checksum algo file -# Check to see if a file is already cached, and the checksum matches. If not, download it. -# Tries up to 3 times -download_and_verify() -{ - url="$1" - correctChecksum="$2" - algo="$3" - fileName="$4" - - for _ in 1 2 3; do - [ -e "$DEPS_CACHE_DIR/$fileName" ] || curl -fLo "$DEPS_CACHE_DIR/$fileName" "$url" - fileChecksum=$("${algo}sum" "$DEPS_CACHE_DIR/$fileName" | awk '{ print $1 }') - [ "$fileChecksum" = "$correctChecksum" ] && return 0 - done - - return 1; -} - -# Some dependencies install here -[ -d "./build/lib_ext/Release-x64" ] || mkdir -p "./build/lib_ext/Release-x64" - -for url in $DEP_URLS; do - # Get the filename from the URL and remove query strings (?arg=something). - fileName="$(rev "$(rev "$url" | cut -d'/' -f1)" | cut -d'?' 
-f1)" - [ -z "$fileName" ] && echo "Unable to parse url: $url" && exit 1 - - # shellcheck disable=SC1003 - case "$url" in - *ccache*) checksum=$CCACHE_SHA; algo="sha256"; outDir="$CCACHE_BIN_DIR" ;; - *) echo "Unknown url resource: $url"; exit 1 ;; - esac - - download_and_verify "$url" "$checksum" "$algo" "$fileName" - 7z x -y "$DEPS_CACHE_DIR/$fileName" -aos -o"$outDir" -done - -# Setup ccache tool -[ -d "$CCACHE_DIR" ] || mkdir -p "$(cygpath -u "$CCACHE_DIR")" -CCACHE_SH_DIR=$(cygpath -u "$CCACHE_BIN_DIR") -mv "$CCACHE_SH_DIR"/ccache-*/* "$CCACHE_SH_DIR" -cp "$CCACHE_SH_DIR"/ccache.exe "$CCACHE_SH_DIR"/cl.exe diff --git a/.ci/setup-windows-ci-vars.sh b/.ci/setup-windows-ci-vars.sh deleted file mode 100644 index 11373e0716..0000000000 --- a/.ci/setup-windows-ci-vars.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/sh -ex - -CPU_ARCH="${1:-win64}" -COMPILER="${2:-msvc}" - -# These are CI specific, so we wrap them for portability -REPO_NAME="$BUILD_REPOSITORY_NAME" -REPO_BRANCH="$BUILD_SOURCEBRANCHNAME" -PR_NUMBER="$BUILD_PR_NUMBER" - -# Gather explicit version number and number of commits -COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ./rpcs3/rpcs3_version.cpp) -COMM_COUNT=$(git rev-list --count HEAD) -COMM_HASH=$(git rev-parse --short=8 HEAD) - -# Format the above into filenames -if [ -n "$PR_NUMBER" ]; then - AVVER="${COMM_TAG}-${COMM_HASH}" - BUILD_RAW="rpcs3-v${AVVER}_${CPU_ARCH}_${COMPILER}" - BUILD="${BUILD_RAW}.7z" -else - AVVER="${COMM_TAG}-${COMM_COUNT}" - BUILD_RAW="rpcs3-v${AVVER}-${COMM_HASH}_${CPU_ARCH}_${COMPILER}" - BUILD="${BUILD_RAW}.7z" -fi - -# BRANCH is used for experimental build warnings for pr builds, used in main_window.cpp. -# BUILD is the name of the release artifact -# BUILD_RAW is just filename -# AVVER is used for GitHub releases, it is the version number. -BRANCH="${REPO_NAME}/${REPO_BRANCH}" - -# SC2129 -{ - echo "BRANCH=$BRANCH" - echo "BUILD=$BUILD" - echo "BUILD_RAW=$BUILD_RAW" - echo "AVVER=$AVVER" -} >> .ci/ci-vars.env diff --git a/.ci/setup-windows.sh b/.ci/setup-windows.sh index 11a68367b8..bd4db40159 100755 --- a/.ci/setup-windows.sh +++ b/.ci/setup-windows.sh @@ -1,40 +1,42 @@ #!/bin/sh -ex +# These are Azure specific, so we wrap them for portability +REPO_NAME="$BUILD_REPOSITORY_NAME" +REPO_BRANCH="$SYSTEM_PULLREQUEST_SOURCEBRANCH" +PR_NUMBER="$SYSTEM_PULLREQUEST_PULLREQUESTID" + # Resource/dependency URLs # Qt mirrors can be volatile and slow, so we list 2 -#QT_HOST="http://mirrors.ocf.berkeley.edu/qt/" -QT_HOST="http://qt.mirror.constant.com/" +QT_HOST="http://mirrors.ocf.berkeley.edu/qt/" +#QT_HOST="http://qt.mirror.constant.com/" QT_URL_VER=$(echo "$QT_VER" | sed "s/\.//g") QT_VER_MSVC_UP=$(echo "${QT_VER_MSVC}" | tr '[:lower:]' '[:upper:]') -QT_PREFIX="online/qtsdkrepository/windows_x86/desktop/qt${QT_VER_MAIN}_${QT_URL_VER}/qt${QT_VER_MAIN}_${QT_URL_VER}/qt.qt${QT_VER_MAIN}.${QT_URL_VER}." 
-QT_PREFIX_2="win64_${QT_VER_MSVC}_64/${QT_VER}-0-${QT_DATE}" -QT_SUFFIX="-Windows-Windows_11_23H2-${QT_VER_MSVC_UP}-Windows-Windows_11_23H2-X86_64.7z" -QT_BASE_URL="${QT_HOST}${QT_PREFIX}${QT_PREFIX_2}qtbase${QT_SUFFIX}" -QT_DECL_URL="${QT_HOST}${QT_PREFIX}${QT_PREFIX_2}qtdeclarative${QT_SUFFIX}" -QT_TOOL_URL="${QT_HOST}${QT_PREFIX}${QT_PREFIX_2}qttools${QT_SUFFIX}" -QT_MM_URL="${QT_HOST}${QT_PREFIX}addons.qtmultimedia.${QT_PREFIX_2}qtmultimedia${QT_SUFFIX}" -QT_SVG_URL="${QT_HOST}${QT_PREFIX}${QT_PREFIX_2}qtsvg${QT_SUFFIX}" -LLVMLIBS_URL="https://github.com/RPCS3/llvm-mirror/releases/download/custom-build-win-${LLVM_VER}/llvmlibs_mt.7z" -VULKAN_SDK_URL="https://www.dropbox.com/scl/fi/sjjh0fc4ld281pjbl2xzu/VulkanSDK-${VULKAN_VER}-Installer.exe?rlkey=f6wzc0lvms5vwkt2z3qabfv9d&dl=1" -CCACHE_URL="https://github.com/ccache/ccache/releases/download/v4.11.2/ccache-4.11.2-windows-x86_64.zip" +QT_PREFIX="online/qtsdkrepository/windows_x86/desktop/qt${QT_VER_MAIN}_${QT_URL_VER}/qt.qt${QT_VER_MAIN}.${QT_URL_VER}.win64_${QT_VER_MSVC}_64/${QT_VER}-0-${QT_DATE}" +QT_SUFFIX="-Windows-Windows_10-${QT_VER_MSVC_UP}-Windows-Windows_10-X86_64.7z" +QT_BASE_URL="${QT_HOST}${QT_PREFIX}qtbase${QT_SUFFIX}" +QT_WINE_URL="${QT_HOST}${QT_PREFIX}qtwinextras${QT_SUFFIX}" +QT_DECL_URL="${QT_HOST}${QT_PREFIX}qtdeclarative${QT_SUFFIX}" +QT_TOOL_URL="${QT_HOST}${QT_PREFIX}qttools${QT_SUFFIX}" +LLVMLIBS_URL='https://github.com/RPCS3/llvm-mirror/releases/download/custom-build-win/llvmlibs_mt.7z' +GLSLANG_URL='https://github.com/RPCS3/glslang/releases/download/custom-build-win/glslanglibs_mt.7z' +VULKAN_SDK_URL="https://www.dropbox.com/s/fjlh97hrz0crqxv/VulkanSDK-1.2.182.0-Installer.exe" DEP_URLS=" \ $QT_BASE_URL \ + $QT_WINE_URL \ $QT_DECL_URL \ $QT_TOOL_URL \ - $QT_MM_URL \ - $QT_SVG_URL \ $LLVMLIBS_URL \ - $VULKAN_SDK_URL\ - $CCACHE_URL" + $GLSLANG_URL \ + $VULKAN_SDK_URL" -# CI doesn't make a cache dir if it doesn't exist, so we do it manually -[ -d "$DEPS_CACHE_DIR" ] || mkdir "$DEPS_CACHE_DIR" +# Azure pipelines doesn't make a cache dir if it doesn't exist, so we do it manually +[ -d "$CACHE_DIR" ] || mkdir "$CACHE_DIR" # Pull all the submodules except llvm, since it is built separately and we just download that build # Note: Tried to use git submodule status, but it takes over 20 seconds # shellcheck disable=SC2046 -git submodule -q update --init --depth=1 --jobs=8 $(awk '/path/ && !/FAudio/ && !/llvm/ { print $3 }' .gitmodules) +git submodule -q update --init --depth=1 --jobs=8 $(awk '/path/ && !/llvm/ { print $3 }' .gitmodules) # Git bash doesn't have rev, so here it is rev() @@ -53,44 +55,61 @@ download_and_verify() fileName="$4" for _ in 1 2 3; do - [ -e "$DEPS_CACHE_DIR/$fileName" ] || curl -fLo "$DEPS_CACHE_DIR/$fileName" "$url" - fileChecksum=$("${algo}sum" "$DEPS_CACHE_DIR/$fileName" | awk '{ print $1 }') + [ -e "$CACHE_DIR/$fileName" ] || curl -L -o "$CACHE_DIR/$fileName" "$url" + fileChecksum=$("${algo}sum" "$CACHE_DIR/$fileName" | awk '{ print $1 }') [ "$fileChecksum" = "$correctChecksum" ] && return 0 + rm "$CACHE_DIR/$fileName" done return 1; } # Some dependencies install here -[ -d "./build/lib_ext/Release-x64" ] || mkdir -p "./build/lib_ext/Release-x64" +[ -d "./lib" ] || mkdir "./lib" for url in $DEP_URLS; do - # Get the filename from the URL and remove query strings (?arg=something). - fileName="$(rev "$(rev "$url" | cut -d'/' -f1)" | cut -d'?' -f1)" + # Get the filename from the URL. 
This breaks if the URL contains query arguments, so avoid those. + fileName="$(rev "$(rev "$url" | cut -d'/' -f1)")" [ -z "$fileName" ] && echo "Unable to parse url: $url" && exit 1 # shellcheck disable=SC1003 case "$url" in - *qt*) checksum=$(curl -fL "${url}.sha1"); algo="sha1"; outDir="$QTDIR/" ;; - *llvm*) checksum=$(curl -fL "${url}.sha256"); algo="sha256"; outDir="./build/lib_ext/Release-x64" ;; - *ccache*) checksum=$CCACHE_SHA; algo="sha256"; outDir="$CCACHE_BIN_DIR" ;; + *qt*) checksum=$(curl -L "${url}.sha1"); algo="sha1"; outDir='C:\Qt\' ;; + *llvm*) checksum=$(curl -L "${url}.sha256"); algo="sha256"; outDir="." ;; + *glslang*) checksum=$(curl -L "${url}.sha256"); algo="sha256"; outDir="./lib/Release-x64" ;; *Vulkan*) # Vulkan setup needs to be run in batch environment # Need to subshell this or else it doesn't wait download_and_verify "$url" "$VULKAN_SDK_SHA" "sha256" "$fileName" - cp "$DEPS_CACHE_DIR/$fileName" . - _=$(echo "$fileName --accept-licenses --default-answer --confirm-command install" | cmd) + cp "$CACHE_DIR/$fileName" . + _=$(echo "$fileName /S" | cmd) continue ;; *) echo "Unknown url resource: $url"; exit 1 ;; esac download_and_verify "$url" "$checksum" "$algo" "$fileName" - 7z x -y "$DEPS_CACHE_DIR/$fileName" -aos -o"$outDir" + 7z x -y "$CACHE_DIR/$fileName" -aos -o"$outDir" done -# Setup ccache tool -[ -d "$CCACHE_DIR" ] || mkdir -p "$(cygpath -u "$CCACHE_DIR")" -CCACHE_SH_DIR=$(cygpath -u "$CCACHE_BIN_DIR") -mv "$CCACHE_SH_DIR"/ccache-*/* "$CCACHE_SH_DIR" -cp "$CCACHE_SH_DIR"/ccache.exe "$CCACHE_SH_DIR"/cl.exe +# Gather explicit version number and number of commits +COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ./rpcs3/rpcs3_version.cpp) +COMM_COUNT=$(git rev-list --count HEAD) +COMM_HASH=$(git rev-parse --short=8 HEAD) + +# Format the above into filenames +if [ -n "$PR_NUMBER" ]; then + AVVER="${COMM_TAG}-${COMM_HASH}" + BUILD="rpcs3-v${AVVER}_win64.7z" +else + AVVER="${COMM_TAG}-${COMM_COUNT}" + BUILD="rpcs3-v${AVVER}-${COMM_HASH}_win64.7z" +fi + +# BRANCH is used for experimental build warnings for pr builds, used in main_window.cpp. +# BUILD is the name of the release artifact +# AVVER is used for GitHub releases, it is the version number. +BRANCH="${REPO_NAME}/${REPO_BRANCH}" +echo "BRANCH=$BRANCH" > .ci/azure-vars.env +echo "BUILD=$BUILD" >> .ci/azure-vars.env +echo "AVVER=$AVVER" >> .ci/azure-vars.env diff --git a/.cirrus.yml b/.cirrus.yml new file mode 100644 index 0000000000..efdf6364e7 --- /dev/null +++ b/.cirrus.yml @@ -0,0 +1,113 @@ +env: + CIRRUS_CLONE_DEPTH: 0 # Unshallow clone to obtain proper GIT_VERSION + BUILD_REPOSITORY_NAME: $CIRRUS_REPO_FULL_NAME + SYSTEM_PULLREQUEST_SOURCEBRANCH: $CIRRUS_BRANCH + SYSTEM_PULLREQUEST_PULLREQUESTID: $CIRRUS_PR + BUILD_SOURCEVERSION: $CIRRUS_CHANGE_IN_REPO +# RPCS3_TOKEN: ENCRYPTED[13a0f18de9285e6c880e3aecbb54258245a74872af7d0fcb92447fa1200ceb6a4dc8acc880ddbf5e653d46336563ca72] # Replace this when we're using Cirrus for master releases!!
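The `.cirrus.yml` below reuses `.ci/azure-vars.env` through the two export helpers added earlier; they share one parsing loop and differ only in how each key/value pair is published. A condensed sketch of that shared core:

```sh
# Split each KEY=VAL line on '=' and skip comment lines; "${key##\#*}" strips
# a leading "#..." so the test fails (and `continue` fires) on comments.
while IFS='=' read -r key val; do
    [ "${key##\#*}" ] || continue
    echo "##vso[task.setvariable variable=$key]$val"   # Azure flavor
    # export "$key"="$val"                             # Cirrus flavor
done < ".ci/azure-vars.env"
```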
+ +windows_task: + matrix: + - name: Cirrus Windows + windows_container: + image: cirrusci/windowsservercore:visualstudio2019 + cpu: 8 + memory: 16G + env: + CIRRUS_SHELL: "bash" + COMPILER: msvc + QT_VER_MAIN: '5' + BUILD_ARTIFACTSTAGINGDIRECTORY: ${CIRRUS_WORKING_DIR}\artifacts\ + QT_VER: '5.15.2' + QT_VER_MSVC: 'msvc2019' + QT_DATE: '202011130602' + QTDIR: C:\Qt\${QT_VER}\${QT_VER_MSVC}_64 + VULKAN_VER: '1.2.182.0' + VULKAN_SDK_SHA: '7089d48011f06c4a02a593913c6a9385d1652ab7765c25d2666d5d6ef4e24af8' + VULKAN_SDK: C:\VulkanSDK\${VULKAN_VER} + CACHE_DIR: "./cache" + UPLOAD_COMMIT_HASH: 7d09e3be30805911226241afbb14f8cdc2eb054e + UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-win" + deps_cache: + folder: "./cache" + #obj_cache: + # folder: "./tmp" + #obj2_cache: + # folder: "./rpcs3/x64" + setup_script: + - './.ci/get_keys-windows.sh' + - './.ci/setup-windows.sh' +# - choco install -y python # Needed for SPIRV, use either this or make a new Docker image +# spirv_script: +# - export PATH=${PATH}:"C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\MSBuild\Current\Bin" +# - cd "${CIRRUS_WORKING_DIR}/3rdparty/SPIRV" +# - msbuild.exe spirv.vcxproj //p:Configuration=Release //m + rpcs3_script: + - export PATH=${PATH}:"C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\MSBuild\Current\Bin" + - msbuild.exe rpcs3.sln //p:Configuration=Release //m + deploy_script: + - mkdir artifacts + - source './.ci/export-cirrus-vars.sh' + - './.ci/deploy-windows.sh' + artifacts: + name: Artifact + path: "*.7z*" +# push_script: | +# if [ "$CIRRUS_REPO_OWNER" = "RPCS3" ] && [ -z "$CIRRUS_PR" ] && [ "$CIRRUS_BRANCH" = "master" ]; then +# source './.ci/export-cirrus-vars.sh' +# './.ci/github-upload.sh' +# fi; + +linux_task: + container: + image: rpcs3/rpcs3-ci-bionic:1.1 + cpu: 4 + memory: 16G + env: + BUILD_ARTIFACTSTAGINGDIRECTORY: ${CIRRUS_WORKING_DIR}/artifacts + CCACHE_DIR: "/tmp/ccache_dir" + CCACHE_MAXSIZE: 300M + CI_HAS_ARTIFACTS: true + UPLOAD_COMMIT_HASH: d812f1254a1157c80fd402f94446310560f54e5f + UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-linux" + DEPLOY_APPIMAGE: true + APPDIR: "./appdir" + ARTDIR: ${CIRRUS_WORKING_DIR}/artifacts/ + RELEASE_MESSAGE: "../GitHubReleaseMessage.txt" + ccache_cache: + folder: "/tmp/ccache_dir" + matrix: + - name: Cirrus Linux GCC + env: + COMPILER: gcc + gcc_script: + - mkdir artifacts + - ".ci/build-linux.sh" + - name: Cirrus Linux Clang + env: + COMPILER: clang + clang_script: + - mkdir artifacts + - ".ci/build-linux.sh" + artifacts: + name: Artifact + path: "artifacts/*" +# push_script: | +# if [ "$CIRRUS_REPO_OWNER" = "RPCS3" ] && [ -z "$CIRRUS_PR" ] && [ "$CIRRUS_BRANCH" = "master" ] && [ "$COMPILER" = "gcc" ]; then +# '.ci/github-upload.sh' +# fi; + +freebsd_task: + matrix: + - name: Cirrus FreeBSD + freebsd_instance: + image_family: freebsd-13-0 + cpu: 8 + memory: 8G + env: + CCACHE_MAXSIZE: 300M # 3x clean build, rounded + CCACHE_DIR: /tmp/ccache_dir + ccache_cache: + folder: /tmp/ccache_dir + install_script: "sh -ex ./.ci/install-freebsd.sh" + script: "./.ci/build-freebsd.sh" diff --git a/.clang-format b/.clang-format index 08b2119b34..99b39f193e 100644 --- a/.clang-format +++ b/.clang-format @@ -1,5 +1,5 @@ Standard: c++20 -UseTab: AlignWithSpaces +UseTab: ForIndentation TabWidth: 4 IndentWidth: 4 AccessModifierOffset: -4 @@ -16,12 +16,11 @@ AllowShortBlocksOnASingleLine: Never AllowShortCaseLabelsOnASingleLine: true AllowShortFunctionsOnASingleLine: Empty AllowShortLoopsOnASingleLine: false -AllowShortLambdasOnASingleLine: Empty 
Cpp11BracedListStyle: true IndentCaseLabels: false SortIncludes: false ReflowComments: true -AlignConsecutiveAssignments: false +AlignConsecutiveAssignments: true AlignTrailingComments: true AlignAfterOpenBracket: DontAlign ConstructorInitializerAllOnOneLineOrOnePerLine: false diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index d719944078..a0453b46bc 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -16,4 +16,4 @@ Submitting your test results for Commercial Games must be done on our forums. Pl # Contributing -Check the [Coding Style Guidelines](https://github.com/RPCS3/rpcs3/wiki/Coding-Style) and [Developer Information](https://github.com/RPCS3/rpcs3/wiki/Developer-Information). If you have any questions, hit us up on our [Discord Server](https://discord.gg/rpcs3) in the **#development** channel. +Check the [Coding Style Guidelines](https://github.com/RPCS3/rpcs3/wiki/Coding-Style) and [Developer Information](https://github.com/RPCS3/rpcs3/wiki/Developer-Information). If you have any questions, hit us up on our [Discord Server](https://discord.me/RPCS3) in the **#development** channel. diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index f41f7323c4..76a7ca9a61 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1 +1,2 @@ patreon: Nekotekina +custom: https://rpcs3.net/alipay diff --git a/.github/ISSUE_TEMPLATE/1-regression-report.md b/.github/ISSUE_TEMPLATE/1-regression-report.md new file mode 100644 index 0000000000..b7cca103e9 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1-regression-report.md @@ -0,0 +1,66 @@ +--- +name: Regression report +about: If something used to work before, but now it doesn't +title: '' +labels: '' +assignees: '' + +--- + +## Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.me/RPCS3) or [forums](https://forums.rpcs3.net/) instead. + +## Quick summary +Please briefly describe what has stopped working correctly. + +## Details +Please describe the regression as accurately as possible. + +#### 0. Make sure you're running with settings as close to default as possible +* **Do NOT enable any emulator game patches when reporting issues** +* Only change settings that are required for the game to work + +#### 1. Please provide _exact_ build (or commit) information that introduced the regression you're reporting. +* Please see [How to find the build that caused a regression](https://wiki.rpcs3.net/index.php?title=Help:Using_different_versions_of_RPCS3#How_to_find_the_build_that_caused_a_regression.3F) in our wiki. +* You can find [History of RPCS3 builds](https://rpcs3.net/compatibility?b) here. + +#### 2. Please attach TWO logs: +* One for the build with regression. +* One for the build that works as expected. + +##### How to obtain RPCS3's log: +* Run the game until you find the regression. +* Completely close RPCS3, or move to the next step in case it has crashed. +* Locate RPCS3's log file: + + ```RPCS3.log``` (It can show up as just RPCS3 and have a notepad icon) + or + + ```RPCS3.log.gz``` (It can show up as RPCS3.log and have a zip or rar icon) +![image](https://user-images.githubusercontent.com/44116740/84433247-aa15fa80-ac36-11ea-913e-6fe25a1d040e.png) + On Linux it will be in ```~/.cache/rpcs3/```. + On Windows it will be near the executable. +* Attach the log: + + Drag and drop your log file into the issue. + + Or upload it to the cloud, such as Dropbox, Mega etc. + +#### 3. 
If you describe graphical regression, please provide an RSX capture and a RenderDoc capture that demonstrate it. +* To create an RSX capture, use _Create_ _RSX_ _Capture_ under _Utilities_. +Captures will be stored in RPCS3 folder → captures. + + Compress your capture with 7z, Rar etc. + And drag and drop it into the issue. + + Or upload it to the cloud, such as Dropbox, Mega etc. +* To create a RenderDoc capture, please refer to [RenderDoc's documentation](https://renderdoc.org/docs/how/how_capture_frame.html). + +#### 4. Please attach screenshots of your problem. +* Enable performance overlay with at least medium level of detail. +You can find it in _Emulator_ tab in _Settings_. + +#### 5. Please provide comparison with real PS3. + +#### 6. Please provide your system configuration: +* OS +* CPU +* GPU +* Driver version +* etc. + +#### Please include. +* Anything else you deem to be important diff --git a/.github/ISSUE_TEMPLATE/1-regression-report.yml b/.github/ISSUE_TEMPLATE/1-regression-report.yml deleted file mode 100644 index 7c8659b62e..0000000000 --- a/.github/ISSUE_TEMPLATE/1-regression-report.yml +++ /dev/null @@ -1,93 +0,0 @@ -name: Regression report -description: If something used to work before, but now it doesn't -title: "[Regression] Enter a title here" -labels: [] -body: - - type: markdown - attributes: - value: | - # Summary - Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.gg/rpcs3) or [forums](https://forums.rpcs3.net/) instead. - - type: textarea - id: quick-summary - attributes: - label: Quick summary - description: Please briefly describe what has stopped working correctly. - validations: - required: true - - type: textarea - id: details - attributes: - label: Details - description: Please describe the regression as accurately as possible. Include screenshots if neccessary. - validations: - required: false - - type: markdown - attributes: - value: | - # Identify the regressed build - Please provide the _exact_ build (or commit) information that introduced the regression you're reporting. - * Please see [How to find the build that caused a regression](https://wiki.rpcs3.net/index.php?title=Help:Using_different_versions_of_RPCS3#How_to_find_the_build_that_caused_a_regression.3F) in our wiki. - * You can find [History of RPCS3 builds](https://rpcs3.net/compatibility?b) here. - - Make sure you're running with settings as close to default as possible - * **Do NOT enable any emulator game patches when reporting issues** - * Only change settings that are required for the game to work - - type: input - id: regressed-build - attributes: - label: Build with regression - description: Provide _exact_ build (or commit) information that introduced the regression you're reporting. - placeholder: v0.0.23-13948-31df99f7 - validations: - required: true - - type: markdown - attributes: - value: | - # Log files - Obtaining the log file: - * Run the game until you find the regression. - * Completely close RPCS3 and locate the log file. - - RPCS3's Log file will be ```RPCS3.log.gz``` (sometimes shows as RPCS3.log with zip icon) or ```RPCS3.log``` (sometimes shows as RPCS3 wtih notepad icon). - * On Windows it will be in the ```log``` folder inside your RPCS3 folder. - * On Linux it will be in ```~/.cache/rpcs3/``` - * On MacOS it will be in ```~/Library/Caches/rpcs3```. If you're unable to locate it copy paste the path in Spotlight and hit enter. 
- - type: textarea - id: logs - attributes: - label: Attach two log files - description: | - Attach one file for the build with regression and another for a build that works. - Drag & drop the files into this input field, or upload them to another service (f.ex. Dropbox, Mega) and provide a link. - validations: - required: true - - type: markdown - attributes: - value: | - # Other details - If you describe a graphical regression, please provide an RSX capture and/or a RenderDoc capture that demonstrate it. - * To create an RSX capture, use _Create_ _RSX_ _Capture_ under _Utilities_. - * Captures will be stored in RPCS3 folder → captures. - * To create a RenderDoc capture, please refer to [RenderDoc's documentation](https://renderdoc.org/docs/how/how_capture_frame.html). - - type: textarea - id: captures - attributes: - label: Attach capture files for visual issues - description: Compress your capture with 7z, Rar etc. and attach it here, or upload it to the cloud (Dropbox, Mega etc) and add a link to it. - validations: - required: false - - type: textarea - id: system-info - attributes: - label: System configuration - description: Provide information about your system, such as operating system, CPU and GPU model, GPU driver version and other information that describes your system configuration. - validations: - required: false - - type: textarea - id: other-details - attributes: - label: Other details - description: Include anything else you deem to be important. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/2-bug-report.md b/.github/ISSUE_TEMPLATE/2-bug-report.md new file mode 100644 index 0000000000..9019e16326 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/2-bug-report.md @@ -0,0 +1,59 @@ +--- +name: Bug report +about: If something doesn't work correctly in RPCS3 +title: '' +labels: '' +assignees: '' + +--- + +## Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.me/RPCS3) or [forums](https://forums.rpcs3.net/) instead. + +## Quick summary +Please briefly describe what is not working correctly. + +## Details + +Please describe the problem as accurately as possible. + +#### 0. Make sure you're running with settings as close to default as possible +* **Do NOT enable any emulator game patches when reporting issues** +* Only change settings that are required for the game to work + +#### 1. Please attach RPCS3's log. +* Run the game until you find the issue. +* Completely close RPCS3, or move to the next step in case it has crashed. +* Locate RPCS3's log file: + + ```RPCS3.log``` (It can show up as just RPCS3 and have a notepad icon) + or + + ```RPCS3.log.gz``` (It can show up as RPCS3.log and have a zip or rar icon) +![image](https://user-images.githubusercontent.com/44116740/84433247-aa15fa80-ac36-11ea-913e-6fe25a1d040e.png) + On Linux it will be in ```~/.cache/rpcs3/```. + On Windows it will be near the executable. +* Attach the log: + + Drag and drop your log file into the issue. + + Or upload it to the cloud, such as Dropbox, Mega etc. + +#### 2. If you describe graphical issue, please provide an RSX capture and a RenderDoc capture that demonstrate it. +* To create an RSX capture, use _Create_ _RSX_ _Capture_ under _Utilities_. +Captures will be stored in RPCS3 folder → captures. + + Compress your capture with 7z, Rar etc. + And drag and drop it into the issue. + + Or upload it to the cloud, such as Dropbox, Mega etc. 
+* To create a RenderDoc capture, please refer to [RenderDoc's documentation](https://renderdoc.org/docs/how/how_capture_frame.html). + +#### 3. Please attach screenshots of your problem. +* Enable performance overlay with at least medium level of detail. +You can find it in _Emulator_ tab in _Settings_. + +#### 4. Please provide comparison with real PS3. + +#### 5. Please provide your system configuration: +* OS +* CPU +* GPU +* Driver version +* etc. + +#### Please include. +* Anything else you deem to be important diff --git a/.github/ISSUE_TEMPLATE/2-bug-report.yml b/.github/ISSUE_TEMPLATE/2-bug-report.yml deleted file mode 100644 index 4a82a03008..0000000000 --- a/.github/ISSUE_TEMPLATE/2-bug-report.yml +++ /dev/null @@ -1,78 +0,0 @@ -name: Bug report -description: If something doesn't work correctly in RPCS3 -title: "Enter a title here" -labels: [] -body: - - type: markdown - attributes: - value: | - # Summary - Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.gg/rpcs3) or [forums](https://forums.rpcs3.net/) instead. - - type: textarea - id: quick-summary - attributes: - label: Quick summary - description: Please briefly describe what is not working correctly. - validations: - required: true - - type: textarea - id: details - attributes: - label: Details - description: | - Please describe the problem as accurately as possible. - Provide a comparison with a real PS3, if possible. - validations: - required: false - - type: markdown - attributes: - value: | - # Log files - - Provide a log file that includes the bug you're reporting. - - Obtaining the log file: - * Run the game until you find the regression. - * Completely close RPCS3 and locate the log file. - - RPCS3's Log file will be ```RPCS3.log.gz``` (sometimes shows as RPCS3.log with zip icon) or ```RPCS3.log``` (sometimes shows as RPCS3 wtih notepad icon). - * On Windows it will be in the ```log``` folder inside your RPCS3 folder. - * On Linux it will be in ```~/.cache/rpcs3/``` - * On MacOS it will be in ```~/Library/Caches/rpcs3```. If you're unable to locate it copy paste the path in Spotlight and hit enter. - - type: textarea - id: logs - attributes: - label: Attach a log file - description: | - Drag & drop the files into this input field, or upload them to another service (f.ex. Dropbox, Mega) and provide a link. - validations: - required: true - - type: markdown - attributes: - value: | - # Other details - If you describe a graphical issue, please provide an RSX capture and/or a RenderDoc capture that demonstrate it. - * To create an RSX capture, use _Create_ _RSX_ _Capture_ under _Utilities_. - * Captures will be stored in RPCS3 folder → captures. - * To create a RenderDoc capture, please refer to [RenderDoc's documentation](https://renderdoc.org/docs/how/how_capture_frame.html). - - type: textarea - id: captures - attributes: - label: Attach capture files for visual issues - description: Compress your capture with 7z, Rar etc. and attach it here, or upload it to the cloud (Dropbox, Mega etc) and add a link to it. - validations: - required: false - - type: textarea - id: system-info - attributes: - label: System configuration - description: Provide information about your system, such as operating system, CPU and GPU model, GPU driver version and other information that describes your system configuration. 
- validations: - required: false - - type: textarea - id: other-details - attributes: - label: Other details - description: Include anything else you deem to be important. - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/3-feature-request.md b/.github/ISSUE_TEMPLATE/3-feature-request.md new file mode 100644 index 0000000000..93bc14ce2d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/3-feature-request.md @@ -0,0 +1,41 @@ +--- +name: Feature Request +about: If RPCS3 lacks a feature you would like to see +title: '[Feature Request] Enter Feature Title Here' +labels: '' +assignees: '' + +--- + +## Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.me/RPCS3) or [forums](https://forums.rpcs3.net/) instead. + +## Quick summary +Please briefly describe what feature you would like RPCS3 to have. + +## Details + +Please describe the feature as accurately as possible. + +#### 1. Please describe, what part of RPCS3 would be affected by your feature: +* Gameplay +* Debugging +* UI +* Patches +* Installation +* Github +* CI +* etc. + +#### 2. Please tell us, why your feature is important to RPCS3. + +#### 3. Please attach screenshots of the feature implemented in other projects. + +#### 4. Please provide your system configuration: +* OS +* CPU +* GPU +* Driver version +* etc. + +#### Please include. +* Anything else you deem to be important \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/3-feature-request.yml b/.github/ISSUE_TEMPLATE/3-feature-request.yml deleted file mode 100644 index 153c44c41c..0000000000 --- a/.github/ISSUE_TEMPLATE/3-feature-request.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Feature request -description: If RPCS3 lacks a feature you would like to see -title: "[Feature request] Enter a title here" -labels: [] -body: - - type: markdown - attributes: - value: | - Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.gg/rpcs3) or [forums](https://forums.rpcs3.net/) instead. - - type: textarea - id: quick-summary - attributes: - label: Quick summary - description: Please briefly describe what feature you would like RPCS3 to have. - validations: - required: true - - type: textarea - id: details - attributes: - label: Details - description: Please describe the feature as accurately as possible. - validations: - required: false - - type: markdown - attributes: - value: | - Please include the following information: - * What part of RPCS3 would be affected by your feature? (Gameplay, Debugging, UI, Patches, Installation, CI etc.) - * Why your feature is important to RPCS3. - * If the feature is implemented in other projects, attach screenshots. - * If this feature is something that a game is trying to use, upload a log file for it. - - RPCS3's Log file will be ```RPCS3.log.gz``` (sometimes shows as RPCS3.log with zip icon) or ```RPCS3.log``` (sometimes shows as RPCS3 wtih notepad icon). - * On Windows it will be in the ```log``` folder inside your RPCS3 folder. - * On Linux it will be in ```~/.cache/rpcs3/``` - * On MacOS it will be in ```~/Library/Caches/rpcs3```. If you're unable to locate it copy paste the path in Spotlight and hit enter. 
diff --git a/.github/ISSUE_TEMPLATE/4-advanced.md b/.github/ISSUE_TEMPLATE/4-advanced.md
deleted file mode 100644
index 78a9d96178..0000000000
--- a/.github/ISSUE_TEMPLATE/4-advanced.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: Advanced
-about: For developers and experienced users only
-title: ''
-labels: ''
-assignees: ''
-
----
-
-## Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.gg/rpcs3) or [forums](https://forums.rpcs3.net/) instead.
-
-You're using the advanced template. You're expected to know what to write in order to fill in all the required information for proper report.
-
-If you're unsure on what to do, please return back to the issue type selection and choose different category.
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 1dd094e1dc..137afb75a2 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -4,7 +4,7 @@ contact_links:
     url: https://rpcs3.net/quickstart
     about: Everything you need to know to install and configure emulator, and add games
   - name: Ask for help
-    url: https://discord.gg/rpcs3
+    url: https://discord.me/RPCS3
     about: If you have some questions or need help, please use our Discord server instead of GitHub
   - name: Report game compatibility
     url: https://forums.rpcs3.net/thread-196671.html
diff --git a/.github/PR-BUILD.md b/.github/PR-BUILD.md
new file mode 100644
index 0000000000..fb9a0aeb17
--- /dev/null
+++ b/.github/PR-BUILD.md
@@ -0,0 +1,27 @@
+## How to test a PR build
+
+Please take into account that an RPCS3 build usually takes some time (about 15 minutes), so a build will not be available right after a PR is submitted.
+
+- Open the PR you want to test
+- Scroll to the very bottom and locate the **Checks** section
+- Click on **Show all checks**
+  You should see something like this:
+  ![image](https://user-images.githubusercontent.com/10283761/116630952-2cd99e00-a94c-11eb-933e-986d6020ca92.png)
+- Click on __Details__ on either **Cirrus Linux GCC** or **Cirrus Windows**
+- Click **View more details on Cirrus CI** at the very bottom
+  ![image](https://user-images.githubusercontent.com/10283761/116631111-5e526980-a94c-11eb-95f7-751e6f15e1ea.png)
+- Click on the download button for **Artifact** on the **Artifacts** block
+  ![image](https://user-images.githubusercontent.com/10283761/116631322-bee1a680-a94c-11eb-89a3-be365783582e.png)
+
+- Congratulations! You are now downloading an RPCS3 build for that specific PR.
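+
+If you prefer the command line, the same bundle can be fetched directly. This is a minimal sketch assuming Cirrus CI's artifact URL convention; `<TASK_ID>` is a placeholder for the numeric ID shown in the task page URL, and `Artifact` matches the artifacts name used in this repository's Cirrus config:
+
+```sh
+# Sketch only: endpoint format assumed from Cirrus CI's artifact URL convention.
+# Replace <TASK_ID> with the task ID taken from the Cirrus task page URL.
+curl -fLO "https://api.cirrus-ci.com/v1/artifact/task/<TASK_ID>/Artifact.zip"
+unzip Artifact.zip
+```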
+ +__Please note that PR builds are not supposed to be stable because they contain new changesets.__ diff --git a/.github/PULL_REQUEST_TEMPLATE/1-default.md b/.github/PULL_REQUEST_TEMPLATE/1-default.md new file mode 100644 index 0000000000..710d07ae2a --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/1-default.md @@ -0,0 +1,3 @@ + + +[How to test this PR](.github/PR-BUILD.md) \ No newline at end of file diff --git a/.github/workflows/llvm.yml b/.github/workflows/llvm.yml deleted file mode 100644 index e3e3e76c50..0000000000 --- a/.github/workflows/llvm.yml +++ /dev/null @@ -1,72 +0,0 @@ -name: Build LLVM - -defaults: - run: - shell: bash -on: - workflow_dispatch: - -concurrency: - group: ${{ github.ref }}-${{ github.event_name }} - cancel-in-progress: true - -env: - BUILD_ARTIFACTSTAGINGDIRECTORY: ${{ github.workspace }}/artifacts/ - -jobs: - Windows_Build: - if: github.event_name == 'workflow_dispatch' - name: LLVM Windows (MSVC) - runs-on: windows-2025 - env: - COMPILER: msvc - CCACHE_SHA: '1f39f3ad5aae3fe915e99ad1302633bc8f6718e58fa7c0de2b0ba7e080f0f08c' - CCACHE_BIN_DIR: 'C:\ccache_bin' - CCACHE_DIR: 'C:\ccache' - CCACHE_INODECACHE: 'true' - CCACHE_SLOPPINESS: 'time_macros' - DEPS_CACHE_DIR: ./dependency_cache - steps: - - - name: Checkout repository - uses: actions/checkout@main - with: - fetch-depth: 0 - - - name: Restore Dependencies Cache - uses: actions/cache/restore@main - id: restore-dependencies-cache - with: - path: ${{ env.DEPS_CACHE_DIR }} - key: "${{ runner.os }}-${{ env.COMPILER }}-llvm-${{ env.CCACHE_SHA }}" - restore-keys: ${{ runner.os }}-${{ env.COMPILER }}-llvm - - - name: Download and unpack dependencies - run: .ci/setup-llvm.sh - - - name: Add msbuild to PATH - uses: microsoft/setup-msbuild@main - - - name: Compile LLVM - shell: pwsh - run: msbuild 3rdparty\llvm\llvm_build.vcxproj /p:SolutionDir="$(pwd)/" /p:Configuration=Release /v:minimal /p:Platform=x64 /p:PreferredToolArchitecture=x64 /p:CLToolPath=${{ env.CCACHE_BIN_DIR }} /p:UseMultiToolTask=true /p:CustomAfterMicrosoftCommonTargets="${{ github.workspace }}\buildfiles\msvc\ci_only.targets" - - - name: Pack up build artifacts - run: | - mkdir -p "${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}" - .ci/deploy-llvm.sh - - - name: Upload artifacts (7z) - uses: actions/upload-artifact@main - with: - name: LLVM for Windows (MSVC) - path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }} - compression-level: 0 - if-no-files-found: error - - - name: Save Dependencies Cache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.DEPS_CACHE_DIR }} - key: ${{ steps.restore-dependencies-cache.outputs.cache-primary-key }} diff --git a/.github/workflows/qt-ts.yml b/.github/workflows/qt-ts.yml deleted file mode 100644 index 4596be29e0..0000000000 --- a/.github/workflows/qt-ts.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: Generate Translation Template - -on: - workflow_dispatch: - push: - branches: - - master - paths: - - 'rpcs3/**' - -jobs: - Generate_Translation_Template: - name: Generate Translation Template - runs-on: ubuntu-24.04 - steps: - - name: Checkout repository - uses: actions/checkout@main - - - name: Install Qt Tools - run: | - sudo apt update - sudo apt install -y qt6-l10n-tools - - - name: Generate .ts file using lupdate (Qt) - working-directory: rpcs3 - run: | - ../.ci/generate-qt-ts.sh - - - name: Upload translation template - uses: actions/upload-artifact@main - with: - name: RPCS3_Translation_Template - path: translations/rpcs3_template.ts - compression-level: 0 \ No newline at end of 
file diff --git a/.github/workflows/rpcs3.yml b/.github/workflows/rpcs3.yml deleted file mode 100644 index 1981f78edb..0000000000 --- a/.github/workflows/rpcs3.yml +++ /dev/null @@ -1,444 +0,0 @@ -name: Build RPCS3 - -defaults: - run: - shell: bash -on: - push: - branches: - - master # Only trigger push event on 'master' branch - pull_request: - workflow_dispatch: - -concurrency: - group: ${{ github.ref }}-${{ github.event_name }} - cancel-in-progress: true - -env: - BUILD_REPOSITORY_NAME: ${{ github.repository }} - BUILD_SOURCEBRANCHNAME: ${{ github.ref_name }} - BUILD_PR_NUMBER: ${{ github.event.pull_request.number }} - BUILD_SOURCEVERSION: ${{ github.sha }} - BUILD_ARTIFACTSTAGINGDIRECTORY: ${{ github.workspace }}/artifacts/ - -jobs: - Linux_Build: - # Only run push event on master branch of main repo, but run all PRs - if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master') - strategy: - fail-fast: false - matrix: - include: - - os: ubuntu-24.04 - docker_img: "rpcs3/rpcs3-ci-jammy:1.6" - build_sh: "/rpcs3/.ci/build-linux.sh" - compiler: clang - UPLOAD_COMMIT_HASH: d812f1254a1157c80fd402f94446310560f54e5f - UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-linux" - - os: ubuntu-24.04 - docker_img: "rpcs3/rpcs3-ci-jammy:1.6" - build_sh: "/rpcs3/.ci/build-linux.sh" - compiler: gcc - - os: ubuntu-24.04-arm - docker_img: "rpcs3/rpcs3-ci-jammy-aarch64:1.6" - build_sh: "/rpcs3/.ci/build-linux-aarch64.sh" - compiler: clang - UPLOAD_COMMIT_HASH: a1d35836e8d45bfc6f63c26f0a3e5d46ef622fe1 - UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-linux-arm64" - - os: ubuntu-24.04-arm - docker_img: "rpcs3/rpcs3-ci-jammy-aarch64:1.6" - build_sh: "/rpcs3/.ci/build-linux-aarch64.sh" - compiler: gcc - name: RPCS3 Linux ${{ matrix.os }} ${{ matrix.compiler }} - runs-on: ${{ matrix.os }} - env: - CCACHE_DIR: ${{ github.workspace }}/ccache - DEPLOY_APPIMAGE: true - APPDIR: "/rpcs3/build/appdir" - ARTDIR: "/root/artifacts" - RELEASE_MESSAGE: "/rpcs3/GitHubReleaseMessage.txt" - COMPILER: ${{ matrix.compiler }} - UPLOAD_COMMIT_HASH: ${{ matrix.UPLOAD_COMMIT_HASH }} - UPLOAD_REPO_FULL_NAME: ${{ matrix.UPLOAD_REPO_FULL_NAME }} - RUN_UNIT_TESTS: github.event_name == 'pull_request' && 'ON' || 'OFF' - steps: - - name: Checkout repository - uses: actions/checkout@main - with: - fetch-depth: 0 - - - name: Restore build Ccache - uses: actions/cache/restore@main - id: restore-build-ccache - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ runner.os }}-ccache-${{ matrix.compiler }}-${{ runner.arch }}-${{github.run_id}} - restore-keys: ${{ runner.os }}-ccache-${{ matrix.compiler }}-${{ runner.arch }}- - - - name: Docker setup and build - run: | - docker pull --quiet ${{ matrix.docker_img }} - docker run \ - -v $PWD:/rpcs3 \ - --env-file .ci/docker.env \ - -v ${{ env.CCACHE_DIR }}:/root/.ccache \ - -v ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}:${{ env.ARTDIR }} \ - ${{ matrix.docker_img }} \ - ${{ matrix.build_sh }} - - - name: Upload artifacts - uses: actions/upload-artifact@main - with: - name: RPCS3 for Linux (${{ runner.arch }}, ${{ matrix.compiler }}) - path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}/*.AppImage - compression-level: 0 - - - name: Deploy master build to GitHub Releases - if: | - github.event_name != 'pull_request' && - github.repository == 'RPCS3/rpcs3' && - github.ref == 'refs/heads/master' && - matrix.compiler == 'clang' - env: - RPCS3_TOKEN: ${{ secrets.RPCS3_TOKEN }} - run: | - COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ./rpcs3/rpcs3_version.cpp) 
- COMM_COUNT=$(git rev-list --count HEAD) - COMM_HASH=$(git rev-parse --short=8 HEAD) - export AVVER="${COMM_TAG}-${COMM_COUNT}" - .ci/github-upload.sh - - - name: Save build Ccache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }} - - Mac_Build: - # Only run push event on master branch of main repo, but run all PRs - if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master') - strategy: - fail-fast: false - matrix: - include: - - name: Intel - build_sh: "arch -X86_64 .ci/build-mac.sh" - UPLOAD_COMMIT_HASH: 51ae32f468089a8169aaf1567de355ff4a3e0842 - UPLOAD_REPO_FULL_NAME: rpcs3/rpcs3-binaries-mac - - name: Apple Silicon - build_sh: .ci/build-mac-arm64.sh - UPLOAD_COMMIT_HASH: 8e21bdbc40711a3fccd18fbf17b742348b0f4281 - UPLOAD_REPO_FULL_NAME: rpcs3/rpcs3-binaries-mac-arm64 - name: RPCS3 Mac ${{ matrix.name }} - runs-on: macos-14 - env: - CCACHE_DIR: /tmp/ccache_dir - QT_VER: '6.7.3' - QT_VER_MAIN: '6' - LLVM_COMPILER_VER: '19' - RELEASE_MESSAGE: ../GitHubReleaseMessage.txt - UPLOAD_COMMIT_HASH: ${{ matrix.UPLOAD_COMMIT_HASH }} - UPLOAD_REPO_FULL_NAME: ${{ matrix.UPLOAD_REPO_FULL_NAME }} - RUN_UNIT_TESTS: github.event_name == 'pull_request' && 'ON' || 'OFF' - steps: - - name: Checkout repository - uses: actions/checkout@main - with: - fetch-depth: 0 - - - name: Restore Build Ccache - uses: actions/cache/restore@main - id: restore-build-ccache - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ runner.os }}-ccache-${{ matrix.name }}-${{github.run_id}} - restore-keys: ${{ runner.os }}-ccache-${{ matrix.name }}- - - - name: Restore Qt Cache - uses: actions/cache/restore@main - id: restore-qt-cache - with: - path: /tmp/Qt - key: ${{ runner.os }}-qt-${{ matrix.name }}-${{ env.QT_VER }} - restore-keys: ${{ runner.os }}-qt-${{ matrix.name }}-${{ env.QT_VER }} - - - name: Build - run: ${{ matrix.build_sh }} - - - name: Upload artifacts - uses: actions/upload-artifact@main - with: - name: RPCS3 for Mac (${{ matrix.name }}) - path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }} - compression-level: 0 - - - name: Export Variables - run: | - while IFS='=' read -r key val; do - # Skip lines that are empty or start with '#' - [[ -z "$key" || "$key" =~ ^# ]] && continue - echo "$key=$val" >> "${{ github.env }}" - done < .ci/ci-vars.env - - - name: Deploy master build to GitHub Releases - if: | - github.event_name != 'pull_request' && - github.repository == 'RPCS3/rpcs3' && - github.ref == 'refs/heads/master' - env: - RPCS3_TOKEN: ${{ secrets.RPCS3_TOKEN }} - run: .ci/github-upload.sh - - - name: Save Build Ccache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }} - - - name: Save Qt Cache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: /tmp/Qt - key: ${{ steps.restore-qt-cache.outputs.cache-primary-key }} - - Windows_Build: - # Only run push event on master branch of main repo, but run all PRs - if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master') - name: RPCS3 Windows - runs-on: windows-2025 - env: - COMPILER: msvc - QT_VER_MAIN: '6' - QT_VER: '6.9.1' - QT_VER_MSVC: 'msvc2022' - QT_DATE: '202505291653' - LLVM_VER: '19.1.7' - VULKAN_VER: '1.3.268.0' - VULKAN_SDK_SHA: '8459ef49bd06b697115ddd3d97c9aec729e849cd775f5be70897718a9b3b9db5' - 
CCACHE_SHA: '1f39f3ad5aae3fe915e99ad1302633bc8f6718e58fa7c0de2b0ba7e080f0f08c' - CCACHE_BIN_DIR: 'C:\ccache_bin' - CCACHE_DIR: 'C:\ccache' - CCACHE_INODECACHE: 'true' - CCACHE_SLOPPINESS: 'time_macros' - DEPS_CACHE_DIR: ./dependency_cache - UPLOAD_COMMIT_HASH: 7d09e3be30805911226241afbb14f8cdc2eb054e - UPLOAD_REPO_FULL_NAME: "RPCS3/rpcs3-binaries-win" - steps: - - - name: Checkout repository - uses: actions/checkout@main - with: - fetch-depth: 0 - - - name: Setup NuGet - uses: nuget/setup-nuget@v2 - - - name: Restore NuGet packages - run: nuget restore rpcs3.sln - - - name: Setup env - shell: pwsh - run: | - echo "QTDIR=C:\Qt\${{ env.QT_VER }}\${{ env.QT_VER_MSVC }}_64" >> ${{ github.env }} - echo "VULKAN_SDK=C:\VulkanSDK\${{ env.VULKAN_VER }}" >> ${{ github.env }} - - - name: Get Cache Keys - run: .ci/get_keys-windows.sh - - - name: Restore Build Ccache - uses: actions/cache/restore@main - id: restore-build-ccache - with: - path: ${{ env.CCACHE_DIR }} - key: "${{ runner.os }}-ccache-${{ env.COMPILER }}-${{github.run_id}}" - restore-keys: ${{ runner.os }}-ccache-${{ env.COMPILER }}- - - - name: Restore Dependencies Cache - uses: actions/cache/restore@main - id: restore-dependencies-cache - with: - path: ${{ env.DEPS_CACHE_DIR }} - key: "${{ runner.os }}-${{ env.COMPILER }}-${{ env.QT_VER }}-${{ env.VULKAN_SDK_SHA }}-${{ env.CCACHE_SHA }}-${{ hashFiles('llvm.lock') }}" - restore-keys: ${{ runner.os }}-${{ env.COMPILER }}- - - - name: Download and unpack dependencies - run: | - .ci/setup-windows.sh - .ci/setup-windows-ci-vars.sh win64 msvc - - - name: Export Variables - run: | - while IFS='=' read -r key val; do - # Skip lines that are empty or start with '#' - [[ -z "$key" || "$key" =~ ^# ]] && continue - echo "$key=$val" >> "${{ github.env }}" - done < .ci/ci-vars.env - - - name: Add msbuild to PATH - uses: microsoft/setup-msbuild@main - - - name: Compile RPCS3 - shell: pwsh - run: msbuild rpcs3.sln /p:Configuration=Release /v:minimal /p:Platform=x64 /p:PreferredToolArchitecture=x64 /p:CLToolPath=${{ env.CCACHE_BIN_DIR }} /p:UseMultiToolTask=true /p:CustomAfterMicrosoftCommonTargets="${{ github.workspace }}\buildfiles\msvc\ci_only.targets" - - - name: Run Unit Tests - if: github.event_name == 'pull_request' - shell: pwsh - run: build\lib\Release-x64\rpcs3_test.exe - - - name: Pack up build artifacts - run: | - mkdir -p "${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}" - .ci/deploy-windows.sh - - - name: Upload artifacts (7z) - uses: actions/upload-artifact@main - with: - name: RPCS3 for Windows (MSVC) - path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }} - compression-level: 0 - if-no-files-found: error - - - name: Deploy master build to GitHub Releases - if: | - github.event_name != 'pull_request' && - github.repository == 'RPCS3/rpcs3' && - github.ref == 'refs/heads/master' - env: - RPCS3_TOKEN: ${{ secrets.RPCS3_TOKEN }} - run: .ci/github-upload.sh - - - name: Save Build Ccache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }} - - - name: Save Dependencies Cache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.DEPS_CACHE_DIR }} - key: ${{ steps.restore-dependencies-cache.outputs.cache-primary-key }} - - Windows_Build_Clang: - # Only run push event on master branch of main repo, but run all PRs - if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master') - name: RPCS3 Windows Clang - 
runs-on: windows-2025 - strategy: - matrix: - include: - - msys2: clang64 - compiler: clang - arch: win64 - env: - CCACHE_DIR: 'C:\ccache' - steps: - - name: Checkout repository - uses: actions/checkout@main - with: - fetch-depth: 0 - - - name: Setup msys2 - uses: msys2/setup-msys2@v2 - with: - msystem: ${{ matrix.msys2 }} - update: true - cache: true - install: | - mingw-w64-clang-x86_64-clang - mingw-w64-clang-x86_64-ccache - mingw-w64-clang-x86_64-cmake - mingw-w64-clang-x86_64-lld - mingw-w64-clang-x86_64-ninja - mingw-w64-clang-x86_64-llvm - mingw-w64-clang-x86_64-ffmpeg - mingw-w64-clang-x86_64-opencv - mingw-w64-clang-x86_64-glew - mingw-w64-clang-x86_64-vulkan - mingw-w64-clang-x86_64-vulkan-headers - mingw-w64-clang-x86_64-vulkan-loader - mingw-w64-clang-x86_64-gtest - mingw-w64-clang-x86_64-qt6-base - mingw-w64-clang-x86_64-qt6-declarative - mingw-w64-clang-x86_64-qt6-multimedia - mingw-w64-clang-x86_64-qt6-svg - base-devel - curl - git - p7zip - - - name: Restore build Ccache - uses: actions/cache/restore@main - id: restore-build-ccache - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ runner.os }}-ccache-${{ matrix.compiler }}-${{ runner.arch }}-${{ github.run_id }} - restore-keys: ${{ runner.os }}-ccache-${{ matrix.compiler }}-${{ runner.arch }}- - - - name: Build RPCS3 - shell: msys2 {0} - run: | - export CCACHE_DIR=$(cygpath -u "$CCACHE_DIR") - echo "CCACHE_DIR=$CCACHE_DIR" - .ci/setup-windows-ci-vars.sh ${{ matrix.arch }} ${{ matrix.compiler }} - .ci/build-windows-clang.sh - - - name: Save build Ccache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }} - - - name: Upload artifacts - uses: actions/upload-artifact@main - with: - name: RPCS3 for Windows (${{ runner.arch }}, ${{ matrix.compiler }}) - path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }} - compression-level: 0 - if-no-files-found: error - - FreeBSD_Build: - # Only run push event on master branch of main repo, but run all PRs - if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master') - name: RPCS3 FreeBSD - runs-on: ubuntu-latest - timeout-minutes: 60 - env: - CCACHE_DIR: ${{ github.workspace }}/ccache - QT_VER_MAIN: '6' - LLVM_COMPILER_VER: '19' - steps: - - name: Checkout repository - uses: actions/checkout@main - with: - fetch-depth: 0 - - - name: Restore Build Ccache - uses: actions/cache/restore@main - id: restore-build-ccache - with: - path: ${{ env.CCACHE_DIR }} - key: FreeBSD-ccache-${{github.run_id}} - restore-keys: FreeBSD-ccache- - - - name: FreeBSD build - id: root - uses: vmactions/freebsd-vm@v1 - with: - envs: 'QT_VER_MAIN LLVM_COMPILER_VER CCACHE_DIR' - usesh: true - run: .ci/install-freebsd.sh && .ci/build-freebsd.sh - - - name: Save Build Ccache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }} diff --git a/.gitignore b/.gitignore index 4688d5fa52..70a607bbd4 100644 --- a/.gitignore +++ b/.gitignore @@ -31,13 +31,12 @@ *.wav /build -/build-* /lib /tmp /ipch -/packages /rpcs3/Debug /rpcs3/Release +/llvm_build !/bin /bin/* @@ -46,9 +45,6 @@ !/bin/soft_oal.dll !/bin/xaudio2_9redist.dll -# Test Programs -!/bin/test/ - # Themes !/bin/GuiConfigs/ /bin/GuiConfigs/*.ini @@ -56,6 +52,9 @@ /bin/GuiConfigs/*.dat /bin/GuiConfigs/*.dat.* +# Some data from git +!/bin/git/ + # Visual Studio Files .vs/* .vscode/* @@ -102,11 
+101,6 @@ rpcs3/rpcs3_autogen/* # QtCreator CMakeLists.txt.user -*.autosave - -# Sublime Text -*.sublime-project -*.sublime-workspace # CLion /.idea/* @@ -115,11 +109,17 @@ CMakeLists.txt.user # macOS .DS_Store +# 7zlib +/3rdparty/7z/**/*.lib + # yaml-cpp yaml-cpp.pc -_ReSharper.*/ -CMakeUserPresets.json +# libusb +/3rdparty/libusb_cmake/config.h +/3rdparty/libusb_cmake/libusb-1.0.pc -.cache/ -.lldbinit +# ssl certificate +cacert.pem + +_ReSharper.*/ diff --git a/.gitmodules b/.gitmodules index 427c61ffbd..4097c40040 100644 --- a/.gitmodules +++ b/.gitmodules @@ -5,16 +5,24 @@ [submodule "asmjit"] path = 3rdparty/asmjit/asmjit url = ../../asmjit/asmjit.git - branch = master + branch = oldstable ignore = dirty -[submodule "3rdparty/llvm/llvm"] - path = 3rdparty/llvm/llvm - url = ../../llvm/llvm-project.git +[submodule "llvm"] + path = llvm + url = ../../RPCS3/llvm-mirror.git ignore = dirty [submodule "3rdparty/glslang"] path = 3rdparty/glslang/glslang url = ../../KhronosGroup/glslang.git ignore = dirty +[submodule "3rdparty/SPIRV-Tools"] + path = 3rdparty/SPIRV/SPIRV-Tools + url = ../../KhronosGroup/SPIRV-Tools.git + ignore = dirty +[submodule "3rdparty/SPIRV-Headers"] + path = 3rdparty/SPIRV/SPIRV-Headers + url = ../../KhronosGroup/SPIRV-Headers.git + ignore = dirty [submodule "3rdparty/zlib"] path = 3rdparty/zlib/zlib url = ../../madler/zlib @@ -28,6 +36,10 @@ path = 3rdparty/pugixml url = ../../zeux/pugixml.git ignore = dirty +[submodule "3rdparty/xxHash"] + path = 3rdparty/xxHash + url = ../../Cyan4973/xxHash.git + ignore = dirty [submodule "3rdparty/yaml-cpp"] path = 3rdparty/yaml-cpp/yaml-cpp url = ../../RPCS3/yaml-cpp.git @@ -49,62 +61,10 @@ url = ../../curl/curl.git ignore = dirty [submodule "3rdparty/wolfssl"] - path = 3rdparty/wolfssl/wolfssl + path = 3rdparty/wolfssl url = ../../wolfSSL/wolfssl.git ignore = dirty [submodule "3rdparty/flatbuffers"] path = 3rdparty/flatbuffers url = ../../google/flatbuffers.git ignore = dirty -[submodule "3rdparty/cubeb/cubeb"] - path = 3rdparty/cubeb/cubeb - url = ../../mozilla/cubeb.git - ignore = dirty -[submodule "3rdparty/SoundTouch/soundtouch"] - path = 3rdparty/SoundTouch/soundtouch - url = ../../RPCS3/soundtouch.git - ignore = dirty -[submodule "3rdparty/libsdl-org/SDL"] - path = 3rdparty/libsdl-org/SDL - url = ../../libsdl-org/SDL.git - ignore = dirty -[submodule "3rdparty/miniupnp/miniupnp"] - path = 3rdparty/miniupnp/miniupnp - url = ../../miniupnp/miniupnp.git - ignore = dirty -[submodule "3rdparty/rtmidi/rtmidi"] - path = 3rdparty/rtmidi/rtmidi - url = ../../thestk/rtmidi - ignore = dirty -[submodule "3rdparty/zstd/zstd"] - path = 3rdparty/zstd/zstd - url = ../../facebook/zstd - ignore = dirty -[submodule "3rdparty/7zip/7zip"] - path = 3rdparty/7zip/7zip - url = ../../ip7z/7zip.git - ignore = dirty -[submodule "3rdparty/OpenAL/openal-soft"] - path = 3rdparty/OpenAL/openal-soft - url = ../../kcat/openal-soft.git - ignore = dirty -[submodule "3rdparty/stblib/stb"] - path = 3rdparty/stblib/stb - url = ../../nothings/stb.git - ignore = dirty -[submodule "3rdparty/opencv/opencv"] - path = 3rdparty/opencv/opencv - url = ../../Megamouse/opencv_minimal.git - ignore = dirty -[submodule "3rdparty/fusion/fusion"] - path = 3rdparty/fusion/fusion - url = ../../xioTechnologies/Fusion.git - ignore = dirty -[submodule "3rdparty/discord-rpc/discord-rpc"] - path = 3rdparty/discord-rpc/discord-rpc - url = ../../Vestrel/discord-rpc - ignore = dirty -[submodule "3rdparty/GPUOpen/VulkanMemoryAllocator"] - path = 3rdparty/GPUOpen/VulkanMemoryAllocator - url 
= ../../GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git
-	ignore = dirty
diff --git a/3rdparty/7zip/7zip.vcxproj b/3rdparty/7z/7zlib.vcxproj
similarity index 61%
rename from 3rdparty/7zip/7zip.vcxproj
rename to 3rdparty/7z/7zlib.vcxproj
index f8420f7b10..db3421ee9c 100644
--- a/3rdparty/7zip/7zip.vcxproj
+++ b/3rdparty/7z/7zlib.vcxproj
@@ -19,112 +19,89 @@
[hunk body unrecoverable: the vcxproj XML element tags were stripped during text extraction, leaving only bare +/- markers; the sole surviving context is the text node "16.0"]
@@ -179,8 +156,8 @@
[XML tags stripped here as well; the surviving values show the output directories changing:]
-    $(SolutionDir)build\lib\$(Configuration)-$(Platform)\
-    $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\
+    $(SolutionDir)lib/$(Configuration)-$(Platform)/
+    $(SolutionDir)tmp\$(ProjectName)-$(Configuration)-$(Platform)/
     false
diff --git a/3rdparty/7z/7zlib.vcxproj.filters b/3rdparty/7z/7zlib.vcxproj.filters
new file mode 100644
index 0000000000..59e30f6cff
--- /dev/null
+++ b/3rdparty/7z/7zlib.vcxproj.filters
@@ -0,0 +1,264 @@
[file body unrecoverable: XML tags stripped during extraction; the survivors are the standard filter GUIDs with their extension lists, followed by per-file "Header Files"/"Source Files" group assignments]
\ No newline at end of file
diff --git a/3rdparty/7z/CMakeLists.txt b/3rdparty/7z/CMakeLists.txt
new file mode 100644
index 0000000000..80979cfafd
--- /dev/null
+++ b/3rdparty/7z/CMakeLists.txt
@@ -0,0 +1,51 @@
+# 7z sdk
+if(WIN32)
+	add_library(3rdparty_7z STATIC EXCLUDE_FROM_ALL
+		src/Sha256.c
+		src/Ppmd7Dec.c
+		src/XzDec.c
+		src/XzEnc.c
+		src/Lzma2Dec.c
+		src/XzCrc64.c
+		src/DllSecur.c
+		src/Lzma2DecMt.c
+		src/BraIA64.c
+		src/7zFile.c
+		src/7zStream.c
+		src/Lzma86Enc.c
+		
src/Threads.c + src/7zAlloc.c + src/LzmaEnc.c + src/MtCoder.c + src/Lzma86Dec.c + src/Sort.c + src/LzFindMt.c + src/7zDec.c + src/Bcj2.c + src/Ppmd7.c + src/Bra86.c + src/Bcj2Enc.c + src/7zBuf.c + src/Aes.c + src/AesOpt.c + src/XzCrc64Opt.c + src/7zArcIn.c + src/Lzma2Enc.c + src/Bra.c + src/7zCrcOpt.c + src/7zBuf2.c + src/LzFind.c + src/Ppmd7Enc.c + src/CpuArch.c + src/Delta.c + src/XzIn.c + src/Alloc.c + src/Xz.c + src/LzmaDec.c + src/LzmaLib.c + src/7zCrc.c + src/MtDec.c) + target_include_directories(3rdparty_7z INTERFACE 7z) +else() + add_library(3rdparty_7z INTERFACE) +endif() diff --git a/3rdparty/7z/src/7z.h b/3rdparty/7z/src/7z.h new file mode 100644 index 0000000000..82813c298b --- /dev/null +++ b/3rdparty/7z/src/7z.h @@ -0,0 +1,202 @@ +/* 7z.h -- 7z interface +2017-04-03 : Igor Pavlov : Public domain */ + +#ifndef __7Z_H +#define __7Z_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +#define k7zStartHeaderSize 0x20 +#define k7zSignatureSize 6 + +extern const Byte k7zSignature[k7zSignatureSize]; + +typedef struct +{ + const Byte *Data; + size_t Size; +} CSzData; + +/* CSzCoderInfo & CSzFolder support only default methods */ + +typedef struct +{ + size_t PropsOffset; + UInt32 MethodID; + Byte NumStreams; + Byte PropsSize; +} CSzCoderInfo; + +typedef struct +{ + UInt32 InIndex; + UInt32 OutIndex; +} CSzBond; + +#define SZ_NUM_CODERS_IN_FOLDER_MAX 4 +#define SZ_NUM_BONDS_IN_FOLDER_MAX 3 +#define SZ_NUM_PACK_STREAMS_IN_FOLDER_MAX 4 + +typedef struct +{ + UInt32 NumCoders; + UInt32 NumBonds; + UInt32 NumPackStreams; + UInt32 UnpackStream; + UInt32 PackStreams[SZ_NUM_PACK_STREAMS_IN_FOLDER_MAX]; + CSzBond Bonds[SZ_NUM_BONDS_IN_FOLDER_MAX]; + CSzCoderInfo Coders[SZ_NUM_CODERS_IN_FOLDER_MAX]; +} CSzFolder; + + +SRes SzGetNextFolderItem(CSzFolder *f, CSzData *sd); + +typedef struct +{ + UInt32 Low; + UInt32 High; +} CNtfsFileTime; + +typedef struct +{ + Byte *Defs; /* MSB 0 bit numbering */ + UInt32 *Vals; +} CSzBitUi32s; + +typedef struct +{ + Byte *Defs; /* MSB 0 bit numbering */ + // UInt64 *Vals; + CNtfsFileTime *Vals; +} CSzBitUi64s; + +#define SzBitArray_Check(p, i) (((p)[(i) >> 3] & (0x80 >> ((i) & 7))) != 0) + +#define SzBitWithVals_Check(p, i) ((p)->Defs && ((p)->Defs[(i) >> 3] & (0x80 >> ((i) & 7))) != 0) + +typedef struct +{ + UInt32 NumPackStreams; + UInt32 NumFolders; + + UInt64 *PackPositions; // NumPackStreams + 1 + CSzBitUi32s FolderCRCs; // NumFolders + + size_t *FoCodersOffsets; // NumFolders + 1 + UInt32 *FoStartPackStreamIndex; // NumFolders + 1 + UInt32 *FoToCoderUnpackSizes; // NumFolders + 1 + Byte *FoToMainUnpackSizeIndex; // NumFolders + UInt64 *CoderUnpackSizes; // for all coders in all folders + + Byte *CodersData; +} CSzAr; + +UInt64 SzAr_GetFolderUnpackSize(const CSzAr *p, UInt32 folderIndex); + +SRes SzAr_DecodeFolder(const CSzAr *p, UInt32 folderIndex, + ILookInStream *stream, UInt64 startPos, + Byte *outBuffer, size_t outSize, + ISzAllocPtr allocMain); + +typedef struct +{ + CSzAr db; + + UInt64 startPosAfterHeader; + UInt64 dataPos; + + UInt32 NumFiles; + + UInt64 *UnpackPositions; // NumFiles + 1 + // Byte *IsEmptyFiles; + Byte *IsDirs; + CSzBitUi32s CRCs; + + CSzBitUi32s Attribs; + // CSzBitUi32s Parents; + CSzBitUi64s MTime; + CSzBitUi64s CTime; + + UInt32 *FolderToFile; // NumFolders + 1 + UInt32 *FileToFolder; // NumFiles + + size_t *FileNameOffsets; /* in 2-byte steps */ + Byte *FileNames; /* UTF-16-LE */ +} CSzArEx; + +#define SzArEx_IsDir(p, i) (SzBitArray_Check((p)->IsDirs, i)) + +#define SzArEx_GetFileSize(p, i) ((p)->UnpackPositions[(i) + 1] - 
(p)->UnpackPositions[i]) + +void SzArEx_Init(CSzArEx *p); +void SzArEx_Free(CSzArEx *p, ISzAllocPtr alloc); +UInt64 SzArEx_GetFolderStreamPos(const CSzArEx *p, UInt32 folderIndex, UInt32 indexInFolder); +int SzArEx_GetFolderFullPackSize(const CSzArEx *p, UInt32 folderIndex, UInt64 *resSize); + +/* +if dest == NULL, the return value specifies the required size of the buffer, + in 16-bit characters, including the null-terminating character. +if dest != NULL, the return value specifies the number of 16-bit characters that + are written to the dest, including the null-terminating character. */ + +size_t SzArEx_GetFileNameUtf16(const CSzArEx *p, size_t fileIndex, UInt16 *dest); + +/* +size_t SzArEx_GetFullNameLen(const CSzArEx *p, size_t fileIndex); +UInt16 *SzArEx_GetFullNameUtf16_Back(const CSzArEx *p, size_t fileIndex, UInt16 *dest); +*/ + + + +/* + SzArEx_Extract extracts file from archive + + *outBuffer must be 0 before first call for each new archive. + + Extracting cache: + If you need to decompress more than one file, you can send + these values from previous call: + *blockIndex, + *outBuffer, + *outBufferSize + You can consider "*outBuffer" as cache of solid block. If your archive is solid, + it will increase decompression speed. + + If you use external function, you can declare these 3 cache variables + (blockIndex, outBuffer, outBufferSize) as static in that external function. + + Free *outBuffer and set *outBuffer to 0, if you want to flush cache. +*/ + +SRes SzArEx_Extract( + const CSzArEx *db, + ILookInStream *inStream, + UInt32 fileIndex, /* index of file */ + UInt32 *blockIndex, /* index of solid block */ + Byte **outBuffer, /* pointer to pointer to output buffer (allocated with allocMain) */ + size_t *outBufferSize, /* buffer size for output buffer */ + size_t *offset, /* offset of stream for required file in *outBuffer */ + size_t *outSizeProcessed, /* size of file in *outBuffer */ + ISzAllocPtr allocMain, + ISzAllocPtr allocTemp); + + +/* +SzArEx_Open Errors: +SZ_ERROR_NO_ARCHIVE +SZ_ERROR_ARCHIVE +SZ_ERROR_UNSUPPORTED +SZ_ERROR_MEM +SZ_ERROR_CRC +SZ_ERROR_INPUT_EOF +SZ_ERROR_FAIL +*/ + +SRes SzArEx_Open(CSzArEx *p, ILookInStream *inStream, + ISzAllocPtr allocMain, ISzAllocPtr allocTemp); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/7zAlloc.c b/3rdparty/7z/src/7zAlloc.c new file mode 100644 index 0000000000..ea32809c65 --- /dev/null +++ b/3rdparty/7z/src/7zAlloc.c @@ -0,0 +1,80 @@ +/* 7zAlloc.c -- Allocation functions +2017-04-03 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include + +#include "7zAlloc.h" + +/* #define _SZ_ALLOC_DEBUG */ +/* use _SZ_ALLOC_DEBUG to debug alloc/free operations */ + +#ifdef _SZ_ALLOC_DEBUG + +#ifdef _WIN32 +#include +#endif + +#include +int g_allocCount = 0; +int g_allocCountTemp = 0; + +#endif + +void *SzAlloc(ISzAllocPtr p, size_t size) +{ + UNUSED_VAR(p); + if (size == 0) + return 0; + #ifdef _SZ_ALLOC_DEBUG + fprintf(stderr, "\nAlloc %10u bytes; count = %10d", (unsigned)size, g_allocCount); + g_allocCount++; + #endif + return malloc(size); +} + +void SzFree(ISzAllocPtr p, void *address) +{ + UNUSED_VAR(p); + #ifdef _SZ_ALLOC_DEBUG + if (address != 0) + { + g_allocCount--; + fprintf(stderr, "\nFree; count = %10d", g_allocCount); + } + #endif + free(address); +} + +void *SzAllocTemp(ISzAllocPtr p, size_t size) +{ + UNUSED_VAR(p); + if (size == 0) + return 0; + #ifdef _SZ_ALLOC_DEBUG + fprintf(stderr, "\nAlloc_temp %10u bytes; count = %10d", (unsigned)size, g_allocCountTemp); + g_allocCountTemp++; + #ifdef _WIN32 + 
return HeapAlloc(GetProcessHeap(), 0, size); + #endif + #endif + return malloc(size); +} + +void SzFreeTemp(ISzAllocPtr p, void *address) +{ + UNUSED_VAR(p); + #ifdef _SZ_ALLOC_DEBUG + if (address != 0) + { + g_allocCountTemp--; + fprintf(stderr, "\nFree_temp; count = %10d", g_allocCountTemp); + } + #ifdef _WIN32 + HeapFree(GetProcessHeap(), 0, address); + return; + #endif + #endif + free(address); +} diff --git a/3rdparty/7z/src/7zAlloc.h b/3rdparty/7z/src/7zAlloc.h new file mode 100644 index 0000000000..c0f89d73cb --- /dev/null +++ b/3rdparty/7z/src/7zAlloc.h @@ -0,0 +1,19 @@ +/* 7zAlloc.h -- Allocation functions +2017-04-03 : Igor Pavlov : Public domain */ + +#ifndef __7Z_ALLOC_H +#define __7Z_ALLOC_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +void *SzAlloc(ISzAllocPtr p, size_t size); +void SzFree(ISzAllocPtr p, void *address); + +void *SzAllocTemp(ISzAllocPtr p, size_t size); +void SzFreeTemp(ISzAllocPtr p, void *address); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/7zArcIn.c b/3rdparty/7z/src/7zArcIn.c new file mode 100644 index 0000000000..68cc12ff45 --- /dev/null +++ b/3rdparty/7z/src/7zArcIn.c @@ -0,0 +1,1771 @@ +/* 7zArcIn.c -- 7z Input functions +2018-12-31 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include + +#include "7z.h" +#include "7zBuf.h" +#include "7zCrc.h" +#include "CpuArch.h" + +#define MY_ALLOC(T, p, size, alloc) { \ + if ((p = (T *)ISzAlloc_Alloc(alloc, (size) * sizeof(T))) == NULL) return SZ_ERROR_MEM; } + +#define MY_ALLOC_ZE(T, p, size, alloc) { if ((size) == 0) p = NULL; else MY_ALLOC(T, p, size, alloc) } + +#define MY_ALLOC_AND_CPY(to, size, from, alloc) \ + { MY_ALLOC(Byte, to, size, alloc); memcpy(to, from, size); } + +#define MY_ALLOC_ZE_AND_CPY(to, size, from, alloc) \ + { if ((size) == 0) to = NULL; else { MY_ALLOC_AND_CPY(to, size, from, alloc) } } + +#define k7zMajorVersion 0 + +enum EIdEnum +{ + k7zIdEnd, + k7zIdHeader, + k7zIdArchiveProperties, + k7zIdAdditionalStreamsInfo, + k7zIdMainStreamsInfo, + k7zIdFilesInfo, + k7zIdPackInfo, + k7zIdUnpackInfo, + k7zIdSubStreamsInfo, + k7zIdSize, + k7zIdCRC, + k7zIdFolder, + k7zIdCodersUnpackSize, + k7zIdNumUnpackStream, + k7zIdEmptyStream, + k7zIdEmptyFile, + k7zIdAnti, + k7zIdName, + k7zIdCTime, + k7zIdATime, + k7zIdMTime, + k7zIdWinAttrib, + k7zIdComment, + k7zIdEncodedHeader, + k7zIdStartPos, + k7zIdDummy + // k7zNtSecure, + // k7zParent, + // k7zIsReal +}; + +const Byte k7zSignature[k7zSignatureSize] = {'7', 'z', 0xBC, 0xAF, 0x27, 0x1C}; + +#define SzBitUi32s_Init(p) { (p)->Defs = NULL; (p)->Vals = NULL; } + +static SRes SzBitUi32s_Alloc(CSzBitUi32s *p, size_t num, ISzAllocPtr alloc) +{ + if (num == 0) + { + p->Defs = NULL; + p->Vals = NULL; + } + else + { + MY_ALLOC(Byte, p->Defs, (num + 7) >> 3, alloc); + MY_ALLOC(UInt32, p->Vals, num, alloc); + } + return SZ_OK; +} + +void SzBitUi32s_Free(CSzBitUi32s *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->Defs); p->Defs = NULL; + ISzAlloc_Free(alloc, p->Vals); p->Vals = NULL; +} + +#define SzBitUi64s_Init(p) { (p)->Defs = NULL; (p)->Vals = NULL; } + +void SzBitUi64s_Free(CSzBitUi64s *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->Defs); p->Defs = NULL; + ISzAlloc_Free(alloc, p->Vals); p->Vals = NULL; +} + + +static void SzAr_Init(CSzAr *p) +{ + p->NumPackStreams = 0; + p->NumFolders = 0; + + p->PackPositions = NULL; + SzBitUi32s_Init(&p->FolderCRCs); + + p->FoCodersOffsets = NULL; + p->FoStartPackStreamIndex = NULL; + p->FoToCoderUnpackSizes = NULL; + p->FoToMainUnpackSizeIndex = NULL; + p->CoderUnpackSizes = NULL; + + 
p->CodersData = NULL; +} + +static void SzAr_Free(CSzAr *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->PackPositions); + SzBitUi32s_Free(&p->FolderCRCs, alloc); + + ISzAlloc_Free(alloc, p->FoCodersOffsets); + ISzAlloc_Free(alloc, p->FoStartPackStreamIndex); + ISzAlloc_Free(alloc, p->FoToCoderUnpackSizes); + ISzAlloc_Free(alloc, p->FoToMainUnpackSizeIndex); + ISzAlloc_Free(alloc, p->CoderUnpackSizes); + + ISzAlloc_Free(alloc, p->CodersData); + + SzAr_Init(p); +} + + +void SzArEx_Init(CSzArEx *p) +{ + SzAr_Init(&p->db); + + p->NumFiles = 0; + p->dataPos = 0; + + p->UnpackPositions = NULL; + p->IsDirs = NULL; + + p->FolderToFile = NULL; + p->FileToFolder = NULL; + + p->FileNameOffsets = NULL; + p->FileNames = NULL; + + SzBitUi32s_Init(&p->CRCs); + SzBitUi32s_Init(&p->Attribs); + // SzBitUi32s_Init(&p->Parents); + SzBitUi64s_Init(&p->MTime); + SzBitUi64s_Init(&p->CTime); +} + +void SzArEx_Free(CSzArEx *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->UnpackPositions); + ISzAlloc_Free(alloc, p->IsDirs); + + ISzAlloc_Free(alloc, p->FolderToFile); + ISzAlloc_Free(alloc, p->FileToFolder); + + ISzAlloc_Free(alloc, p->FileNameOffsets); + ISzAlloc_Free(alloc, p->FileNames); + + SzBitUi32s_Free(&p->CRCs, alloc); + SzBitUi32s_Free(&p->Attribs, alloc); + // SzBitUi32s_Free(&p->Parents, alloc); + SzBitUi64s_Free(&p->MTime, alloc); + SzBitUi64s_Free(&p->CTime, alloc); + + SzAr_Free(&p->db, alloc); + SzArEx_Init(p); +} + + +static int TestSignatureCandidate(const Byte *testBytes) +{ + unsigned i; + for (i = 0; i < k7zSignatureSize; i++) + if (testBytes[i] != k7zSignature[i]) + return 0; + return 1; +} + +#define SzData_Clear(p) { (p)->Data = NULL; (p)->Size = 0; } + +#define SZ_READ_BYTE_SD(_sd_, dest) if ((_sd_)->Size == 0) return SZ_ERROR_ARCHIVE; (_sd_)->Size--; dest = *(_sd_)->Data++; +#define SZ_READ_BYTE(dest) SZ_READ_BYTE_SD(sd, dest) +#define SZ_READ_BYTE_2(dest) if (sd.Size == 0) return SZ_ERROR_ARCHIVE; sd.Size--; dest = *sd.Data++; + +#define SKIP_DATA(sd, size) { sd->Size -= (size_t)(size); sd->Data += (size_t)(size); } +#define SKIP_DATA2(sd, size) { sd.Size -= (size_t)(size); sd.Data += (size_t)(size); } + +#define SZ_READ_32(dest) if (sd.Size < 4) return SZ_ERROR_ARCHIVE; \ + dest = GetUi32(sd.Data); SKIP_DATA2(sd, 4); + +static MY_NO_INLINE SRes ReadNumber(CSzData *sd, UInt64 *value) +{ + Byte firstByte, mask; + unsigned i; + UInt32 v; + + SZ_READ_BYTE(firstByte); + if ((firstByte & 0x80) == 0) + { + *value = firstByte; + return SZ_OK; + } + SZ_READ_BYTE(v); + if ((firstByte & 0x40) == 0) + { + *value = (((UInt32)firstByte & 0x3F) << 8) | v; + return SZ_OK; + } + SZ_READ_BYTE(mask); + *value = v | ((UInt32)mask << 8); + mask = 0x20; + for (i = 2; i < 8; i++) + { + Byte b; + if ((firstByte & mask) == 0) + { + UInt64 highPart = (unsigned)firstByte & (unsigned)(mask - 1); + *value |= (highPart << (8 * i)); + return SZ_OK; + } + SZ_READ_BYTE(b); + *value |= ((UInt64)b << (8 * i)); + mask >>= 1; + } + return SZ_OK; +} + + +static MY_NO_INLINE SRes SzReadNumber32(CSzData *sd, UInt32 *value) +{ + Byte firstByte; + UInt64 value64; + if (sd->Size == 0) + return SZ_ERROR_ARCHIVE; + firstByte = *sd->Data; + if ((firstByte & 0x80) == 0) + { + *value = firstByte; + sd->Data++; + sd->Size--; + return SZ_OK; + } + RINOK(ReadNumber(sd, &value64)); + if (value64 >= (UInt32)0x80000000 - 1) + return SZ_ERROR_UNSUPPORTED; + if (value64 >= ((UInt64)(1) << ((sizeof(size_t) - 1) * 8 + 4))) + return SZ_ERROR_UNSUPPORTED; + *value = (UInt32)value64; + return SZ_OK; +} + +#define ReadID(sd, value) 
ReadNumber(sd, value) + +static SRes SkipData(CSzData *sd) +{ + UInt64 size; + RINOK(ReadNumber(sd, &size)); + if (size > sd->Size) + return SZ_ERROR_ARCHIVE; + SKIP_DATA(sd, size); + return SZ_OK; +} + +static SRes WaitId(CSzData *sd, UInt32 id) +{ + for (;;) + { + UInt64 type; + RINOK(ReadID(sd, &type)); + if (type == id) + return SZ_OK; + if (type == k7zIdEnd) + return SZ_ERROR_ARCHIVE; + RINOK(SkipData(sd)); + } +} + +static SRes RememberBitVector(CSzData *sd, UInt32 numItems, const Byte **v) +{ + UInt32 numBytes = (numItems + 7) >> 3; + if (numBytes > sd->Size) + return SZ_ERROR_ARCHIVE; + *v = sd->Data; + SKIP_DATA(sd, numBytes); + return SZ_OK; +} + +static UInt32 CountDefinedBits(const Byte *bits, UInt32 numItems) +{ + Byte b = 0; + unsigned m = 0; + UInt32 sum = 0; + for (; numItems != 0; numItems--) + { + if (m == 0) + { + b = *bits++; + m = 8; + } + m--; + sum += ((b >> m) & 1); + } + return sum; +} + +static MY_NO_INLINE SRes ReadBitVector(CSzData *sd, UInt32 numItems, Byte **v, ISzAllocPtr alloc) +{ + Byte allAreDefined; + Byte *v2; + UInt32 numBytes = (numItems + 7) >> 3; + *v = NULL; + SZ_READ_BYTE(allAreDefined); + if (numBytes == 0) + return SZ_OK; + if (allAreDefined == 0) + { + if (numBytes > sd->Size) + return SZ_ERROR_ARCHIVE; + MY_ALLOC_AND_CPY(*v, numBytes, sd->Data, alloc); + SKIP_DATA(sd, numBytes); + return SZ_OK; + } + MY_ALLOC(Byte, *v, numBytes, alloc); + v2 = *v; + memset(v2, 0xFF, (size_t)numBytes); + { + unsigned numBits = (unsigned)numItems & 7; + if (numBits != 0) + v2[(size_t)numBytes - 1] = (Byte)((((UInt32)1 << numBits) - 1) << (8 - numBits)); + } + return SZ_OK; +} + +static MY_NO_INLINE SRes ReadUi32s(CSzData *sd2, UInt32 numItems, CSzBitUi32s *crcs, ISzAllocPtr alloc) +{ + UInt32 i; + CSzData sd; + UInt32 *vals; + const Byte *defs; + MY_ALLOC_ZE(UInt32, crcs->Vals, numItems, alloc); + sd = *sd2; + defs = crcs->Defs; + vals = crcs->Vals; + for (i = 0; i < numItems; i++) + if (SzBitArray_Check(defs, i)) + { + SZ_READ_32(vals[i]); + } + else + vals[i] = 0; + *sd2 = sd; + return SZ_OK; +} + +static SRes ReadBitUi32s(CSzData *sd, UInt32 numItems, CSzBitUi32s *crcs, ISzAllocPtr alloc) +{ + SzBitUi32s_Free(crcs, alloc); + RINOK(ReadBitVector(sd, numItems, &crcs->Defs, alloc)); + return ReadUi32s(sd, numItems, crcs, alloc); +} + +static SRes SkipBitUi32s(CSzData *sd, UInt32 numItems) +{ + Byte allAreDefined; + UInt32 numDefined = numItems; + SZ_READ_BYTE(allAreDefined); + if (!allAreDefined) + { + size_t numBytes = (numItems + 7) >> 3; + if (numBytes > sd->Size) + return SZ_ERROR_ARCHIVE; + numDefined = CountDefinedBits(sd->Data, numItems); + SKIP_DATA(sd, numBytes); + } + if (numDefined > (sd->Size >> 2)) + return SZ_ERROR_ARCHIVE; + SKIP_DATA(sd, (size_t)numDefined * 4); + return SZ_OK; +} + +static SRes ReadPackInfo(CSzAr *p, CSzData *sd, ISzAllocPtr alloc) +{ + RINOK(SzReadNumber32(sd, &p->NumPackStreams)); + + RINOK(WaitId(sd, k7zIdSize)); + MY_ALLOC(UInt64, p->PackPositions, (size_t)p->NumPackStreams + 1, alloc); + { + UInt64 sum = 0; + UInt32 i; + UInt32 numPackStreams = p->NumPackStreams; + for (i = 0; i < numPackStreams; i++) + { + UInt64 packSize; + p->PackPositions[i] = sum; + RINOK(ReadNumber(sd, &packSize)); + sum += packSize; + if (sum < packSize) + return SZ_ERROR_ARCHIVE; + } + p->PackPositions[i] = sum; + } + + for (;;) + { + UInt64 type; + RINOK(ReadID(sd, &type)); + if (type == k7zIdEnd) + return SZ_OK; + if (type == k7zIdCRC) + { + /* CRC of packed streams is unused now */ + RINOK(SkipBitUi32s(sd, p->NumPackStreams)); + continue; + } + 
RINOK(SkipData(sd)); + } +} + +/* +static SRes SzReadSwitch(CSzData *sd) +{ + Byte external; + RINOK(SzReadByte(sd, &external)); + return (external == 0) ? SZ_OK: SZ_ERROR_UNSUPPORTED; +} +*/ + +#define k_NumCodersStreams_in_Folder_MAX (SZ_NUM_BONDS_IN_FOLDER_MAX + SZ_NUM_PACK_STREAMS_IN_FOLDER_MAX) + +SRes SzGetNextFolderItem(CSzFolder *f, CSzData *sd) +{ + UInt32 numCoders, i; + UInt32 numInStreams = 0; + const Byte *dataStart = sd->Data; + + f->NumCoders = 0; + f->NumBonds = 0; + f->NumPackStreams = 0; + f->UnpackStream = 0; + + RINOK(SzReadNumber32(sd, &numCoders)); + if (numCoders == 0 || numCoders > SZ_NUM_CODERS_IN_FOLDER_MAX) + return SZ_ERROR_UNSUPPORTED; + + for (i = 0; i < numCoders; i++) + { + Byte mainByte; + CSzCoderInfo *coder = f->Coders + i; + unsigned idSize, j; + UInt64 id; + + SZ_READ_BYTE(mainByte); + if ((mainByte & 0xC0) != 0) + return SZ_ERROR_UNSUPPORTED; + + idSize = (unsigned)(mainByte & 0xF); + if (idSize > sizeof(id)) + return SZ_ERROR_UNSUPPORTED; + if (idSize > sd->Size) + return SZ_ERROR_ARCHIVE; + id = 0; + for (j = 0; j < idSize; j++) + { + id = ((id << 8) | *sd->Data); + sd->Data++; + sd->Size--; + } + if (id > (UInt32)0xFFFFFFFF) + return SZ_ERROR_UNSUPPORTED; + coder->MethodID = (UInt32)id; + + coder->NumStreams = 1; + coder->PropsOffset = 0; + coder->PropsSize = 0; + + if ((mainByte & 0x10) != 0) + { + UInt32 numStreams; + + RINOK(SzReadNumber32(sd, &numStreams)); + if (numStreams > k_NumCodersStreams_in_Folder_MAX) + return SZ_ERROR_UNSUPPORTED; + coder->NumStreams = (Byte)numStreams; + + RINOK(SzReadNumber32(sd, &numStreams)); + if (numStreams != 1) + return SZ_ERROR_UNSUPPORTED; + } + + numInStreams += coder->NumStreams; + + if (numInStreams > k_NumCodersStreams_in_Folder_MAX) + return SZ_ERROR_UNSUPPORTED; + + if ((mainByte & 0x20) != 0) + { + UInt32 propsSize = 0; + RINOK(SzReadNumber32(sd, &propsSize)); + if (propsSize > sd->Size) + return SZ_ERROR_ARCHIVE; + if (propsSize >= 0x80) + return SZ_ERROR_UNSUPPORTED; + coder->PropsOffset = sd->Data - dataStart; + coder->PropsSize = (Byte)propsSize; + sd->Data += (size_t)propsSize; + sd->Size -= (size_t)propsSize; + } + } + + /* + if (numInStreams == 1 && numCoders == 1) + { + f->NumPackStreams = 1; + f->PackStreams[0] = 0; + } + else + */ + { + Byte streamUsed[k_NumCodersStreams_in_Folder_MAX]; + UInt32 numBonds, numPackStreams; + + numBonds = numCoders - 1; + if (numInStreams < numBonds) + return SZ_ERROR_ARCHIVE; + if (numBonds > SZ_NUM_BONDS_IN_FOLDER_MAX) + return SZ_ERROR_UNSUPPORTED; + f->NumBonds = numBonds; + + numPackStreams = numInStreams - numBonds; + if (numPackStreams > SZ_NUM_PACK_STREAMS_IN_FOLDER_MAX) + return SZ_ERROR_UNSUPPORTED; + f->NumPackStreams = numPackStreams; + + for (i = 0; i < numInStreams; i++) + streamUsed[i] = False; + + if (numBonds != 0) + { + Byte coderUsed[SZ_NUM_CODERS_IN_FOLDER_MAX]; + + for (i = 0; i < numCoders; i++) + coderUsed[i] = False; + + for (i = 0; i < numBonds; i++) + { + CSzBond *bp = f->Bonds + i; + + RINOK(SzReadNumber32(sd, &bp->InIndex)); + if (bp->InIndex >= numInStreams || streamUsed[bp->InIndex]) + return SZ_ERROR_ARCHIVE; + streamUsed[bp->InIndex] = True; + + RINOK(SzReadNumber32(sd, &bp->OutIndex)); + if (bp->OutIndex >= numCoders || coderUsed[bp->OutIndex]) + return SZ_ERROR_ARCHIVE; + coderUsed[bp->OutIndex] = True; + } + + for (i = 0; i < numCoders; i++) + if (!coderUsed[i]) + { + f->UnpackStream = i; + break; + } + + if (i == numCoders) + return SZ_ERROR_ARCHIVE; + } + + if (numPackStreams == 1) + { + for (i = 0; i < numInStreams; i++) + if 
(!streamUsed[i]) + break; + if (i == numInStreams) + return SZ_ERROR_ARCHIVE; + f->PackStreams[0] = i; + } + else + for (i = 0; i < numPackStreams; i++) + { + UInt32 index; + RINOK(SzReadNumber32(sd, &index)); + if (index >= numInStreams || streamUsed[index]) + return SZ_ERROR_ARCHIVE; + streamUsed[index] = True; + f->PackStreams[i] = index; + } + } + + f->NumCoders = numCoders; + + return SZ_OK; +} + + +static MY_NO_INLINE SRes SkipNumbers(CSzData *sd2, UInt32 num) +{ + CSzData sd; + sd = *sd2; + for (; num != 0; num--) + { + Byte firstByte, mask; + unsigned i; + SZ_READ_BYTE_2(firstByte); + if ((firstByte & 0x80) == 0) + continue; + if ((firstByte & 0x40) == 0) + { + if (sd.Size == 0) + return SZ_ERROR_ARCHIVE; + sd.Size--; + sd.Data++; + continue; + } + mask = 0x20; + for (i = 2; i < 8 && (firstByte & mask) != 0; i++) + mask >>= 1; + if (i > sd.Size) + return SZ_ERROR_ARCHIVE; + SKIP_DATA2(sd, i); + } + *sd2 = sd; + return SZ_OK; +} + + +#define k_Scan_NumCoders_MAX 64 +#define k_Scan_NumCodersStreams_in_Folder_MAX 64 + + +static SRes ReadUnpackInfo(CSzAr *p, + CSzData *sd2, + UInt32 numFoldersMax, + const CBuf *tempBufs, UInt32 numTempBufs, + ISzAllocPtr alloc) +{ + CSzData sd; + + UInt32 fo, numFolders, numCodersOutStreams, packStreamIndex; + const Byte *startBufPtr; + Byte external; + + RINOK(WaitId(sd2, k7zIdFolder)); + + RINOK(SzReadNumber32(sd2, &numFolders)); + if (numFolders > numFoldersMax) + return SZ_ERROR_UNSUPPORTED; + p->NumFolders = numFolders; + + SZ_READ_BYTE_SD(sd2, external); + if (external == 0) + sd = *sd2; + else + { + UInt32 index; + RINOK(SzReadNumber32(sd2, &index)); + if (index >= numTempBufs) + return SZ_ERROR_ARCHIVE; + sd.Data = tempBufs[index].data; + sd.Size = tempBufs[index].size; + } + + MY_ALLOC(size_t, p->FoCodersOffsets, (size_t)numFolders + 1, alloc); + MY_ALLOC(UInt32, p->FoStartPackStreamIndex, (size_t)numFolders + 1, alloc); + MY_ALLOC(UInt32, p->FoToCoderUnpackSizes, (size_t)numFolders + 1, alloc); + MY_ALLOC_ZE(Byte, p->FoToMainUnpackSizeIndex, (size_t)numFolders, alloc); + + startBufPtr = sd.Data; + + packStreamIndex = 0; + numCodersOutStreams = 0; + + for (fo = 0; fo < numFolders; fo++) + { + UInt32 numCoders, ci, numInStreams = 0; + + p->FoCodersOffsets[fo] = sd.Data - startBufPtr; + + RINOK(SzReadNumber32(&sd, &numCoders)); + if (numCoders == 0 || numCoders > k_Scan_NumCoders_MAX) + return SZ_ERROR_UNSUPPORTED; + + for (ci = 0; ci < numCoders; ci++) + { + Byte mainByte; + unsigned idSize; + UInt32 coderInStreams; + + SZ_READ_BYTE_2(mainByte); + if ((mainByte & 0xC0) != 0) + return SZ_ERROR_UNSUPPORTED; + idSize = (mainByte & 0xF); + if (idSize > 8) + return SZ_ERROR_UNSUPPORTED; + if (idSize > sd.Size) + return SZ_ERROR_ARCHIVE; + SKIP_DATA2(sd, idSize); + + coderInStreams = 1; + + if ((mainByte & 0x10) != 0) + { + UInt32 coderOutStreams; + RINOK(SzReadNumber32(&sd, &coderInStreams)); + RINOK(SzReadNumber32(&sd, &coderOutStreams)); + if (coderInStreams > k_Scan_NumCodersStreams_in_Folder_MAX || coderOutStreams != 1) + return SZ_ERROR_UNSUPPORTED; + } + + numInStreams += coderInStreams; + + if ((mainByte & 0x20) != 0) + { + UInt32 propsSize; + RINOK(SzReadNumber32(&sd, &propsSize)); + if (propsSize > sd.Size) + return SZ_ERROR_ARCHIVE; + SKIP_DATA2(sd, propsSize); + } + } + + { + UInt32 indexOfMainStream = 0; + UInt32 numPackStreams = 1; + + if (numCoders != 1 || numInStreams != 1) + { + Byte streamUsed[k_Scan_NumCodersStreams_in_Folder_MAX]; + Byte coderUsed[k_Scan_NumCoders_MAX]; + + UInt32 i; + UInt32 numBonds = numCoders - 1; + if 
(numInStreams < numBonds) + return SZ_ERROR_ARCHIVE; + + if (numInStreams > k_Scan_NumCodersStreams_in_Folder_MAX) + return SZ_ERROR_UNSUPPORTED; + + for (i = 0; i < numInStreams; i++) + streamUsed[i] = False; + for (i = 0; i < numCoders; i++) + coderUsed[i] = False; + + for (i = 0; i < numBonds; i++) + { + UInt32 index; + + RINOK(SzReadNumber32(&sd, &index)); + if (index >= numInStreams || streamUsed[index]) + return SZ_ERROR_ARCHIVE; + streamUsed[index] = True; + + RINOK(SzReadNumber32(&sd, &index)); + if (index >= numCoders || coderUsed[index]) + return SZ_ERROR_ARCHIVE; + coderUsed[index] = True; + } + + numPackStreams = numInStreams - numBonds; + + if (numPackStreams != 1) + for (i = 0; i < numPackStreams; i++) + { + UInt32 index; + RINOK(SzReadNumber32(&sd, &index)); + if (index >= numInStreams || streamUsed[index]) + return SZ_ERROR_ARCHIVE; + streamUsed[index] = True; + } + + for (i = 0; i < numCoders; i++) + if (!coderUsed[i]) + { + indexOfMainStream = i; + break; + } + + if (i == numCoders) + return SZ_ERROR_ARCHIVE; + } + + p->FoStartPackStreamIndex[fo] = packStreamIndex; + p->FoToCoderUnpackSizes[fo] = numCodersOutStreams; + p->FoToMainUnpackSizeIndex[fo] = (Byte)indexOfMainStream; + numCodersOutStreams += numCoders; + if (numCodersOutStreams < numCoders) + return SZ_ERROR_UNSUPPORTED; + if (numPackStreams > p->NumPackStreams - packStreamIndex) + return SZ_ERROR_ARCHIVE; + packStreamIndex += numPackStreams; + } + } + + p->FoToCoderUnpackSizes[fo] = numCodersOutStreams; + + { + size_t dataSize = sd.Data - startBufPtr; + p->FoStartPackStreamIndex[fo] = packStreamIndex; + p->FoCodersOffsets[fo] = dataSize; + MY_ALLOC_ZE_AND_CPY(p->CodersData, dataSize, startBufPtr, alloc); + } + + if (external != 0) + { + if (sd.Size != 0) + return SZ_ERROR_ARCHIVE; + sd = *sd2; + } + + RINOK(WaitId(&sd, k7zIdCodersUnpackSize)); + + MY_ALLOC_ZE(UInt64, p->CoderUnpackSizes, (size_t)numCodersOutStreams, alloc); + { + UInt32 i; + for (i = 0; i < numCodersOutStreams; i++) + { + RINOK(ReadNumber(&sd, p->CoderUnpackSizes + i)); + } + } + + for (;;) + { + UInt64 type; + RINOK(ReadID(&sd, &type)); + if (type == k7zIdEnd) + { + *sd2 = sd; + return SZ_OK; + } + if (type == k7zIdCRC) + { + RINOK(ReadBitUi32s(&sd, numFolders, &p->FolderCRCs, alloc)); + continue; + } + RINOK(SkipData(&sd)); + } +} + + +UInt64 SzAr_GetFolderUnpackSize(const CSzAr *p, UInt32 folderIndex) +{ + return p->CoderUnpackSizes[p->FoToCoderUnpackSizes[folderIndex] + p->FoToMainUnpackSizeIndex[folderIndex]]; +} + + +typedef struct +{ + UInt32 NumTotalSubStreams; + UInt32 NumSubDigests; + CSzData sdNumSubStreams; + CSzData sdSizes; + CSzData sdCRCs; +} CSubStreamInfo; + + +static SRes ReadSubStreamsInfo(CSzAr *p, CSzData *sd, CSubStreamInfo *ssi) +{ + UInt64 type = 0; + UInt32 numSubDigests = 0; + UInt32 numFolders = p->NumFolders; + UInt32 numUnpackStreams = numFolders; + UInt32 numUnpackSizesInData = 0; + + for (;;) + { + RINOK(ReadID(sd, &type)); + if (type == k7zIdNumUnpackStream) + { + UInt32 i; + ssi->sdNumSubStreams.Data = sd->Data; + numUnpackStreams = 0; + numSubDigests = 0; + for (i = 0; i < numFolders; i++) + { + UInt32 numStreams; + RINOK(SzReadNumber32(sd, &numStreams)); + if (numUnpackStreams > numUnpackStreams + numStreams) + return SZ_ERROR_UNSUPPORTED; + numUnpackStreams += numStreams; + if (numStreams != 0) + numUnpackSizesInData += (numStreams - 1); + if (numStreams != 1 || !SzBitWithVals_Check(&p->FolderCRCs, i)) + numSubDigests += numStreams; + } + ssi->sdNumSubStreams.Size = sd->Data - ssi->sdNumSubStreams.Data; + 
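/* per-folder substream counts recorded; keep scanning for Size/CRC/End markers */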
continue; + } + if (type == k7zIdCRC || type == k7zIdSize || type == k7zIdEnd) + break; + RINOK(SkipData(sd)); + } + + if (!ssi->sdNumSubStreams.Data) + { + numSubDigests = numFolders; + if (p->FolderCRCs.Defs) + numSubDigests = numFolders - CountDefinedBits(p->FolderCRCs.Defs, numFolders); + } + + ssi->NumTotalSubStreams = numUnpackStreams; + ssi->NumSubDigests = numSubDigests; + + if (type == k7zIdSize) + { + ssi->sdSizes.Data = sd->Data; + RINOK(SkipNumbers(sd, numUnpackSizesInData)); + ssi->sdSizes.Size = sd->Data - ssi->sdSizes.Data; + RINOK(ReadID(sd, &type)); + } + + for (;;) + { + if (type == k7zIdEnd) + return SZ_OK; + if (type == k7zIdCRC) + { + ssi->sdCRCs.Data = sd->Data; + RINOK(SkipBitUi32s(sd, numSubDigests)); + ssi->sdCRCs.Size = sd->Data - ssi->sdCRCs.Data; + } + else + { + RINOK(SkipData(sd)); + } + RINOK(ReadID(sd, &type)); + } +} + +static SRes SzReadStreamsInfo(CSzAr *p, + CSzData *sd, + UInt32 numFoldersMax, const CBuf *tempBufs, UInt32 numTempBufs, + UInt64 *dataOffset, + CSubStreamInfo *ssi, + ISzAllocPtr alloc) +{ + UInt64 type; + + SzData_Clear(&ssi->sdSizes); + SzData_Clear(&ssi->sdCRCs); + SzData_Clear(&ssi->sdNumSubStreams); + + *dataOffset = 0; + RINOK(ReadID(sd, &type)); + if (type == k7zIdPackInfo) + { + RINOK(ReadNumber(sd, dataOffset)); + RINOK(ReadPackInfo(p, sd, alloc)); + RINOK(ReadID(sd, &type)); + } + if (type == k7zIdUnpackInfo) + { + RINOK(ReadUnpackInfo(p, sd, numFoldersMax, tempBufs, numTempBufs, alloc)); + RINOK(ReadID(sd, &type)); + } + if (type == k7zIdSubStreamsInfo) + { + RINOK(ReadSubStreamsInfo(p, sd, ssi)); + RINOK(ReadID(sd, &type)); + } + else + { + ssi->NumTotalSubStreams = p->NumFolders; + // ssi->NumSubDigests = 0; + } + + return (type == k7zIdEnd ? SZ_OK : SZ_ERROR_UNSUPPORTED); +} + +static SRes SzReadAndDecodePackedStreams( + ILookInStream *inStream, + CSzData *sd, + CBuf *tempBufs, + UInt32 numFoldersMax, + UInt64 baseOffset, + CSzAr *p, + ISzAllocPtr allocTemp) +{ + UInt64 dataStartPos; + UInt32 fo; + CSubStreamInfo ssi; + + RINOK(SzReadStreamsInfo(p, sd, numFoldersMax, NULL, 0, &dataStartPos, &ssi, allocTemp)); + + dataStartPos += baseOffset; + if (p->NumFolders == 0) + return SZ_ERROR_ARCHIVE; + + for (fo = 0; fo < p->NumFolders; fo++) + Buf_Init(tempBufs + fo); + + for (fo = 0; fo < p->NumFolders; fo++) + { + CBuf *tempBuf = tempBufs + fo; + UInt64 unpackSize = SzAr_GetFolderUnpackSize(p, fo); + if ((size_t)unpackSize != unpackSize) + return SZ_ERROR_MEM; + if (!Buf_Create(tempBuf, (size_t)unpackSize, allocTemp)) + return SZ_ERROR_MEM; + } + + for (fo = 0; fo < p->NumFolders; fo++) + { + const CBuf *tempBuf = tempBufs + fo; + RINOK(LookInStream_SeekTo(inStream, dataStartPos)); + RINOK(SzAr_DecodeFolder(p, fo, inStream, dataStartPos, tempBuf->data, tempBuf->size, allocTemp)); + } + + return SZ_OK; +} + +static SRes SzReadFileNames(const Byte *data, size_t size, UInt32 numFiles, size_t *offsets) +{ + size_t pos = 0; + *offsets++ = 0; + if (numFiles == 0) + return (size == 0) ? SZ_OK : SZ_ERROR_ARCHIVE; + if (size < 2) + return SZ_ERROR_ARCHIVE; + if (data[size - 2] != 0 || data[size - 1] != 0) + return SZ_ERROR_ARCHIVE; + do + { + const Byte *p; + if (pos == size) + return SZ_ERROR_ARCHIVE; + for (p = data + pos; + #ifdef _WIN32 + *(const UInt16 *)p != 0 + #else + p[0] != 0 || p[1] != 0 + #endif + ; p += 2); + pos = p - data + 2; + *offsets++ = (pos >> 1); + } + while (--numFiles); + return (pos == size) ? 
SZ_OK : SZ_ERROR_ARCHIVE; +} + +static MY_NO_INLINE SRes ReadTime(CSzBitUi64s *p, UInt32 num, + CSzData *sd2, + const CBuf *tempBufs, UInt32 numTempBufs, + ISzAllocPtr alloc) +{ + CSzData sd; + UInt32 i; + CNtfsFileTime *vals; + Byte *defs; + Byte external; + + RINOK(ReadBitVector(sd2, num, &p->Defs, alloc)); + + SZ_READ_BYTE_SD(sd2, external); + if (external == 0) + sd = *sd2; + else + { + UInt32 index; + RINOK(SzReadNumber32(sd2, &index)); + if (index >= numTempBufs) + return SZ_ERROR_ARCHIVE; + sd.Data = tempBufs[index].data; + sd.Size = tempBufs[index].size; + } + + MY_ALLOC_ZE(CNtfsFileTime, p->Vals, num, alloc); + vals = p->Vals; + defs = p->Defs; + for (i = 0; i < num; i++) + if (SzBitArray_Check(defs, i)) + { + if (sd.Size < 8) + return SZ_ERROR_ARCHIVE; + vals[i].Low = GetUi32(sd.Data); + vals[i].High = GetUi32(sd.Data + 4); + SKIP_DATA2(sd, 8); + } + else + vals[i].High = vals[i].Low = 0; + + if (external == 0) + *sd2 = sd; + + return SZ_OK; +} + + +#define NUM_ADDITIONAL_STREAMS_MAX 8 + + +static SRes SzReadHeader2( + CSzArEx *p, /* allocMain */ + CSzData *sd, + ILookInStream *inStream, + CBuf *tempBufs, UInt32 *numTempBufs, + ISzAllocPtr allocMain, + ISzAllocPtr allocTemp + ) +{ + CSubStreamInfo ssi; + +{ + UInt64 type; + + SzData_Clear(&ssi.sdSizes); + SzData_Clear(&ssi.sdCRCs); + SzData_Clear(&ssi.sdNumSubStreams); + + ssi.NumSubDigests = 0; + ssi.NumTotalSubStreams = 0; + + RINOK(ReadID(sd, &type)); + + if (type == k7zIdArchiveProperties) + { + for (;;) + { + UInt64 type2; + RINOK(ReadID(sd, &type2)); + if (type2 == k7zIdEnd) + break; + RINOK(SkipData(sd)); + } + RINOK(ReadID(sd, &type)); + } + + if (type == k7zIdAdditionalStreamsInfo) + { + CSzAr tempAr; + SRes res; + + SzAr_Init(&tempAr); + res = SzReadAndDecodePackedStreams(inStream, sd, tempBufs, NUM_ADDITIONAL_STREAMS_MAX, + p->startPosAfterHeader, &tempAr, allocTemp); + *numTempBufs = tempAr.NumFolders; + SzAr_Free(&tempAr, allocTemp); + + if (res != SZ_OK) + return res; + RINOK(ReadID(sd, &type)); + } + + if (type == k7zIdMainStreamsInfo) + { + RINOK(SzReadStreamsInfo(&p->db, sd, (UInt32)1 << 30, tempBufs, *numTempBufs, + &p->dataPos, &ssi, allocMain)); + p->dataPos += p->startPosAfterHeader; + RINOK(ReadID(sd, &type)); + } + + if (type == k7zIdEnd) + { + return SZ_OK; + } + + if (type != k7zIdFilesInfo) + return SZ_ERROR_ARCHIVE; +} + +{ + UInt32 numFiles = 0; + UInt32 numEmptyStreams = 0; + const Byte *emptyStreams = NULL; + const Byte *emptyFiles = NULL; + + RINOK(SzReadNumber32(sd, &numFiles)); + p->NumFiles = numFiles; + + for (;;) + { + UInt64 type; + UInt64 size; + RINOK(ReadID(sd, &type)); + if (type == k7zIdEnd) + break; + RINOK(ReadNumber(sd, &size)); + if (size > sd->Size) + return SZ_ERROR_ARCHIVE; + + if (type >= ((UInt32)1 << 8)) + { + SKIP_DATA(sd, size); + } + else switch ((unsigned)type) + { + case k7zIdName: + { + size_t namesSize; + const Byte *namesData; + Byte external; + + SZ_READ_BYTE(external); + if (external == 0) + { + namesSize = (size_t)size - 1; + namesData = sd->Data; + } + else + { + UInt32 index; + RINOK(SzReadNumber32(sd, &index)); + if (index >= *numTempBufs) + return SZ_ERROR_ARCHIVE; + namesData = (tempBufs)[index].data; + namesSize = (tempBufs)[index].size; + } + + if ((namesSize & 1) != 0) + return SZ_ERROR_ARCHIVE; + MY_ALLOC(size_t, p->FileNameOffsets, numFiles + 1, allocMain); + MY_ALLOC_ZE_AND_CPY(p->FileNames, namesSize, namesData, allocMain); + RINOK(SzReadFileNames(p->FileNames, namesSize, numFiles, p->FileNameOffsets)) + if (external == 0) + { + SKIP_DATA(sd, namesSize); + 
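/* names were stored inline in the header stream; step past them */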
} + break; + } + case k7zIdEmptyStream: + { + RINOK(RememberBitVector(sd, numFiles, &emptyStreams)); + numEmptyStreams = CountDefinedBits(emptyStreams, numFiles); + emptyFiles = NULL; + break; + } + case k7zIdEmptyFile: + { + RINOK(RememberBitVector(sd, numEmptyStreams, &emptyFiles)); + break; + } + case k7zIdWinAttrib: + { + Byte external; + CSzData sdSwitch; + CSzData *sdPtr; + SzBitUi32s_Free(&p->Attribs, allocMain); + RINOK(ReadBitVector(sd, numFiles, &p->Attribs.Defs, allocMain)); + + SZ_READ_BYTE(external); + if (external == 0) + sdPtr = sd; + else + { + UInt32 index; + RINOK(SzReadNumber32(sd, &index)); + if (index >= *numTempBufs) + return SZ_ERROR_ARCHIVE; + sdSwitch.Data = (tempBufs)[index].data; + sdSwitch.Size = (tempBufs)[index].size; + sdPtr = &sdSwitch; + } + RINOK(ReadUi32s(sdPtr, numFiles, &p->Attribs, allocMain)); + break; + } + /* + case k7zParent: + { + SzBitUi32s_Free(&p->Parents, allocMain); + RINOK(ReadBitVector(sd, numFiles, &p->Parents.Defs, allocMain)); + RINOK(SzReadSwitch(sd)); + RINOK(ReadUi32s(sd, numFiles, &p->Parents, allocMain)); + break; + } + */ + case k7zIdMTime: RINOK(ReadTime(&p->MTime, numFiles, sd, tempBufs, *numTempBufs, allocMain)); break; + case k7zIdCTime: RINOK(ReadTime(&p->CTime, numFiles, sd, tempBufs, *numTempBufs, allocMain)); break; + default: + { + SKIP_DATA(sd, size); + } + } + } + + if (numFiles - numEmptyStreams != ssi.NumTotalSubStreams) + return SZ_ERROR_ARCHIVE; + + for (;;) + { + UInt64 type; + RINOK(ReadID(sd, &type)); + if (type == k7zIdEnd) + break; + RINOK(SkipData(sd)); + } + + { + UInt32 i; + UInt32 emptyFileIndex = 0; + UInt32 folderIndex = 0; + UInt32 remSubStreams = 0; + UInt32 numSubStreams = 0; + UInt64 unpackPos = 0; + const Byte *digestsDefs = NULL; + const Byte *digestsVals = NULL; + UInt32 digestsValsIndex = 0; + UInt32 digestIndex; + Byte allDigestsDefined = 0; + Byte isDirMask = 0; + Byte crcMask = 0; + Byte mask = 0x80; + + MY_ALLOC(UInt32, p->FolderToFile, p->db.NumFolders + 1, allocMain); + MY_ALLOC_ZE(UInt32, p->FileToFolder, p->NumFiles, allocMain); + MY_ALLOC(UInt64, p->UnpackPositions, p->NumFiles + 1, allocMain); + MY_ALLOC_ZE(Byte, p->IsDirs, (p->NumFiles + 7) >> 3, allocMain); + + RINOK(SzBitUi32s_Alloc(&p->CRCs, p->NumFiles, allocMain)); + + if (ssi.sdCRCs.Size != 0) + { + SZ_READ_BYTE_SD(&ssi.sdCRCs, allDigestsDefined); + if (allDigestsDefined) + digestsVals = ssi.sdCRCs.Data; + else + { + size_t numBytes = (ssi.NumSubDigests + 7) >> 3; + digestsDefs = ssi.sdCRCs.Data; + digestsVals = digestsDefs + numBytes; + } + } + + digestIndex = 0; + + for (i = 0; i < numFiles; i++, mask >>= 1) + { + if (mask == 0) + { + UInt32 byteIndex = (i - 1) >> 3; + p->IsDirs[byteIndex] = isDirMask; + p->CRCs.Defs[byteIndex] = crcMask; + isDirMask = 0; + crcMask = 0; + mask = 0x80; + } + + p->UnpackPositions[i] = unpackPos; + p->CRCs.Vals[i] = 0; + + if (emptyStreams && SzBitArray_Check(emptyStreams, i)) + { + if (emptyFiles) + { + if (!SzBitArray_Check(emptyFiles, emptyFileIndex)) + isDirMask |= mask; + emptyFileIndex++; + } + else + isDirMask |= mask; + if (remSubStreams == 0) + { + p->FileToFolder[i] = (UInt32)-1; + continue; + } + } + + if (remSubStreams == 0) + { + for (;;) + { + if (folderIndex >= p->db.NumFolders) + return SZ_ERROR_ARCHIVE; + p->FolderToFile[folderIndex] = i; + numSubStreams = 1; + if (ssi.sdNumSubStreams.Data) + { + RINOK(SzReadNumber32(&ssi.sdNumSubStreams, &numSubStreams)); + } + remSubStreams = numSubStreams; + if (numSubStreams != 0) + break; + { + UInt64 folderUnpackSize = 
SzAr_GetFolderUnpackSize(&p->db, folderIndex); + unpackPos += folderUnpackSize; + if (unpackPos < folderUnpackSize) + return SZ_ERROR_ARCHIVE; + } + + folderIndex++; + } + } + + p->FileToFolder[i] = folderIndex; + + if (emptyStreams && SzBitArray_Check(emptyStreams, i)) + continue; + + if (--remSubStreams == 0) + { + UInt64 folderUnpackSize = SzAr_GetFolderUnpackSize(&p->db, folderIndex); + UInt64 startFolderUnpackPos = p->UnpackPositions[p->FolderToFile[folderIndex]]; + if (folderUnpackSize < unpackPos - startFolderUnpackPos) + return SZ_ERROR_ARCHIVE; + unpackPos = startFolderUnpackPos + folderUnpackSize; + if (unpackPos < folderUnpackSize) + return SZ_ERROR_ARCHIVE; + + if (numSubStreams == 1 && SzBitWithVals_Check(&p->db.FolderCRCs, i)) + { + p->CRCs.Vals[i] = p->db.FolderCRCs.Vals[folderIndex]; + crcMask |= mask; + } + else if (allDigestsDefined || (digestsDefs && SzBitArray_Check(digestsDefs, digestIndex))) + { + p->CRCs.Vals[i] = GetUi32(digestsVals + (size_t)digestsValsIndex * 4); + digestsValsIndex++; + crcMask |= mask; + } + + folderIndex++; + } + else + { + UInt64 v; + RINOK(ReadNumber(&ssi.sdSizes, &v)); + unpackPos += v; + if (unpackPos < v) + return SZ_ERROR_ARCHIVE; + if (allDigestsDefined || (digestsDefs && SzBitArray_Check(digestsDefs, digestIndex))) + { + p->CRCs.Vals[i] = GetUi32(digestsVals + (size_t)digestsValsIndex * 4); + digestsValsIndex++; + crcMask |= mask; + } + } + } + + if (mask != 0x80) + { + UInt32 byteIndex = (i - 1) >> 3; + p->IsDirs[byteIndex] = isDirMask; + p->CRCs.Defs[byteIndex] = crcMask; + } + + p->UnpackPositions[i] = unpackPos; + + if (remSubStreams != 0) + return SZ_ERROR_ARCHIVE; + + for (;;) + { + p->FolderToFile[folderIndex] = i; + if (folderIndex >= p->db.NumFolders) + break; + if (!ssi.sdNumSubStreams.Data) + return SZ_ERROR_ARCHIVE; + RINOK(SzReadNumber32(&ssi.sdNumSubStreams, &numSubStreams)); + if (numSubStreams != 0) + return SZ_ERROR_ARCHIVE; + /* + { + UInt64 folderUnpackSize = SzAr_GetFolderUnpackSize(&p->db, folderIndex); + unpackPos += folderUnpackSize; + if (unpackPos < folderUnpackSize) + return SZ_ERROR_ARCHIVE; + } + */ + folderIndex++; + } + + if (ssi.sdNumSubStreams.Data && ssi.sdNumSubStreams.Size != 0) + return SZ_ERROR_ARCHIVE; + } +} + return SZ_OK; +} + + +static SRes SzReadHeader( + CSzArEx *p, + CSzData *sd, + ILookInStream *inStream, + ISzAllocPtr allocMain, + ISzAllocPtr allocTemp) +{ + UInt32 i; + UInt32 numTempBufs = 0; + SRes res; + CBuf tempBufs[NUM_ADDITIONAL_STREAMS_MAX]; + + for (i = 0; i < NUM_ADDITIONAL_STREAMS_MAX; i++) + Buf_Init(tempBufs + i); + + res = SzReadHeader2(p, sd, inStream, + tempBufs, &numTempBufs, + allocMain, allocTemp); + + for (i = 0; i < NUM_ADDITIONAL_STREAMS_MAX; i++) + Buf_Free(tempBufs + i, allocTemp); + + RINOK(res); + + if (sd->Size != 0) + return SZ_ERROR_FAIL; + + return res; +} + +static SRes SzArEx_Open2( + CSzArEx *p, + ILookInStream *inStream, + ISzAllocPtr allocMain, + ISzAllocPtr allocTemp) +{ + Byte header[k7zStartHeaderSize]; + Int64 startArcPos; + UInt64 nextHeaderOffset, nextHeaderSize; + size_t nextHeaderSizeT; + UInt32 nextHeaderCRC; + CBuf buf; + SRes res; + + startArcPos = 0; + RINOK(ILookInStream_Seek(inStream, &startArcPos, SZ_SEEK_CUR)); + + RINOK(LookInStream_Read2(inStream, header, k7zStartHeaderSize, SZ_ERROR_NO_ARCHIVE)); + + if (!TestSignatureCandidate(header)) + return SZ_ERROR_NO_ARCHIVE; + if (header[6] != k7zMajorVersion) + return SZ_ERROR_UNSUPPORTED; + + nextHeaderOffset = GetUi64(header + 12); + nextHeaderSize = GetUi64(header + 20); + nextHeaderCRC = 
GetUi32(header + 28); + + p->startPosAfterHeader = startArcPos + k7zStartHeaderSize; + + if (CrcCalc(header + 12, 20) != GetUi32(header + 8)) + return SZ_ERROR_CRC; + + nextHeaderSizeT = (size_t)nextHeaderSize; + if (nextHeaderSizeT != nextHeaderSize) + return SZ_ERROR_MEM; + if (nextHeaderSizeT == 0) + return SZ_OK; + if (nextHeaderOffset > nextHeaderOffset + nextHeaderSize || + nextHeaderOffset > nextHeaderOffset + nextHeaderSize + k7zStartHeaderSize) + return SZ_ERROR_NO_ARCHIVE; + + { + Int64 pos = 0; + RINOK(ILookInStream_Seek(inStream, &pos, SZ_SEEK_END)); + if ((UInt64)pos < startArcPos + nextHeaderOffset || + (UInt64)pos < startArcPos + k7zStartHeaderSize + nextHeaderOffset || + (UInt64)pos < startArcPos + k7zStartHeaderSize + nextHeaderOffset + nextHeaderSize) + return SZ_ERROR_INPUT_EOF; + } + + RINOK(LookInStream_SeekTo(inStream, startArcPos + k7zStartHeaderSize + nextHeaderOffset)); + + if (!Buf_Create(&buf, nextHeaderSizeT, allocTemp)) + return SZ_ERROR_MEM; + + res = LookInStream_Read(inStream, buf.data, nextHeaderSizeT); + + if (res == SZ_OK) + { + res = SZ_ERROR_ARCHIVE; + if (CrcCalc(buf.data, nextHeaderSizeT) == nextHeaderCRC) + { + CSzData sd; + UInt64 type; + sd.Data = buf.data; + sd.Size = buf.size; + + res = ReadID(&sd, &type); + + if (res == SZ_OK && type == k7zIdEncodedHeader) + { + CSzAr tempAr; + CBuf tempBuf; + Buf_Init(&tempBuf); + + SzAr_Init(&tempAr); + res = SzReadAndDecodePackedStreams(inStream, &sd, &tempBuf, 1, p->startPosAfterHeader, &tempAr, allocTemp); + SzAr_Free(&tempAr, allocTemp); + + if (res != SZ_OK) + { + Buf_Free(&tempBuf, allocTemp); + } + else + { + Buf_Free(&buf, allocTemp); + buf.data = tempBuf.data; + buf.size = tempBuf.size; + sd.Data = buf.data; + sd.Size = buf.size; + res = ReadID(&sd, &type); + } + } + + if (res == SZ_OK) + { + if (type == k7zIdHeader) + { + /* + CSzData sd2; + unsigned ttt; + for (ttt = 0; ttt < 40000; ttt++) + { + SzArEx_Free(p, allocMain); + sd2 = sd; + res = SzReadHeader(p, &sd2, inStream, allocMain, allocTemp); + if (res != SZ_OK) + break; + } + */ + res = SzReadHeader(p, &sd, inStream, allocMain, allocTemp); + } + else + res = SZ_ERROR_UNSUPPORTED; + } + } + } + + Buf_Free(&buf, allocTemp); + return res; +} + + +SRes SzArEx_Open(CSzArEx *p, ILookInStream *inStream, + ISzAllocPtr allocMain, ISzAllocPtr allocTemp) +{ + SRes res = SzArEx_Open2(p, inStream, allocMain, allocTemp); + if (res != SZ_OK) + SzArEx_Free(p, allocMain); + return res; +} + + +SRes SzArEx_Extract( + const CSzArEx *p, + ILookInStream *inStream, + UInt32 fileIndex, + UInt32 *blockIndex, + Byte **tempBuf, + size_t *outBufferSize, + size_t *offset, + size_t *outSizeProcessed, + ISzAllocPtr allocMain, + ISzAllocPtr allocTemp) +{ + UInt32 folderIndex = p->FileToFolder[fileIndex]; + SRes res = SZ_OK; + + *offset = 0; + *outSizeProcessed = 0; + + if (folderIndex == (UInt32)-1) + { + ISzAlloc_Free(allocMain, *tempBuf); + *blockIndex = folderIndex; + *tempBuf = NULL; + *outBufferSize = 0; + return SZ_OK; + } + + if (*tempBuf == NULL || *blockIndex != folderIndex) + { + UInt64 unpackSizeSpec = SzAr_GetFolderUnpackSize(&p->db, folderIndex); + /* + UInt64 unpackSizeSpec = + p->UnpackPositions[p->FolderToFile[(size_t)folderIndex + 1]] - + p->UnpackPositions[p->FolderToFile[folderIndex]]; + */ + size_t unpackSize = (size_t)unpackSizeSpec; + + if (unpackSize != unpackSizeSpec) + return SZ_ERROR_MEM; + *blockIndex = folderIndex; + ISzAlloc_Free(allocMain, *tempBuf); + *tempBuf = NULL; + + if (res == SZ_OK) + { + *outBufferSize = unpackSize; + if (unpackSize != 
0) + { + *tempBuf = (Byte *)ISzAlloc_Alloc(allocMain, unpackSize); + if (*tempBuf == NULL) + res = SZ_ERROR_MEM; + } + + if (res == SZ_OK) + { + res = SzAr_DecodeFolder(&p->db, folderIndex, + inStream, p->dataPos, *tempBuf, unpackSize, allocTemp); + } + } + } + + if (res == SZ_OK) + { + UInt64 unpackPos = p->UnpackPositions[fileIndex]; + *offset = (size_t)(unpackPos - p->UnpackPositions[p->FolderToFile[folderIndex]]); + *outSizeProcessed = (size_t)(p->UnpackPositions[(size_t)fileIndex + 1] - unpackPos); + if (*offset + *outSizeProcessed > *outBufferSize) + return SZ_ERROR_FAIL; + if (SzBitWithVals_Check(&p->CRCs, fileIndex)) + if (CrcCalc(*tempBuf + *offset, *outSizeProcessed) != p->CRCs.Vals[fileIndex]) + res = SZ_ERROR_CRC; + } + + return res; +} + + +size_t SzArEx_GetFileNameUtf16(const CSzArEx *p, size_t fileIndex, UInt16 *dest) +{ + size_t offs = p->FileNameOffsets[fileIndex]; + size_t len = p->FileNameOffsets[fileIndex + 1] - offs; + if (dest != 0) + { + size_t i; + const Byte *src = p->FileNames + offs * 2; + for (i = 0; i < len; i++) + dest[i] = GetUi16(src + i * 2); + } + return len; +} + +/* +size_t SzArEx_GetFullNameLen(const CSzArEx *p, size_t fileIndex) +{ + size_t len; + if (!p->FileNameOffsets) + return 1; + len = 0; + for (;;) + { + UInt32 parent = (UInt32)(Int32)-1; + len += p->FileNameOffsets[fileIndex + 1] - p->FileNameOffsets[fileIndex]; + if SzBitWithVals_Check(&p->Parents, fileIndex) + parent = p->Parents.Vals[fileIndex]; + if (parent == (UInt32)(Int32)-1) + return len; + fileIndex = parent; + } +} + +UInt16 *SzArEx_GetFullNameUtf16_Back(const CSzArEx *p, size_t fileIndex, UInt16 *dest) +{ + BoolInt needSlash; + if (!p->FileNameOffsets) + { + *(--dest) = 0; + return dest; + } + needSlash = False; + for (;;) + { + UInt32 parent = (UInt32)(Int32)-1; + size_t curLen = p->FileNameOffsets[fileIndex + 1] - p->FileNameOffsets[fileIndex]; + SzArEx_GetFileNameUtf16(p, fileIndex, dest - curLen); + if (needSlash) + *(dest - 1) = '/'; + needSlash = True; + dest -= curLen; + + if SzBitWithVals_Check(&p->Parents, fileIndex) + parent = p->Parents.Vals[fileIndex]; + if (parent == (UInt32)(Int32)-1) + return dest; + fileIndex = parent; + } +} +*/ diff --git a/3rdparty/7z/src/7zBuf.c b/3rdparty/7z/src/7zBuf.c new file mode 100644 index 0000000000..438bba68bd --- /dev/null +++ b/3rdparty/7z/src/7zBuf.c @@ -0,0 +1,36 @@ +/* 7zBuf.c -- Byte Buffer +2017-04-03 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "7zBuf.h" + +void Buf_Init(CBuf *p) +{ + p->data = 0; + p->size = 0; +} + +int Buf_Create(CBuf *p, size_t size, ISzAllocPtr alloc) +{ + p->size = 0; + if (size == 0) + { + p->data = 0; + return 1; + } + p->data = (Byte *)ISzAlloc_Alloc(alloc, size); + if (p->data) + { + p->size = size; + return 1; + } + return 0; +} + +void Buf_Free(CBuf *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->data); + p->data = 0; + p->size = 0; +} diff --git a/3rdparty/7z/src/7zBuf.h b/3rdparty/7z/src/7zBuf.h new file mode 100644 index 0000000000..5942d6e623 --- /dev/null +++ b/3rdparty/7z/src/7zBuf.h @@ -0,0 +1,35 @@ +/* 7zBuf.h -- Byte Buffer +2017-04-03 : Igor Pavlov : Public domain */ + +#ifndef __7Z_BUF_H +#define __7Z_BUF_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +typedef struct +{ + Byte *data; + size_t size; +} CBuf; + +void Buf_Init(CBuf *p); +int Buf_Create(CBuf *p, size_t size, ISzAllocPtr alloc); +void Buf_Free(CBuf *p, ISzAllocPtr alloc); + +typedef struct +{ + Byte *data; + size_t size; + size_t pos; +} CDynBuf; + +void DynBuf_Construct(CDynBuf *p); +void 
DynBuf_SeekToBeg(CDynBuf *p); +int DynBuf_Write(CDynBuf *p, const Byte *buf, size_t size, ISzAllocPtr alloc); +void DynBuf_Free(CDynBuf *p, ISzAllocPtr alloc); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/7zBuf2.c b/3rdparty/7z/src/7zBuf2.c new file mode 100644 index 0000000000..49b4343b67 --- /dev/null +++ b/3rdparty/7z/src/7zBuf2.c @@ -0,0 +1,52 @@ +/* 7zBuf2.c -- Byte Buffer +2017-04-03 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include + +#include "7zBuf.h" + +void DynBuf_Construct(CDynBuf *p) +{ + p->data = 0; + p->size = 0; + p->pos = 0; +} + +void DynBuf_SeekToBeg(CDynBuf *p) +{ + p->pos = 0; +} + +int DynBuf_Write(CDynBuf *p, const Byte *buf, size_t size, ISzAllocPtr alloc) +{ + if (size > p->size - p->pos) + { + size_t newSize = p->pos + size; + Byte *data; + newSize += newSize / 4; + data = (Byte *)ISzAlloc_Alloc(alloc, newSize); + if (!data) + return 0; + p->size = newSize; + if (p->pos != 0) + memcpy(data, p->data, p->pos); + ISzAlloc_Free(alloc, p->data); + p->data = data; + } + if (size != 0) + { + memcpy(p->data + p->pos, buf, size); + p->pos += size; + } + return 1; +} + +void DynBuf_Free(CDynBuf *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->data); + p->data = 0; + p->size = 0; + p->pos = 0; +} diff --git a/3rdparty/7z/src/7zCrc.c b/3rdparty/7z/src/7zCrc.c new file mode 100644 index 0000000000..40ab75952a --- /dev/null +++ b/3rdparty/7z/src/7zCrc.c @@ -0,0 +1,128 @@ +/* 7zCrc.c -- CRC32 init +2017-06-06 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "7zCrc.h" +#include "CpuArch.h" + +#define kCrcPoly 0xEDB88320 + +#ifdef MY_CPU_LE + #define CRC_NUM_TABLES 8 +#else + #define CRC_NUM_TABLES 9 + + #define CRC_UINT32_SWAP(v) ((v >> 24) | ((v >> 8) & 0xFF00) | ((v << 8) & 0xFF0000) | (v << 24)) + + UInt32 MY_FAST_CALL CrcUpdateT1_BeT4(UInt32 v, const void *data, size_t size, const UInt32 *table); + UInt32 MY_FAST_CALL CrcUpdateT1_BeT8(UInt32 v, const void *data, size_t size, const UInt32 *table); +#endif + +#ifndef MY_CPU_BE + UInt32 MY_FAST_CALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table); + UInt32 MY_FAST_CALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table); +#endif + +typedef UInt32 (MY_FAST_CALL *CRC_FUNC)(UInt32 v, const void *data, size_t size, const UInt32 *table); + +CRC_FUNC g_CrcUpdateT4; +CRC_FUNC g_CrcUpdateT8; +CRC_FUNC g_CrcUpdate; + +UInt32 g_CrcTable[256 * CRC_NUM_TABLES]; + +UInt32 MY_FAST_CALL CrcUpdate(UInt32 v, const void *data, size_t size) +{ + return g_CrcUpdate(v, data, size, g_CrcTable); +} + +UInt32 MY_FAST_CALL CrcCalc(const void *data, size_t size) +{ + return g_CrcUpdate(CRC_INIT_VAL, data, size, g_CrcTable) ^ CRC_INIT_VAL; +} + +#define CRC_UPDATE_BYTE_2(crc, b) (table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8)) + +UInt32 MY_FAST_CALL CrcUpdateT1(UInt32 v, const void *data, size_t size, const UInt32 *table) +{ + const Byte *p = (const Byte *)data; + const Byte *pEnd = p + size; + for (; p != pEnd; p++) + v = CRC_UPDATE_BYTE_2(v, *p); + return v; +} + +void MY_FAST_CALL CrcGenerateTable() +{ + UInt32 i; + for (i = 0; i < 256; i++) + { + UInt32 r = i; + unsigned j; + for (j = 0; j < 8; j++) + r = (r >> 1) ^ (kCrcPoly & ((UInt32)0 - (r & 1))); + g_CrcTable[i] = r; + } + for (i = 256; i < 256 * CRC_NUM_TABLES; i++) + { + UInt32 r = g_CrcTable[(size_t)i - 256]; + g_CrcTable[i] = g_CrcTable[r & 0xFF] ^ (r >> 8); + } + + #if CRC_NUM_TABLES < 4 + + g_CrcUpdate = CrcUpdateT1; + + #else + + #ifdef MY_CPU_LE + + g_CrcUpdateT4 = CrcUpdateT4; + g_CrcUpdate = 
CrcUpdateT4; + + #if CRC_NUM_TABLES >= 8 + g_CrcUpdateT8 = CrcUpdateT8; + + #ifdef MY_CPU_X86_OR_AMD64 + if (!CPU_Is_InOrder()) + #endif + g_CrcUpdate = CrcUpdateT8; + #endif + + #else + { + #ifndef MY_CPU_BE + UInt32 k = 0x01020304; + const Byte *p = (const Byte *)&k; + if (p[0] == 4 && p[1] == 3) + { + g_CrcUpdateT4 = CrcUpdateT4; + g_CrcUpdate = CrcUpdateT4; + #if CRC_NUM_TABLES >= 8 + g_CrcUpdateT8 = CrcUpdateT8; + g_CrcUpdate = CrcUpdateT8; + #endif + } + else if (p[0] != 1 || p[1] != 2) + g_CrcUpdate = CrcUpdateT1; + else + #endif + { + for (i = 256 * CRC_NUM_TABLES - 1; i >= 256; i--) + { + UInt32 x = g_CrcTable[(size_t)i - 256]; + g_CrcTable[i] = CRC_UINT32_SWAP(x); + } + g_CrcUpdateT4 = CrcUpdateT1_BeT4; + g_CrcUpdate = CrcUpdateT1_BeT4; + #if CRC_NUM_TABLES >= 8 + g_CrcUpdateT8 = CrcUpdateT1_BeT8; + g_CrcUpdate = CrcUpdateT1_BeT8; + #endif + } + } + #endif + + #endif +} diff --git a/3rdparty/7z/src/7zCrc.h b/3rdparty/7z/src/7zCrc.h new file mode 100644 index 0000000000..3b0459402b --- /dev/null +++ b/3rdparty/7z/src/7zCrc.h @@ -0,0 +1,25 @@ +/* 7zCrc.h -- CRC32 calculation +2013-01-18 : Igor Pavlov : Public domain */ + +#ifndef __7Z_CRC_H +#define __7Z_CRC_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +extern UInt32 g_CrcTable[]; + +/* Call CrcGenerateTable one time before other CRC functions */ +void MY_FAST_CALL CrcGenerateTable(void); + +#define CRC_INIT_VAL 0xFFFFFFFF +#define CRC_GET_DIGEST(crc) ((crc) ^ CRC_INIT_VAL) +#define CRC_UPDATE_BYTE(crc, b) (g_CrcTable[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8)) + +UInt32 MY_FAST_CALL CrcUpdate(UInt32 crc, const void *data, size_t size); +UInt32 MY_FAST_CALL CrcCalc(const void *data, size_t size); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/7zCrcOpt.c b/3rdparty/7z/src/7zCrcOpt.c new file mode 100644 index 0000000000..2ee0de8459 --- /dev/null +++ b/3rdparty/7z/src/7zCrcOpt.c @@ -0,0 +1,115 @@ +/* 7zCrcOpt.c -- CRC32 calculation +2017-04-03 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "CpuArch.h" + +#ifndef MY_CPU_BE + +#define CRC_UPDATE_BYTE_2(crc, b) (table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8)) + +UInt32 MY_FAST_CALL CrcUpdateT4(UInt32 v, const void *data, size_t size, const UInt32 *table) +{ + const Byte *p = (const Byte *)data; + for (; size > 0 && ((unsigned)(ptrdiff_t)p & 3) != 0; size--, p++) + v = CRC_UPDATE_BYTE_2(v, *p); + for (; size >= 4; size -= 4, p += 4) + { + v ^= *(const UInt32 *)p; + v = + (table + 0x300)[((v ) & 0xFF)] + ^ (table + 0x200)[((v >> 8) & 0xFF)] + ^ (table + 0x100)[((v >> 16) & 0xFF)] + ^ (table + 0x000)[((v >> 24))]; + } + for (; size > 0; size--, p++) + v = CRC_UPDATE_BYTE_2(v, *p); + return v; +} + +UInt32 MY_FAST_CALL CrcUpdateT8(UInt32 v, const void *data, size_t size, const UInt32 *table) +{ + const Byte *p = (const Byte *)data; + for (; size > 0 && ((unsigned)(ptrdiff_t)p & 7) != 0; size--, p++) + v = CRC_UPDATE_BYTE_2(v, *p); + for (; size >= 8; size -= 8, p += 8) + { + UInt32 d; + v ^= *(const UInt32 *)p; + v = + (table + 0x700)[((v ) & 0xFF)] + ^ (table + 0x600)[((v >> 8) & 0xFF)] + ^ (table + 0x500)[((v >> 16) & 0xFF)] + ^ (table + 0x400)[((v >> 24))]; + d = *((const UInt32 *)p + 1); + v ^= + (table + 0x300)[((d ) & 0xFF)] + ^ (table + 0x200)[((d >> 8) & 0xFF)] + ^ (table + 0x100)[((d >> 16) & 0xFF)] + ^ (table + 0x000)[((d >> 24))]; + } + for (; size > 0; size--, p++) + v = CRC_UPDATE_BYTE_2(v, *p); + return v; +} + +#endif + + +#ifndef MY_CPU_LE + +#define CRC_UINT32_SWAP(v) ((v >> 24) | ((v >> 8) & 0xFF00) | ((v << 8) & 0xFF0000) | (v << 24)) + +#define 
CRC_UPDATE_BYTE_2_BE(crc, b) (table[(((crc) >> 24) ^ (b))] ^ ((crc) << 8)) + +UInt32 MY_FAST_CALL CrcUpdateT1_BeT4(UInt32 v, const void *data, size_t size, const UInt32 *table) +{ + const Byte *p = (const Byte *)data; + table += 0x100; + v = CRC_UINT32_SWAP(v); + for (; size > 0 && ((unsigned)(ptrdiff_t)p & 3) != 0; size--, p++) + v = CRC_UPDATE_BYTE_2_BE(v, *p); + for (; size >= 4; size -= 4, p += 4) + { + v ^= *(const UInt32 *)p; + v = + (table + 0x000)[((v ) & 0xFF)] + ^ (table + 0x100)[((v >> 8) & 0xFF)] + ^ (table + 0x200)[((v >> 16) & 0xFF)] + ^ (table + 0x300)[((v >> 24))]; + } + for (; size > 0; size--, p++) + v = CRC_UPDATE_BYTE_2_BE(v, *p); + return CRC_UINT32_SWAP(v); +} + +UInt32 MY_FAST_CALL CrcUpdateT1_BeT8(UInt32 v, const void *data, size_t size, const UInt32 *table) +{ + const Byte *p = (const Byte *)data; + table += 0x100; + v = CRC_UINT32_SWAP(v); + for (; size > 0 && ((unsigned)(ptrdiff_t)p & 7) != 0; size--, p++) + v = CRC_UPDATE_BYTE_2_BE(v, *p); + for (; size >= 8; size -= 8, p += 8) + { + UInt32 d; + v ^= *(const UInt32 *)p; + v = + (table + 0x400)[((v ) & 0xFF)] + ^ (table + 0x500)[((v >> 8) & 0xFF)] + ^ (table + 0x600)[((v >> 16) & 0xFF)] + ^ (table + 0x700)[((v >> 24))]; + d = *((const UInt32 *)p + 1); + v ^= + (table + 0x000)[((d ) & 0xFF)] + ^ (table + 0x100)[((d >> 8) & 0xFF)] + ^ (table + 0x200)[((d >> 16) & 0xFF)] + ^ (table + 0x300)[((d >> 24))]; + } + for (; size > 0; size--, p++) + v = CRC_UPDATE_BYTE_2_BE(v, *p); + return CRC_UINT32_SWAP(v); +} + +#endif diff --git a/3rdparty/7z/src/7zDec.c b/3rdparty/7z/src/7zDec.c new file mode 100644 index 0000000000..2a7b09030e --- /dev/null +++ b/3rdparty/7z/src/7zDec.c @@ -0,0 +1,591 @@ +/* 7zDec.c -- Decoding from 7z folder +2019-02-02 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include <string.h> + +/* #define _7ZIP_PPMD_SUPPPORT */ + +#include "7z.h" +#include "7zCrc.h" + +#include "Bcj2.h" +#include "Bra.h" +#include "CpuArch.h" +#include "Delta.h" +#include "LzmaDec.h" +#include "Lzma2Dec.h" +#ifdef _7ZIP_PPMD_SUPPPORT +#include "Ppmd7.h" +#endif + +#define k_Copy 0 +#define k_Delta 3 +#define k_LZMA2 0x21 +#define k_LZMA 0x30101 +#define k_BCJ 0x3030103 +#define k_BCJ2 0x303011B +#define k_PPC 0x3030205 +#define k_IA64 0x3030401 +#define k_ARM 0x3030501 +#define k_ARMT 0x3030701 +#define k_SPARC 0x3030805 + + +#ifdef _7ZIP_PPMD_SUPPPORT + +#define k_PPMD 0x30401 + +typedef struct +{ + IByteIn vt; + const Byte *cur; + const Byte *end; + const Byte *begin; + UInt64 processed; + BoolInt extra; + SRes res; + const ILookInStream *inStream; +} CByteInToLook; + +static Byte ReadByte(const IByteIn *pp) +{ + CByteInToLook *p = CONTAINER_FROM_VTBL(pp, CByteInToLook, vt); + if (p->cur != p->end) + return *p->cur++; + if (p->res == SZ_OK) + { + size_t size = p->cur - p->begin; + p->processed += size; + p->res = ILookInStream_Skip(p->inStream, size); + size = (1 << 25); + p->res = ILookInStream_Look(p->inStream, (const void **)&p->begin, &size); + p->cur = p->begin; + p->end = p->begin + size; + if (size != 0) + return *p->cur++; + } + p->extra = True; + return 0; +} + +static SRes SzDecodePpmd(const Byte *props, unsigned propsSize, UInt64 inSize, const ILookInStream *inStream, + Byte *outBuffer, SizeT outSize, ISzAllocPtr allocMain) +{ + CPpmd7 ppmd; + CByteInToLook s; + SRes res = SZ_OK; + + s.vt.Read = ReadByte; + s.inStream = inStream; + s.begin = s.end = s.cur = NULL; + s.extra = False; + s.res = SZ_OK; + s.processed = 0; + + if (propsSize != 5) + return SZ_ERROR_UNSUPPORTED; + + { + unsigned order = props[0]; 
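/* PPMd7 props: byte 0 = model order, bytes 1..4 = memory size (little-endian) */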
+ UInt32 memSize = GetUi32(props + 1); + if (order < PPMD7_MIN_ORDER || + order > PPMD7_MAX_ORDER || + memSize < PPMD7_MIN_MEM_SIZE || + memSize > PPMD7_MAX_MEM_SIZE) + return SZ_ERROR_UNSUPPORTED; + Ppmd7_Construct(&ppmd); + if (!Ppmd7_Alloc(&ppmd, memSize, allocMain)) + return SZ_ERROR_MEM; + Ppmd7_Init(&ppmd, order); + } + { + CPpmd7z_RangeDec rc; + Ppmd7z_RangeDec_CreateVTable(&rc); + rc.Stream = &s.vt; + if (!Ppmd7z_RangeDec_Init(&rc)) + res = SZ_ERROR_DATA; + else if (s.extra) + res = (s.res != SZ_OK ? s.res : SZ_ERROR_DATA); + else + { + SizeT i; + for (i = 0; i < outSize; i++) + { + int sym = Ppmd7_DecodeSymbol(&ppmd, &rc.vt); + if (s.extra || sym < 0) + break; + outBuffer[i] = (Byte)sym; + } + if (i != outSize) + res = (s.res != SZ_OK ? s.res : SZ_ERROR_DATA); + else if (s.processed + (s.cur - s.begin) != inSize || !Ppmd7z_RangeDec_IsFinishedOK(&rc)) + res = SZ_ERROR_DATA; + } + } + Ppmd7_Free(&ppmd, allocMain); + return res; +} + +#endif + + +static SRes SzDecodeLzma(const Byte *props, unsigned propsSize, UInt64 inSize, ILookInStream *inStream, + Byte *outBuffer, SizeT outSize, ISzAllocPtr allocMain) +{ + CLzmaDec state; + SRes res = SZ_OK; + + LzmaDec_Construct(&state); + RINOK(LzmaDec_AllocateProbs(&state, props, propsSize, allocMain)); + state.dic = outBuffer; + state.dicBufSize = outSize; + LzmaDec_Init(&state); + + for (;;) + { + const void *inBuf = NULL; + size_t lookahead = (1 << 18); + if (lookahead > inSize) + lookahead = (size_t)inSize; + res = ILookInStream_Look(inStream, &inBuf, &lookahead); + if (res != SZ_OK) + break; + + { + SizeT inProcessed = (SizeT)lookahead, dicPos = state.dicPos; + ELzmaStatus status; + res = LzmaDec_DecodeToDic(&state, outSize, (const Byte *)inBuf, &inProcessed, LZMA_FINISH_END, &status); + lookahead -= inProcessed; + inSize -= inProcessed; + if (res != SZ_OK) + break; + + if (status == LZMA_STATUS_FINISHED_WITH_MARK) + { + if (outSize != state.dicPos || inSize != 0) + res = SZ_ERROR_DATA; + break; + } + + if (outSize == state.dicPos && inSize == 0 && status == LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK) + break; + + if (inProcessed == 0 && dicPos == state.dicPos) + { + res = SZ_ERROR_DATA; + break; + } + + res = ILookInStream_Skip(inStream, inProcessed); + if (res != SZ_OK) + break; + } + } + + LzmaDec_FreeProbs(&state, allocMain); + return res; +} + + +#ifndef _7Z_NO_METHOD_LZMA2 + +static SRes SzDecodeLzma2(const Byte *props, unsigned propsSize, UInt64 inSize, ILookInStream *inStream, + Byte *outBuffer, SizeT outSize, ISzAllocPtr allocMain) +{ + CLzma2Dec state; + SRes res = SZ_OK; + + Lzma2Dec_Construct(&state); + if (propsSize != 1) + return SZ_ERROR_DATA; + RINOK(Lzma2Dec_AllocateProbs(&state, props[0], allocMain)); + state.decoder.dic = outBuffer; + state.decoder.dicBufSize = outSize; + Lzma2Dec_Init(&state); + + for (;;) + { + const void *inBuf = NULL; + size_t lookahead = (1 << 18); + if (lookahead > inSize) + lookahead = (size_t)inSize; + res = ILookInStream_Look(inStream, &inBuf, &lookahead); + if (res != SZ_OK) + break; + + { + SizeT inProcessed = (SizeT)lookahead, dicPos = state.decoder.dicPos; + ELzmaStatus status; + res = Lzma2Dec_DecodeToDic(&state, outSize, (const Byte *)inBuf, &inProcessed, LZMA_FINISH_END, &status); + lookahead -= inProcessed; + inSize -= inProcessed; + if (res != SZ_OK) + break; + + if (status == LZMA_STATUS_FINISHED_WITH_MARK) + { + if (outSize != state.decoder.dicPos || inSize != 0) + res = SZ_ERROR_DATA; + break; + } + + if (inProcessed == 0 && dicPos == state.decoder.dicPos) + { + res = SZ_ERROR_DATA; + 
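/* decoder made no progress: the input is truncated or corrupt */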
break; + } + + res = ILookInStream_Skip(inStream, inProcessed); + if (res != SZ_OK) + break; + } + } + + Lzma2Dec_FreeProbs(&state, allocMain); + return res; +} + +#endif + + +static SRes SzDecodeCopy(UInt64 inSize, ILookInStream *inStream, Byte *outBuffer) +{ + while (inSize > 0) + { + const void *inBuf; + size_t curSize = (1 << 18); + if (curSize > inSize) + curSize = (size_t)inSize; + RINOK(ILookInStream_Look(inStream, &inBuf, &curSize)); + if (curSize == 0) + return SZ_ERROR_INPUT_EOF; + memcpy(outBuffer, inBuf, curSize); + outBuffer += curSize; + inSize -= curSize; + RINOK(ILookInStream_Skip(inStream, curSize)); + } + return SZ_OK; +} + +static BoolInt IS_MAIN_METHOD(UInt32 m) +{ + switch (m) + { + case k_Copy: + case k_LZMA: + #ifndef _7Z_NO_METHOD_LZMA2 + case k_LZMA2: + #endif + #ifdef _7ZIP_PPMD_SUPPPORT + case k_PPMD: + #endif + return True; + } + return False; +} + +static BoolInt IS_SUPPORTED_CODER(const CSzCoderInfo *c) +{ + return + c->NumStreams == 1 + /* && c->MethodID <= (UInt32)0xFFFFFFFF */ + && IS_MAIN_METHOD((UInt32)c->MethodID); +} + +#define IS_BCJ2(c) ((c)->MethodID == k_BCJ2 && (c)->NumStreams == 4) + +static SRes CheckSupportedFolder(const CSzFolder *f) +{ + if (f->NumCoders < 1 || f->NumCoders > 4) + return SZ_ERROR_UNSUPPORTED; + if (!IS_SUPPORTED_CODER(&f->Coders[0])) + return SZ_ERROR_UNSUPPORTED; + if (f->NumCoders == 1) + { + if (f->NumPackStreams != 1 || f->PackStreams[0] != 0 || f->NumBonds != 0) + return SZ_ERROR_UNSUPPORTED; + return SZ_OK; + } + + + #ifndef _7Z_NO_METHODS_FILTERS + + if (f->NumCoders == 2) + { + const CSzCoderInfo *c = &f->Coders[1]; + if ( + /* c->MethodID > (UInt32)0xFFFFFFFF || */ + c->NumStreams != 1 + || f->NumPackStreams != 1 + || f->PackStreams[0] != 0 + || f->NumBonds != 1 + || f->Bonds[0].InIndex != 1 + || f->Bonds[0].OutIndex != 0) + return SZ_ERROR_UNSUPPORTED; + switch ((UInt32)c->MethodID) + { + case k_Delta: + case k_BCJ: + case k_PPC: + case k_IA64: + case k_SPARC: + case k_ARM: + case k_ARMT: + break; + default: + return SZ_ERROR_UNSUPPORTED; + } + return SZ_OK; + } + + #endif + + + if (f->NumCoders == 4) + { + if (!IS_SUPPORTED_CODER(&f->Coders[1]) + || !IS_SUPPORTED_CODER(&f->Coders[2]) + || !IS_BCJ2(&f->Coders[3])) + return SZ_ERROR_UNSUPPORTED; + if (f->NumPackStreams != 4 + || f->PackStreams[0] != 2 + || f->PackStreams[1] != 6 + || f->PackStreams[2] != 1 + || f->PackStreams[3] != 0 + || f->NumBonds != 3 + || f->Bonds[0].InIndex != 5 || f->Bonds[0].OutIndex != 0 + || f->Bonds[1].InIndex != 4 || f->Bonds[1].OutIndex != 1 + || f->Bonds[2].InIndex != 3 || f->Bonds[2].OutIndex != 2) + return SZ_ERROR_UNSUPPORTED; + return SZ_OK; + } + + return SZ_ERROR_UNSUPPORTED; +} + +#define CASE_BRA_CONV(isa) case k_ ## isa: isa ## _Convert(outBuffer, outSize, 0, 0); break; + +static SRes SzFolder_Decode2(const CSzFolder *folder, + const Byte *propsData, + const UInt64 *unpackSizes, + const UInt64 *packPositions, + ILookInStream *inStream, UInt64 startPos, + Byte *outBuffer, SizeT outSize, ISzAllocPtr allocMain, + Byte *tempBuf[]) +{ + UInt32 ci; + SizeT tempSizes[3] = { 0, 0, 0}; + SizeT tempSize3 = 0; + Byte *tempBuf3 = 0; + + RINOK(CheckSupportedFolder(folder)); + + for (ci = 0; ci < folder->NumCoders; ci++) + { + const CSzCoderInfo *coder = &folder->Coders[ci]; + + if (IS_MAIN_METHOD((UInt32)coder->MethodID)) + { + UInt32 si = 0; + UInt64 offset; + UInt64 inSize; + Byte *outBufCur = outBuffer; + SizeT outSizeCur = outSize; + if (folder->NumCoders == 4) + { + UInt32 indices[] = { 3, 2, 0 }; + UInt64 unpackSize = unpackSizes[ci]; 
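/* BCJ2 folder: coders 0..2 take their input from pack streams 3, 2 and 0 */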
+ si = indices[ci]; + if (ci < 2) + { + Byte *temp; + outSizeCur = (SizeT)unpackSize; + if (outSizeCur != unpackSize) + return SZ_ERROR_MEM; + temp = (Byte *)ISzAlloc_Alloc(allocMain, outSizeCur); + if (!temp && outSizeCur != 0) + return SZ_ERROR_MEM; + outBufCur = tempBuf[1 - ci] = temp; + tempSizes[1 - ci] = outSizeCur; + } + else if (ci == 2) + { + if (unpackSize > outSize) /* check it */ + return SZ_ERROR_PARAM; + tempBuf3 = outBufCur = outBuffer + (outSize - (size_t)unpackSize); + tempSize3 = outSizeCur = (SizeT)unpackSize; + } + else + return SZ_ERROR_UNSUPPORTED; + } + offset = packPositions[si]; + inSize = packPositions[(size_t)si + 1] - offset; + RINOK(LookInStream_SeekTo(inStream, startPos + offset)); + + if (coder->MethodID == k_Copy) + { + if (inSize != outSizeCur) /* check it */ + return SZ_ERROR_DATA; + RINOK(SzDecodeCopy(inSize, inStream, outBufCur)); + } + else if (coder->MethodID == k_LZMA) + { + RINOK(SzDecodeLzma(propsData + coder->PropsOffset, coder->PropsSize, inSize, inStream, outBufCur, outSizeCur, allocMain)); + } + #ifndef _7Z_NO_METHOD_LZMA2 + else if (coder->MethodID == k_LZMA2) + { + RINOK(SzDecodeLzma2(propsData + coder->PropsOffset, coder->PropsSize, inSize, inStream, outBufCur, outSizeCur, allocMain)); + } + #endif + #ifdef _7ZIP_PPMD_SUPPPORT + else if (coder->MethodID == k_PPMD) + { + RINOK(SzDecodePpmd(propsData + coder->PropsOffset, coder->PropsSize, inSize, inStream, outBufCur, outSizeCur, allocMain)); + } + #endif + else + return SZ_ERROR_UNSUPPORTED; + } + else if (coder->MethodID == k_BCJ2) + { + UInt64 offset = packPositions[1]; + UInt64 s3Size = packPositions[2] - offset; + + if (ci != 3) + return SZ_ERROR_UNSUPPORTED; + + tempSizes[2] = (SizeT)s3Size; + if (tempSizes[2] != s3Size) + return SZ_ERROR_MEM; + tempBuf[2] = (Byte *)ISzAlloc_Alloc(allocMain, tempSizes[2]); + if (!tempBuf[2] && tempSizes[2] != 0) + return SZ_ERROR_MEM; + + RINOK(LookInStream_SeekTo(inStream, startPos + offset)); + RINOK(SzDecodeCopy(s3Size, inStream, tempBuf[2])); + + if ((tempSizes[0] & 3) != 0 || + (tempSizes[1] & 3) != 0 || + tempSize3 + tempSizes[0] + tempSizes[1] != outSize) + return SZ_ERROR_DATA; + + { + CBcj2Dec p; + + p.bufs[0] = tempBuf3; p.lims[0] = tempBuf3 + tempSize3; + p.bufs[1] = tempBuf[0]; p.lims[1] = tempBuf[0] + tempSizes[0]; + p.bufs[2] = tempBuf[1]; p.lims[2] = tempBuf[1] + tempSizes[1]; + p.bufs[3] = tempBuf[2]; p.lims[3] = tempBuf[2] + tempSizes[2]; + + p.dest = outBuffer; + p.destLim = outBuffer + outSize; + + Bcj2Dec_Init(&p); + RINOK(Bcj2Dec_Decode(&p)); + + { + unsigned i; + for (i = 0; i < 4; i++) + if (p.bufs[i] != p.lims[i]) + return SZ_ERROR_DATA; + + if (!Bcj2Dec_IsFinished(&p)) + return SZ_ERROR_DATA; + + if (p.dest != p.destLim + || p.state != BCJ2_STREAM_MAIN) + return SZ_ERROR_DATA; + } + } + } + #ifndef _7Z_NO_METHODS_FILTERS + else if (ci == 1) + { + if (coder->MethodID == k_Delta) + { + if (coder->PropsSize != 1) + return SZ_ERROR_UNSUPPORTED; + { + Byte state[DELTA_STATE_SIZE]; + Delta_Init(state); + Delta_Decode(state, (unsigned)(propsData[coder->PropsOffset]) + 1, outBuffer, outSize); + } + } + else + { + if (coder->PropsSize != 0) + return SZ_ERROR_UNSUPPORTED; + switch (coder->MethodID) + { + case k_BCJ: + { + UInt32 state; + x86_Convert_Init(state); + x86_Convert(outBuffer, outSize, 0, &state, 0); + break; + } + CASE_BRA_CONV(PPC) + CASE_BRA_CONV(IA64) + CASE_BRA_CONV(SPARC) + CASE_BRA_CONV(ARM) + CASE_BRA_CONV(ARMT) + default: + return SZ_ERROR_UNSUPPORTED; + } + } + } + #endif + else + return SZ_ERROR_UNSUPPORTED; + } + + 
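/* every coder in the folder has been decoded or applied as an in-place filter */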
return SZ_OK; +} + + +SRes SzAr_DecodeFolder(const CSzAr *p, UInt32 folderIndex, + ILookInStream *inStream, UInt64 startPos, + Byte *outBuffer, size_t outSize, + ISzAllocPtr allocMain) +{ + SRes res; + CSzFolder folder; + CSzData sd; + + const Byte *data = p->CodersData + p->FoCodersOffsets[folderIndex]; + sd.Data = data; + sd.Size = p->FoCodersOffsets[(size_t)folderIndex + 1] - p->FoCodersOffsets[folderIndex]; + + res = SzGetNextFolderItem(&folder, &sd); + + if (res != SZ_OK) + return res; + + if (sd.Size != 0 + || folder.UnpackStream != p->FoToMainUnpackSizeIndex[folderIndex] + || outSize != SzAr_GetFolderUnpackSize(p, folderIndex)) + return SZ_ERROR_FAIL; + { + unsigned i; + Byte *tempBuf[3] = { 0, 0, 0}; + + res = SzFolder_Decode2(&folder, data, + &p->CoderUnpackSizes[p->FoToCoderUnpackSizes[folderIndex]], + p->PackPositions + p->FoStartPackStreamIndex[folderIndex], + inStream, startPos, + outBuffer, (SizeT)outSize, allocMain, tempBuf); + + for (i = 0; i < 3; i++) + ISzAlloc_Free(allocMain, tempBuf[i]); + + if (res == SZ_OK) + if (SzBitWithVals_Check(&p->FolderCRCs, folderIndex)) + if (CrcCalc(outBuffer, outSize) != p->FolderCRCs.Vals[folderIndex]) + res = SZ_ERROR_CRC; + + return res; + } +} diff --git a/3rdparty/7z/src/7zFile.c b/3rdparty/7z/src/7zFile.c new file mode 100644 index 0000000000..e486901e30 --- /dev/null +++ b/3rdparty/7z/src/7zFile.c @@ -0,0 +1,286 @@ +/* 7zFile.c -- File IO +2017-04-03 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "7zFile.h" + +#ifndef USE_WINDOWS_FILE + +#ifndef UNDER_CE +#include +#endif + +#else + +/* + ReadFile and WriteFile functions in Windows have BUG: + If you Read or Write 64MB or more (probably min_failure_size = 64MB - 32KB + 1) + from/to Network file, it returns ERROR_NO_SYSTEM_RESOURCES + (Insufficient system resources exist to complete the requested service). + Probably in some version of Windows there are problems with other sizes: + for 32 MB (maybe also for 16 MB). + And message can be "Network connection was lost" +*/ + +#define kChunkSizeMax (1 << 22) + +#endif + +void File_Construct(CSzFile *p) +{ + #ifdef USE_WINDOWS_FILE + p->handle = INVALID_HANDLE_VALUE; + #else + p->file = NULL; + #endif +} + +#if !defined(UNDER_CE) || !defined(USE_WINDOWS_FILE) +static WRes File_Open(CSzFile *p, const char *name, int writeMode) +{ + #ifdef USE_WINDOWS_FILE + p->handle = CreateFileA(name, + writeMode ? GENERIC_WRITE : GENERIC_READ, + FILE_SHARE_READ, NULL, + writeMode ? CREATE_ALWAYS : OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, NULL); + return (p->handle != INVALID_HANDLE_VALUE) ? 0 : GetLastError(); + #else + p->file = fopen(name, writeMode ? "wb+" : "rb"); + return (p->file != 0) ? 0 : + #ifdef UNDER_CE + 2; /* ENOENT */ + #else + errno; + #endif + #endif +} + +WRes InFile_Open(CSzFile *p, const char *name) { return File_Open(p, name, 0); } +WRes OutFile_Open(CSzFile *p, const char *name) { return File_Open(p, name, 1); } +#endif + +#ifdef USE_WINDOWS_FILE +static WRes File_OpenW(CSzFile *p, const WCHAR *name, int writeMode) +{ + p->handle = CreateFileW(name, + writeMode ? GENERIC_WRITE : GENERIC_READ, + FILE_SHARE_READ, NULL, + writeMode ? CREATE_ALWAYS : OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, NULL); + return (p->handle != INVALID_HANDLE_VALUE) ? 
0 : GetLastError(); +} +WRes InFile_OpenW(CSzFile *p, const WCHAR *name) { return File_OpenW(p, name, 0); } +WRes OutFile_OpenW(CSzFile *p, const WCHAR *name) { return File_OpenW(p, name, 1); } +#endif + +WRes File_Close(CSzFile *p) +{ + #ifdef USE_WINDOWS_FILE + if (p->handle != INVALID_HANDLE_VALUE) + { + if (!CloseHandle(p->handle)) + return GetLastError(); + p->handle = INVALID_HANDLE_VALUE; + } + #else + if (p->file != NULL) + { + int res = fclose(p->file); + if (res != 0) + return res; + p->file = NULL; + } + #endif + return 0; +} + +WRes File_Read(CSzFile *p, void *data, size_t *size) +{ + size_t originalSize = *size; + if (originalSize == 0) + return 0; + + #ifdef USE_WINDOWS_FILE + + *size = 0; + do + { + DWORD curSize = (originalSize > kChunkSizeMax) ? kChunkSizeMax : (DWORD)originalSize; + DWORD processed = 0; + BOOL res = ReadFile(p->handle, data, curSize, &processed, NULL); + data = (void *)((Byte *)data + processed); + originalSize -= processed; + *size += processed; + if (!res) + return GetLastError(); + if (processed == 0) + break; + } + while (originalSize > 0); + return 0; + + #else + + *size = fread(data, 1, originalSize, p->file); + if (*size == originalSize) + return 0; + return ferror(p->file); + + #endif +} + +WRes File_Write(CSzFile *p, const void *data, size_t *size) +{ + size_t originalSize = *size; + if (originalSize == 0) + return 0; + + #ifdef USE_WINDOWS_FILE + + *size = 0; + do + { + DWORD curSize = (originalSize > kChunkSizeMax) ? kChunkSizeMax : (DWORD)originalSize; + DWORD processed = 0; + BOOL res = WriteFile(p->handle, data, curSize, &processed, NULL); + data = (void *)((Byte *)data + processed); + originalSize -= processed; + *size += processed; + if (!res) + return GetLastError(); + if (processed == 0) + break; + } + while (originalSize > 0); + return 0; + + #else + + *size = fwrite(data, 1, originalSize, p->file); + if (*size == originalSize) + return 0; + return ferror(p->file); + + #endif +} + +WRes File_Seek(CSzFile *p, Int64 *pos, ESzSeek origin) +{ + #ifdef USE_WINDOWS_FILE + + LARGE_INTEGER value; + DWORD moveMethod; + value.LowPart = (DWORD)*pos; + value.HighPart = (LONG)((UInt64)*pos >> 16 >> 16); /* for case when UInt64 is 32-bit only */ + switch (origin) + { + case SZ_SEEK_SET: moveMethod = FILE_BEGIN; break; + case SZ_SEEK_CUR: moveMethod = FILE_CURRENT; break; + case SZ_SEEK_END: moveMethod = FILE_END; break; + default: return ERROR_INVALID_PARAMETER; + } + value.LowPart = SetFilePointer(p->handle, value.LowPart, &value.HighPart, moveMethod); + if (value.LowPart == 0xFFFFFFFF) + { + WRes res = GetLastError(); + if (res != NO_ERROR) + return res; + } + *pos = ((Int64)value.HighPart << 32) | value.LowPart; + return 0; + + #else + + int moveMethod; + int res; + switch (origin) + { + case SZ_SEEK_SET: moveMethod = SEEK_SET; break; + case SZ_SEEK_CUR: moveMethod = SEEK_CUR; break; + case SZ_SEEK_END: moveMethod = SEEK_END; break; + default: return 1; + } + res = fseek(p->file, (long)*pos, moveMethod); + *pos = ftell(p->file); + return res; + + #endif +} + +WRes File_GetLength(CSzFile *p, UInt64 *length) +{ + #ifdef USE_WINDOWS_FILE + + DWORD sizeHigh; + DWORD sizeLow = GetFileSize(p->handle, &sizeHigh); + if (sizeLow == 0xFFFFFFFF) + { + DWORD res = GetLastError(); + if (res != NO_ERROR) + return res; + } + *length = (((UInt64)sizeHigh) << 32) + sizeLow; + return 0; + + #else + + long pos = ftell(p->file); + int res = fseek(p->file, 0, SEEK_END); + *length = ftell(p->file); + fseek(p->file, pos, SEEK_SET); + return res; + + #endif +} + + +/* 
---------- FileSeqInStream ---------- */
+
+static SRes FileSeqInStream_Read(const ISeqInStream *pp, void *buf, size_t *size)
+{
+  CFileSeqInStream *p = CONTAINER_FROM_VTBL(pp, CFileSeqInStream, vt);
+  return File_Read(&p->file, buf, size) == 0 ? SZ_OK : SZ_ERROR_READ;
+}
+
+void FileSeqInStream_CreateVTable(CFileSeqInStream *p)
+{
+  p->vt.Read = FileSeqInStream_Read;
+}
+
+
+/* ---------- FileInStream ---------- */
+
+static SRes FileInStream_Read(const ISeekInStream *pp, void *buf, size_t *size)
+{
+  CFileInStream *p = CONTAINER_FROM_VTBL(pp, CFileInStream, vt);
+  return (File_Read(&p->file, buf, size) == 0) ? SZ_OK : SZ_ERROR_READ;
+}
+
+static SRes FileInStream_Seek(const ISeekInStream *pp, Int64 *pos, ESzSeek origin)
+{
+  CFileInStream *p = CONTAINER_FROM_VTBL(pp, CFileInStream, vt);
+  return File_Seek(&p->file, pos, origin);
+}
+
+void FileInStream_CreateVTable(CFileInStream *p)
+{
+  p->vt.Read = FileInStream_Read;
+  p->vt.Seek = FileInStream_Seek;
+}
+
+
+/* ---------- FileOutStream ---------- */
+
+static size_t FileOutStream_Write(const ISeqOutStream *pp, const void *data, size_t size)
+{
+  CFileOutStream *p = CONTAINER_FROM_VTBL(pp, CFileOutStream, vt);
+  File_Write(&p->file, data, &size);
+  return size;
+}
+
+void FileOutStream_CreateVTable(CFileOutStream *p)
+{
+  p->vt.Write = FileOutStream_Write;
+}
diff --git a/3rdparty/7z/src/7zFile.h b/3rdparty/7z/src/7zFile.h
new file mode 100644
index 0000000000..7e263bea1b
--- /dev/null
+++ b/3rdparty/7z/src/7zFile.h
@@ -0,0 +1,83 @@
+/* 7zFile.h -- File IO
+2017-04-03 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_FILE_H
+#define __7Z_FILE_H
+
+#ifdef _WIN32
+#define USE_WINDOWS_FILE
+#endif
+
+#ifdef USE_WINDOWS_FILE
+#include <windows.h>
+#else
+#include <stdio.h>
+#endif
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+/* ---------- File ---------- */
+
+typedef struct
+{
+  #ifdef USE_WINDOWS_FILE
+  HANDLE handle;
+  #else
+  FILE *file;
+  #endif
+} CSzFile;
+
+void File_Construct(CSzFile *p);
+#if !defined(UNDER_CE) || !defined(USE_WINDOWS_FILE)
+WRes InFile_Open(CSzFile *p, const char *name);
+WRes OutFile_Open(CSzFile *p, const char *name);
+#endif
+#ifdef USE_WINDOWS_FILE
+WRes InFile_OpenW(CSzFile *p, const WCHAR *name);
+WRes OutFile_OpenW(CSzFile *p, const WCHAR *name);
+#endif
+WRes File_Close(CSzFile *p);
+
+/* reads max(*size, remain file's size) bytes */
+WRes File_Read(CSzFile *p, void *data, size_t *size);
+
+/* writes *size bytes */
+WRes File_Write(CSzFile *p, const void *data, size_t *size);
+
+WRes File_Seek(CSzFile *p, Int64 *pos, ESzSeek origin);
+WRes File_GetLength(CSzFile *p, UInt64 *length);
+
+
+/* ---------- FileInStream ---------- */
+
+typedef struct
+{
+  ISeqInStream vt;
+  CSzFile file;
+} CFileSeqInStream;
+
+void FileSeqInStream_CreateVTable(CFileSeqInStream *p);
+
+
+typedef struct
+{
+  ISeekInStream vt;
+  CSzFile file;
+} CFileInStream;
+
+void FileInStream_CreateVTable(CFileInStream *p);
+
+
+typedef struct
+{
+  ISeqOutStream vt;
+  CSzFile file;
+} CFileOutStream;
+
+void FileOutStream_CreateVTable(CFileOutStream *p);
+
+EXTERN_C_END
+
+#endif
diff --git a/3rdparty/7z/src/7zStream.c b/3rdparty/7z/src/7zStream.c
new file mode 100644
index 0000000000..579741fadc
--- /dev/null
+++ b/3rdparty/7z/src/7zStream.c
@@ -0,0 +1,176 @@
+/* 7zStream.c -- 7z Stream functions
+2017-04-03 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include <string.h>
+
+#include "7zTypes.h"
+
+SRes SeqInStream_Read2(const ISeqInStream *stream, void *buf, size_t size, SRes errorType)
+{
+  while (size != 0)
+  {
+    size_t processed = size;
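+    /* descriptive note (editor's addition): ISeqInStream::Read may return
+       fewer bytes than requested (see 7zTypes.h), so this loop keeps reading
+       until `size` bytes arrive and maps a zero-byte read, i.e. end of
+       stream, to the caller-chosen errorType. */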
RINOK(ISeqInStream_Read(stream, buf, &processed)); + if (processed == 0) + return errorType; + buf = (void *)((Byte *)buf + processed); + size -= processed; + } + return SZ_OK; +} + +SRes SeqInStream_Read(const ISeqInStream *stream, void *buf, size_t size) +{ + return SeqInStream_Read2(stream, buf, size, SZ_ERROR_INPUT_EOF); +} + +SRes SeqInStream_ReadByte(const ISeqInStream *stream, Byte *buf) +{ + size_t processed = 1; + RINOK(ISeqInStream_Read(stream, buf, &processed)); + return (processed == 1) ? SZ_OK : SZ_ERROR_INPUT_EOF; +} + + + +SRes LookInStream_SeekTo(const ILookInStream *stream, UInt64 offset) +{ + Int64 t = offset; + return ILookInStream_Seek(stream, &t, SZ_SEEK_SET); +} + +SRes LookInStream_LookRead(const ILookInStream *stream, void *buf, size_t *size) +{ + const void *lookBuf; + if (*size == 0) + return SZ_OK; + RINOK(ILookInStream_Look(stream, &lookBuf, size)); + memcpy(buf, lookBuf, *size); + return ILookInStream_Skip(stream, *size); +} + +SRes LookInStream_Read2(const ILookInStream *stream, void *buf, size_t size, SRes errorType) +{ + while (size != 0) + { + size_t processed = size; + RINOK(ILookInStream_Read(stream, buf, &processed)); + if (processed == 0) + return errorType; + buf = (void *)((Byte *)buf + processed); + size -= processed; + } + return SZ_OK; +} + +SRes LookInStream_Read(const ILookInStream *stream, void *buf, size_t size) +{ + return LookInStream_Read2(stream, buf, size, SZ_ERROR_INPUT_EOF); +} + + + +#define GET_LookToRead2 CLookToRead2 *p = CONTAINER_FROM_VTBL(pp, CLookToRead2, vt); + +static SRes LookToRead2_Look_Lookahead(const ILookInStream *pp, const void **buf, size_t *size) +{ + SRes res = SZ_OK; + GET_LookToRead2 + size_t size2 = p->size - p->pos; + if (size2 == 0 && *size != 0) + { + p->pos = 0; + p->size = 0; + size2 = p->bufSize; + res = ISeekInStream_Read(p->realStream, p->buf, &size2); + p->size = size2; + } + if (*size > size2) + *size = size2; + *buf = p->buf + p->pos; + return res; +} + +static SRes LookToRead2_Look_Exact(const ILookInStream *pp, const void **buf, size_t *size) +{ + SRes res = SZ_OK; + GET_LookToRead2 + size_t size2 = p->size - p->pos; + if (size2 == 0 && *size != 0) + { + p->pos = 0; + p->size = 0; + if (*size > p->bufSize) + *size = p->bufSize; + res = ISeekInStream_Read(p->realStream, p->buf, size); + size2 = p->size = *size; + } + if (*size > size2) + *size = size2; + *buf = p->buf + p->pos; + return res; +} + +static SRes LookToRead2_Skip(const ILookInStream *pp, size_t offset) +{ + GET_LookToRead2 + p->pos += offset; + return SZ_OK; +} + +static SRes LookToRead2_Read(const ILookInStream *pp, void *buf, size_t *size) +{ + GET_LookToRead2 + size_t rem = p->size - p->pos; + if (rem == 0) + return ISeekInStream_Read(p->realStream, buf, size); + if (rem > *size) + rem = *size; + memcpy(buf, p->buf + p->pos, rem); + p->pos += rem; + *size = rem; + return SZ_OK; +} + +static SRes LookToRead2_Seek(const ILookInStream *pp, Int64 *pos, ESzSeek origin) +{ + GET_LookToRead2 + p->pos = p->size = 0; + return ISeekInStream_Seek(p->realStream, pos, origin); +} + +void LookToRead2_CreateVTable(CLookToRead2 *p, int lookahead) +{ + p->vt.Look = lookahead ? 
+      LookToRead2_Look_Lookahead :
+      LookToRead2_Look_Exact;
+  p->vt.Skip = LookToRead2_Skip;
+  p->vt.Read = LookToRead2_Read;
+  p->vt.Seek = LookToRead2_Seek;
+}
+
+
+
+static SRes SecToLook_Read(const ISeqInStream *pp, void *buf, size_t *size)
+{
+  CSecToLook *p = CONTAINER_FROM_VTBL(pp, CSecToLook, vt);
+  return LookInStream_LookRead(p->realStream, buf, size);
+}
+
+void SecToLook_CreateVTable(CSecToLook *p)
+{
+  p->vt.Read = SecToLook_Read;
+}
+
+static SRes SecToRead_Read(const ISeqInStream *pp, void *buf, size_t *size)
+{
+  CSecToRead *p = CONTAINER_FROM_VTBL(pp, CSecToRead, vt);
+  return ILookInStream_Read(p->realStream, buf, size);
+}
+
+void SecToRead_CreateVTable(CSecToRead *p)
+{
+  p->vt.Read = SecToRead_Read;
+}
diff --git a/3rdparty/7z/src/7zTypes.h b/3rdparty/7z/src/7zTypes.h
new file mode 100644
index 0000000000..593f5aa259
--- /dev/null
+++ b/3rdparty/7z/src/7zTypes.h
@@ -0,0 +1,375 @@
+/* 7zTypes.h -- Basic types
+2018-08-04 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_TYPES_H
+#define __7Z_TYPES_H
+
+#ifdef _WIN32
+/* #include <windows.h> */
+#endif
+
+#include <stddef.h>
+
+#ifndef EXTERN_C_BEGIN
+#ifdef __cplusplus
+#define EXTERN_C_BEGIN extern "C" {
+#define EXTERN_C_END }
+#else
+#define EXTERN_C_BEGIN
+#define EXTERN_C_END
+#endif
+#endif
+
+EXTERN_C_BEGIN
+
+#define SZ_OK 0
+
+#define SZ_ERROR_DATA 1
+#define SZ_ERROR_MEM 2
+#define SZ_ERROR_CRC 3
+#define SZ_ERROR_UNSUPPORTED 4
+#define SZ_ERROR_PARAM 5
+#define SZ_ERROR_INPUT_EOF 6
+#define SZ_ERROR_OUTPUT_EOF 7
+#define SZ_ERROR_READ 8
+#define SZ_ERROR_WRITE 9
+#define SZ_ERROR_PROGRESS 10
+#define SZ_ERROR_FAIL 11
+#define SZ_ERROR_THREAD 12
+
+#define SZ_ERROR_ARCHIVE 16
+#define SZ_ERROR_NO_ARCHIVE 17
+
+typedef int SRes;
+
+
+#ifdef _WIN32
+
+/* typedef DWORD WRes; */
+typedef unsigned WRes;
+#define MY_SRes_HRESULT_FROM_WRes(x) HRESULT_FROM_WIN32(x)
+
+#else
+
+typedef int WRes;
+#define MY__FACILITY_WIN32 7
+#define MY__FACILITY__WRes MY__FACILITY_WIN32
+#define MY_SRes_HRESULT_FROM_WRes(x) ((HRESULT)(x) <= 0 ? ((HRESULT)(x)) : ((HRESULT) (((x) & 0x0000FFFF) | (MY__FACILITY__WRes << 16) | 0x80000000)))
+
+#endif
+
+
+#ifndef RINOK
+#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; }
+#endif
+
+typedef unsigned char Byte;
+typedef short Int16;
+typedef unsigned short UInt16;
+
+#ifdef _LZMA_UINT32_IS_ULONG
+typedef long Int32;
+typedef unsigned long UInt32;
+#else
+typedef int Int32;
+typedef unsigned int UInt32;
+#endif
+
+#ifdef _SZ_NO_INT_64
+
+/* define _SZ_NO_INT_64, if your compiler doesn't support 64-bit integers.
+   NOTES: Some code will work incorrectly in that case!
*/ + +typedef long Int64; +typedef unsigned long UInt64; + +#else + +#if defined(_MSC_VER) || defined(__BORLANDC__) +typedef __int64 Int64; +typedef unsigned __int64 UInt64; +#define UINT64_CONST(n) n +#else +typedef long long int Int64; +typedef unsigned long long int UInt64; +#define UINT64_CONST(n) n ## ULL +#endif + +#endif + +#ifdef _LZMA_NO_SYSTEM_SIZE_T +typedef UInt32 SizeT; +#else +typedef size_t SizeT; +#endif + +typedef int BoolInt; +/* typedef BoolInt Bool; */ +#define True 1 +#define False 0 + + +#ifdef _WIN32 +#define MY_STD_CALL __stdcall +#else +#define MY_STD_CALL +#endif + +#ifdef _MSC_VER + +#if _MSC_VER >= 1300 +#define MY_NO_INLINE __declspec(noinline) +#else +#define MY_NO_INLINE +#endif + +#define MY_FORCE_INLINE __forceinline + +#define MY_CDECL __cdecl +#define MY_FAST_CALL __fastcall + +#else + +#define MY_NO_INLINE +#define MY_FORCE_INLINE +#define MY_CDECL +#define MY_FAST_CALL + +/* inline keyword : for C++ / C99 */ + +/* GCC, clang: */ +/* +#if defined (__GNUC__) && (__GNUC__ >= 4) +#define MY_FORCE_INLINE __attribute__((always_inline)) +#define MY_NO_INLINE __attribute__((noinline)) +#endif +*/ + +#endif + + +/* The following interfaces use first parameter as pointer to structure */ + +typedef struct IByteIn IByteIn; +struct IByteIn +{ + Byte (*Read)(const IByteIn *p); /* reads one byte, returns 0 in case of EOF or error */ +}; +#define IByteIn_Read(p) (p)->Read(p) + + +typedef struct IByteOut IByteOut; +struct IByteOut +{ + void (*Write)(const IByteOut *p, Byte b); +}; +#define IByteOut_Write(p, b) (p)->Write(p, b) + + +typedef struct ISeqInStream ISeqInStream; +struct ISeqInStream +{ + SRes (*Read)(const ISeqInStream *p, void *buf, size_t *size); + /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream. + (output(*size) < input(*size)) is allowed */ +}; +#define ISeqInStream_Read(p, buf, size) (p)->Read(p, buf, size) + +/* it can return SZ_ERROR_INPUT_EOF */ +SRes SeqInStream_Read(const ISeqInStream *stream, void *buf, size_t size); +SRes SeqInStream_Read2(const ISeqInStream *stream, void *buf, size_t size, SRes errorType); +SRes SeqInStream_ReadByte(const ISeqInStream *stream, Byte *buf); + + +typedef struct ISeqOutStream ISeqOutStream; +struct ISeqOutStream +{ + size_t (*Write)(const ISeqOutStream *p, const void *buf, size_t size); + /* Returns: result - the number of actually written bytes. + (result < size) means error */ +}; +#define ISeqOutStream_Write(p, buf, size) (p)->Write(p, buf, size) + +typedef enum +{ + SZ_SEEK_SET = 0, + SZ_SEEK_CUR = 1, + SZ_SEEK_END = 2 +} ESzSeek; + + +typedef struct ISeekInStream ISeekInStream; +struct ISeekInStream +{ + SRes (*Read)(const ISeekInStream *p, void *buf, size_t *size); /* same as ISeqInStream::Read */ + SRes (*Seek)(const ISeekInStream *p, Int64 *pos, ESzSeek origin); +}; +#define ISeekInStream_Read(p, buf, size) (p)->Read(p, buf, size) +#define ISeekInStream_Seek(p, pos, origin) (p)->Seek(p, pos, origin) + + +typedef struct ILookInStream ILookInStream; +struct ILookInStream +{ + SRes (*Look)(const ILookInStream *p, const void **buf, size_t *size); + /* if (input(*size) != 0 && output(*size) == 0) means end_of_stream. + (output(*size) > input(*size)) is not allowed + (output(*size) < input(*size)) is allowed */ + SRes (*Skip)(const ILookInStream *p, size_t offset); + /* offset must be <= output(*size) of Look */ + + SRes (*Read)(const ILookInStream *p, void *buf, size_t *size); + /* reads directly (without buffer). 
It's same as ISeqInStream::Read */ + SRes (*Seek)(const ILookInStream *p, Int64 *pos, ESzSeek origin); +}; + +#define ILookInStream_Look(p, buf, size) (p)->Look(p, buf, size) +#define ILookInStream_Skip(p, offset) (p)->Skip(p, offset) +#define ILookInStream_Read(p, buf, size) (p)->Read(p, buf, size) +#define ILookInStream_Seek(p, pos, origin) (p)->Seek(p, pos, origin) + + +SRes LookInStream_LookRead(const ILookInStream *stream, void *buf, size_t *size); +SRes LookInStream_SeekTo(const ILookInStream *stream, UInt64 offset); + +/* reads via ILookInStream::Read */ +SRes LookInStream_Read2(const ILookInStream *stream, void *buf, size_t size, SRes errorType); +SRes LookInStream_Read(const ILookInStream *stream, void *buf, size_t size); + + + +typedef struct +{ + ILookInStream vt; + const ISeekInStream *realStream; + + size_t pos; + size_t size; /* it's data size */ + + /* the following variables must be set outside */ + Byte *buf; + size_t bufSize; +} CLookToRead2; + +void LookToRead2_CreateVTable(CLookToRead2 *p, int lookahead); + +#define LookToRead2_Init(p) { (p)->pos = (p)->size = 0; } + + +typedef struct +{ + ISeqInStream vt; + const ILookInStream *realStream; +} CSecToLook; + +void SecToLook_CreateVTable(CSecToLook *p); + + + +typedef struct +{ + ISeqInStream vt; + const ILookInStream *realStream; +} CSecToRead; + +void SecToRead_CreateVTable(CSecToRead *p); + + +typedef struct ICompressProgress ICompressProgress; + +struct ICompressProgress +{ + SRes (*Progress)(const ICompressProgress *p, UInt64 inSize, UInt64 outSize); + /* Returns: result. (result != SZ_OK) means break. + Value (UInt64)(Int64)-1 for size means unknown value. */ +}; +#define ICompressProgress_Progress(p, inSize, outSize) (p)->Progress(p, inSize, outSize) + + + +typedef struct ISzAlloc ISzAlloc; +typedef const ISzAlloc * ISzAllocPtr; + +struct ISzAlloc +{ + void *(*Alloc)(ISzAllocPtr p, size_t size); + void (*Free)(ISzAllocPtr p, void *address); /* address can be 0 */ +}; + +#define ISzAlloc_Alloc(p, size) (p)->Alloc(p, size) +#define ISzAlloc_Free(p, a) (p)->Free(p, a) + +/* deprecated */ +#define IAlloc_Alloc(p, size) ISzAlloc_Alloc(p, size) +#define IAlloc_Free(p, a) ISzAlloc_Free(p, a) + + + + + +#ifndef MY_offsetof + #ifdef offsetof + #define MY_offsetof(type, m) offsetof(type, m) + /* + #define MY_offsetof(type, m) FIELD_OFFSET(type, m) + */ + #else + #define MY_offsetof(type, m) ((size_t)&(((type *)0)->m)) + #endif +#endif + + + +#ifndef MY_container_of + +/* +#define MY_container_of(ptr, type, m) container_of(ptr, type, m) +#define MY_container_of(ptr, type, m) CONTAINING_RECORD(ptr, type, m) +#define MY_container_of(ptr, type, m) ((type *)((char *)(ptr) - offsetof(type, m))) +#define MY_container_of(ptr, type, m) (&((type *)0)->m == (ptr), ((type *)(((char *)(ptr)) - MY_offsetof(type, m)))) +*/ + +/* + GCC shows warning: "perhaps the 'offsetof' macro was used incorrectly" + GCC 3.4.4 : classes with constructor + GCC 4.8.1 : classes with non-public variable members" +*/ + +#define MY_container_of(ptr, type, m) ((type *)((char *)(1 ? 
(ptr) : &((type *)0)->m) - MY_offsetof(type, m)))
+
+
+#endif
+
+#define CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m) ((type *)(ptr))
+
+/*
+#define CONTAINER_FROM_VTBL(ptr, type, m) CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)
+*/
+#define CONTAINER_FROM_VTBL(ptr, type, m) MY_container_of(ptr, type, m)
+
+#define CONTAINER_FROM_VTBL_CLS(ptr, type, m) CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)
+/*
+#define CONTAINER_FROM_VTBL_CLS(ptr, type, m) CONTAINER_FROM_VTBL(ptr, type, m)
+*/
+
+
+
+#ifdef _WIN32
+
+#define CHAR_PATH_SEPARATOR '\\'
+#define WCHAR_PATH_SEPARATOR L'\\'
+#define STRING_PATH_SEPARATOR "\\"
+#define WSTRING_PATH_SEPARATOR L"\\"
+
+#else
+
+#define CHAR_PATH_SEPARATOR '/'
+#define WCHAR_PATH_SEPARATOR L'/'
+#define STRING_PATH_SEPARATOR "/"
+#define WSTRING_PATH_SEPARATOR L"/"
+
+#endif
+
+EXTERN_C_END
+
+#endif
diff --git a/3rdparty/7z/src/7zVersion.h b/3rdparty/7z/src/7zVersion.h
new file mode 100644
index 0000000000..0074c64be9
--- /dev/null
+++ b/3rdparty/7z/src/7zVersion.h
@@ -0,0 +1,27 @@
+#define MY_VER_MAJOR 19
+#define MY_VER_MINOR 00
+#define MY_VER_BUILD 0
+#define MY_VERSION_NUMBERS "19.00"
+#define MY_VERSION MY_VERSION_NUMBERS
+
+#ifdef MY_CPU_NAME
+  #define MY_VERSION_CPU MY_VERSION " (" MY_CPU_NAME ")"
+#else
+  #define MY_VERSION_CPU MY_VERSION
+#endif
+
+#define MY_DATE "2019-02-21"
+#undef MY_COPYRIGHT
+#undef MY_VERSION_COPYRIGHT_DATE
+#define MY_AUTHOR_NAME "Igor Pavlov"
+#define MY_COPYRIGHT_PD "Igor Pavlov : Public domain"
+#define MY_COPYRIGHT_CR "Copyright (c) 1999-2018 Igor Pavlov"
+
+#ifdef USE_COPYRIGHT_CR
+  #define MY_COPYRIGHT MY_COPYRIGHT_CR
+#else
+  #define MY_COPYRIGHT MY_COPYRIGHT_PD
+#endif
+
+#define MY_COPYRIGHT_DATE MY_COPYRIGHT " : " MY_DATE
+#define MY_VERSION_COPYRIGHT_DATE MY_VERSION_CPU " : " MY_COPYRIGHT " : " MY_DATE
diff --git a/3rdparty/7z/src/7zVersion.rc b/3rdparty/7z/src/7zVersion.rc
new file mode 100644
index 0000000000..6ed26de744
--- /dev/null
+++ b/3rdparty/7z/src/7zVersion.rc
@@ -0,0 +1,55 @@
+#define MY_VS_FFI_FILEFLAGSMASK 0x0000003FL
+#define MY_VOS_NT_WINDOWS32 0x00040004L
+#define MY_VOS_CE_WINDOWS32 0x00050004L
+
+#define MY_VFT_APP 0x00000001L
+#define MY_VFT_DLL 0x00000002L
+
+// #include <winver.h>
+
+#ifndef MY_VERSION
+#include "7zVersion.h"
+#endif
+
+#define MY_VER MY_VER_MAJOR,MY_VER_MINOR,MY_VER_BUILD,0
+
+#ifdef DEBUG
+#define DBG_FL VS_FF_DEBUG
+#else
+#define DBG_FL 0
+#endif
+
+#define MY_VERSION_INFO(fileType, descr, intName, origName) \
+LANGUAGE 9, 1 \
+1 VERSIONINFO \
+  FILEVERSION MY_VER \
+  PRODUCTVERSION MY_VER \
+  FILEFLAGSMASK MY_VS_FFI_FILEFLAGSMASK \
+  FILEFLAGS DBG_FL \
+  FILEOS MY_VOS_NT_WINDOWS32 \
+  FILETYPE fileType \
+  FILESUBTYPE 0x0L \
+BEGIN \
+    BLOCK "StringFileInfo" \
+    BEGIN \
+        BLOCK "040904b0" \
+        BEGIN \
+            VALUE "CompanyName", "Igor Pavlov" \
+            VALUE "FileDescription", descr \
+            VALUE "FileVersion", MY_VERSION \
+            VALUE "InternalName", intName \
+            VALUE "LegalCopyright", MY_COPYRIGHT \
+            VALUE "OriginalFilename", origName \
+            VALUE "ProductName", "7-Zip" \
+            VALUE "ProductVersion", MY_VERSION \
+        END \
+    END \
+    BLOCK "VarFileInfo" \
+    BEGIN \
+        VALUE "Translation", 0x409, 1200 \
+    END \
+END
+
+#define MY_VERSION_INFO_APP(descr, intName) MY_VERSION_INFO(MY_VFT_APP, descr, intName, intName ".exe")
+
+#define MY_VERSION_INFO_DLL(descr, intName) MY_VERSION_INFO(MY_VFT_DLL, descr, intName, intName ".dll")
diff --git a/3rdparty/7z/src/Aes.c b/3rdparty/7z/src/Aes.c
new file mode 100644
index 0000000000..8f7d50ea24
--- /dev/null
+++ b/3rdparty/7z/src/Aes.c
@@ -0,0 +1,306 @@
+/* Aes.c -- AES encryption /
decryption +2017-01-24 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "Aes.h" +#include "CpuArch.h" + +static UInt32 T[256 * 4]; +static const Byte Sbox[256] = { + 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, + 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, + 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, + 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, + 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, + 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, + 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, + 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, + 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, + 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, + 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, + 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, + 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, + 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, + 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, + 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16}; + +void MY_FAST_CALL AesCbc_Encode(UInt32 *ivAes, Byte *data, size_t numBlocks); +void MY_FAST_CALL AesCbc_Decode(UInt32 *ivAes, Byte *data, size_t numBlocks); +void MY_FAST_CALL AesCtr_Code(UInt32 *ivAes, Byte *data, size_t numBlocks); + +void MY_FAST_CALL AesCbc_Encode_Intel(UInt32 *ivAes, Byte *data, size_t numBlocks); +void MY_FAST_CALL AesCbc_Decode_Intel(UInt32 *ivAes, Byte *data, size_t numBlocks); +void MY_FAST_CALL AesCtr_Code_Intel(UInt32 *ivAes, Byte *data, size_t numBlocks); + +AES_CODE_FUNC g_AesCbc_Encode; +AES_CODE_FUNC g_AesCbc_Decode; +AES_CODE_FUNC g_AesCtr_Code; + +static UInt32 D[256 * 4]; +static Byte InvS[256]; + +static const Byte Rcon[11] = { 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 }; + +#define xtime(x) ((((x) << 1) ^ (((x) & 0x80) != 0 ? 
0x1B : 0)) & 0xFF) + +#define Ui32(a0, a1, a2, a3) ((UInt32)(a0) | ((UInt32)(a1) << 8) | ((UInt32)(a2) << 16) | ((UInt32)(a3) << 24)) + +#define gb0(x) ( (x) & 0xFF) +#define gb1(x) (((x) >> ( 8)) & 0xFF) +#define gb2(x) (((x) >> (16)) & 0xFF) +#define gb3(x) (((x) >> (24))) + +#define gb(n, x) gb ## n(x) + +#define TT(x) (T + (x << 8)) +#define DD(x) (D + (x << 8)) + + +void AesGenTables(void) +{ + unsigned i; + for (i = 0; i < 256; i++) + InvS[Sbox[i]] = (Byte)i; + + for (i = 0; i < 256; i++) + { + { + UInt32 a1 = Sbox[i]; + UInt32 a2 = xtime(a1); + UInt32 a3 = a2 ^ a1; + TT(0)[i] = Ui32(a2, a1, a1, a3); + TT(1)[i] = Ui32(a3, a2, a1, a1); + TT(2)[i] = Ui32(a1, a3, a2, a1); + TT(3)[i] = Ui32(a1, a1, a3, a2); + } + { + UInt32 a1 = InvS[i]; + UInt32 a2 = xtime(a1); + UInt32 a4 = xtime(a2); + UInt32 a8 = xtime(a4); + UInt32 a9 = a8 ^ a1; + UInt32 aB = a8 ^ a2 ^ a1; + UInt32 aD = a8 ^ a4 ^ a1; + UInt32 aE = a8 ^ a4 ^ a2; + DD(0)[i] = Ui32(aE, a9, aD, aB); + DD(1)[i] = Ui32(aB, aE, a9, aD); + DD(2)[i] = Ui32(aD, aB, aE, a9); + DD(3)[i] = Ui32(a9, aD, aB, aE); + } + } + + g_AesCbc_Encode = AesCbc_Encode; + g_AesCbc_Decode = AesCbc_Decode; + g_AesCtr_Code = AesCtr_Code; + + #ifdef MY_CPU_X86_OR_AMD64 + if (CPU_Is_Aes_Supported()) + { + g_AesCbc_Encode = AesCbc_Encode_Intel; + g_AesCbc_Decode = AesCbc_Decode_Intel; + g_AesCtr_Code = AesCtr_Code_Intel; + } + #endif +} + + +#define HT(i, x, s) TT(x)[gb(x, s[(i + x) & 3])] + +#define HT4(m, i, s, p) m[i] = \ + HT(i, 0, s) ^ \ + HT(i, 1, s) ^ \ + HT(i, 2, s) ^ \ + HT(i, 3, s) ^ w[p + i] + +#define HT16(m, s, p) \ + HT4(m, 0, s, p); \ + HT4(m, 1, s, p); \ + HT4(m, 2, s, p); \ + HT4(m, 3, s, p); \ + +#define FT(i, x) Sbox[gb(x, m[(i + x) & 3])] +#define FT4(i) dest[i] = Ui32(FT(i, 0), FT(i, 1), FT(i, 2), FT(i, 3)) ^ w[i]; + + +#define HD(i, x, s) DD(x)[gb(x, s[(i - x) & 3])] + +#define HD4(m, i, s, p) m[i] = \ + HD(i, 0, s) ^ \ + HD(i, 1, s) ^ \ + HD(i, 2, s) ^ \ + HD(i, 3, s) ^ w[p + i]; + +#define HD16(m, s, p) \ + HD4(m, 0, s, p); \ + HD4(m, 1, s, p); \ + HD4(m, 2, s, p); \ + HD4(m, 3, s, p); \ + +#define FD(i, x) InvS[gb(x, m[(i - x) & 3])] +#define FD4(i) dest[i] = Ui32(FD(i, 0), FD(i, 1), FD(i, 2), FD(i, 3)) ^ w[i]; + +void MY_FAST_CALL Aes_SetKey_Enc(UInt32 *w, const Byte *key, unsigned keySize) +{ + unsigned i, wSize; + wSize = keySize + 28; + keySize /= 4; + w[0] = ((UInt32)keySize / 2) + 3; + w += 4; + + for (i = 0; i < keySize; i++, key += 4) + w[i] = GetUi32(key); + + for (; i < wSize; i++) + { + UInt32 t = w[(size_t)i - 1]; + unsigned rem = i % keySize; + if (rem == 0) + t = Ui32(Sbox[gb1(t)] ^ Rcon[i / keySize], Sbox[gb2(t)], Sbox[gb3(t)], Sbox[gb0(t)]); + else if (keySize > 6 && rem == 4) + t = Ui32(Sbox[gb0(t)], Sbox[gb1(t)], Sbox[gb2(t)], Sbox[gb3(t)]); + w[i] = w[i - keySize] ^ t; + } +} + +void MY_FAST_CALL Aes_SetKey_Dec(UInt32 *w, const Byte *key, unsigned keySize) +{ + unsigned i, num; + Aes_SetKey_Enc(w, key, keySize); + num = keySize + 20; + w += 8; + for (i = 0; i < num; i++) + { + UInt32 r = w[i]; + w[i] = + DD(0)[Sbox[gb0(r)]] ^ + DD(1)[Sbox[gb1(r)]] ^ + DD(2)[Sbox[gb2(r)]] ^ + DD(3)[Sbox[gb3(r)]]; + } +} + +/* Aes_Encode and Aes_Decode functions work with little-endian words. + src and dest are pointers to 4 UInt32 words. 
+ src and dest can point to same block */ + +static void Aes_Encode(const UInt32 *w, UInt32 *dest, const UInt32 *src) +{ + UInt32 s[4]; + UInt32 m[4]; + UInt32 numRounds2 = w[0]; + w += 4; + s[0] = src[0] ^ w[0]; + s[1] = src[1] ^ w[1]; + s[2] = src[2] ^ w[2]; + s[3] = src[3] ^ w[3]; + w += 4; + for (;;) + { + HT16(m, s, 0); + if (--numRounds2 == 0) + break; + HT16(s, m, 4); + w += 8; + } + w += 4; + FT4(0); FT4(1); FT4(2); FT4(3); +} + +static void Aes_Decode(const UInt32 *w, UInt32 *dest, const UInt32 *src) +{ + UInt32 s[4]; + UInt32 m[4]; + UInt32 numRounds2 = w[0]; + w += 4 + numRounds2 * 8; + s[0] = src[0] ^ w[0]; + s[1] = src[1] ^ w[1]; + s[2] = src[2] ^ w[2]; + s[3] = src[3] ^ w[3]; + for (;;) + { + w -= 8; + HD16(m, s, 4); + if (--numRounds2 == 0) + break; + HD16(s, m, 0); + } + FD4(0); FD4(1); FD4(2); FD4(3); +} + +void AesCbc_Init(UInt32 *p, const Byte *iv) +{ + unsigned i; + for (i = 0; i < 4; i++) + p[i] = GetUi32(iv + i * 4); +} + +void MY_FAST_CALL AesCbc_Encode(UInt32 *p, Byte *data, size_t numBlocks) +{ + for (; numBlocks != 0; numBlocks--, data += AES_BLOCK_SIZE) + { + p[0] ^= GetUi32(data); + p[1] ^= GetUi32(data + 4); + p[2] ^= GetUi32(data + 8); + p[3] ^= GetUi32(data + 12); + + Aes_Encode(p + 4, p, p); + + SetUi32(data, p[0]); + SetUi32(data + 4, p[1]); + SetUi32(data + 8, p[2]); + SetUi32(data + 12, p[3]); + } +} + +void MY_FAST_CALL AesCbc_Decode(UInt32 *p, Byte *data, size_t numBlocks) +{ + UInt32 in[4], out[4]; + for (; numBlocks != 0; numBlocks--, data += AES_BLOCK_SIZE) + { + in[0] = GetUi32(data); + in[1] = GetUi32(data + 4); + in[2] = GetUi32(data + 8); + in[3] = GetUi32(data + 12); + + Aes_Decode(p + 4, out, in); + + SetUi32(data, p[0] ^ out[0]); + SetUi32(data + 4, p[1] ^ out[1]); + SetUi32(data + 8, p[2] ^ out[2]); + SetUi32(data + 12, p[3] ^ out[3]); + + p[0] = in[0]; + p[1] = in[1]; + p[2] = in[2]; + p[3] = in[3]; + } +} + +void MY_FAST_CALL AesCtr_Code(UInt32 *p, Byte *data, size_t numBlocks) +{ + for (; numBlocks != 0; numBlocks--) + { + UInt32 temp[4]; + unsigned i; + + if (++p[0] == 0) + p[1]++; + + Aes_Encode(p + 4, temp, p); + + for (i = 0; i < 4; i++, data += 4) + { + UInt32 t = temp[i]; + + #ifdef MY_CPU_LE_UNALIGN + *((UInt32 *)data) ^= t; + #else + data[0] ^= (t & 0xFF); + data[1] ^= ((t >> 8) & 0xFF); + data[2] ^= ((t >> 16) & 0xFF); + data[3] ^= ((t >> 24)); + #endif + } + } +} diff --git a/3rdparty/7z/src/Aes.h b/3rdparty/7z/src/Aes.h new file mode 100644 index 0000000000..381e979d1b --- /dev/null +++ b/3rdparty/7z/src/Aes.h @@ -0,0 +1,38 @@ +/* Aes.h -- AES encryption / decryption +2013-01-18 : Igor Pavlov : Public domain */ + +#ifndef __AES_H +#define __AES_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +#define AES_BLOCK_SIZE 16 + +/* Call AesGenTables one time before other AES functions */ +void AesGenTables(void); + +/* UInt32 pointers must be 16-byte aligned */ + +/* 16-byte (4 * 32-bit words) blocks: 1 (IV) + 1 (keyMode) + 15 (AES-256 roundKeys) */ +#define AES_NUM_IVMRK_WORDS ((1 + 1 + 15) * 4) + +/* aes - 16-byte aligned pointer to keyMode+roundKeys sequence */ +/* keySize = 16 or 24 or 32 (bytes) */ +typedef void (MY_FAST_CALL *AES_SET_KEY_FUNC)(UInt32 *aes, const Byte *key, unsigned keySize); +void MY_FAST_CALL Aes_SetKey_Enc(UInt32 *aes, const Byte *key, unsigned keySize); +void MY_FAST_CALL Aes_SetKey_Dec(UInt32 *aes, const Byte *key, unsigned keySize); + +/* ivAes - 16-byte aligned pointer to iv+keyMode+roundKeys sequence: UInt32[AES_NUM_IVMRK_WORDS] */ +void AesCbc_Init(UInt32 *ivAes, const Byte *iv); /* iv size is AES_BLOCK_SIZE 
*/
+/* data - 16-byte aligned pointer to data */
+/* numBlocks - the number of 16-byte blocks in data array */
+typedef void (MY_FAST_CALL *AES_CODE_FUNC)(UInt32 *ivAes, Byte *data, size_t numBlocks);
+extern AES_CODE_FUNC g_AesCbc_Encode;
+extern AES_CODE_FUNC g_AesCbc_Decode;
+extern AES_CODE_FUNC g_AesCtr_Code;
+
+EXTERN_C_END
+
+#endif
diff --git a/3rdparty/7z/src/AesOpt.c b/3rdparty/7z/src/AesOpt.c
new file mode 100644
index 0000000000..0e7f49a1b0
--- /dev/null
+++ b/3rdparty/7z/src/AesOpt.c
@@ -0,0 +1,184 @@
+/* AesOpt.c -- Intel's AES
+2017-06-08 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include "CpuArch.h"
+
+#ifdef MY_CPU_X86_OR_AMD64
+#if (_MSC_VER > 1500) || (_MSC_FULL_VER >= 150030729)
+#define USE_INTEL_AES
+#endif
+#endif
+
+#ifdef USE_INTEL_AES
+
+#include <wmmintrin.h>
+
+void MY_FAST_CALL AesCbc_Encode_Intel(__m128i *p, __m128i *data, size_t numBlocks)
+{
+  __m128i m = *p;
+  for (; numBlocks != 0; numBlocks--, data++)
+  {
+    UInt32 numRounds2 = *(const UInt32 *)(p + 1) - 1;
+    const __m128i *w = p + 3;
+    m = _mm_xor_si128(m, *data);
+    m = _mm_xor_si128(m, p[2]);
+    do
+    {
+      m = _mm_aesenc_si128(m, w[0]);
+      m = _mm_aesenc_si128(m, w[1]);
+      w += 2;
+    }
+    while (--numRounds2 != 0);
+    m = _mm_aesenc_si128(m, w[0]);
+    m = _mm_aesenclast_si128(m, w[1]);
+    *data = m;
+  }
+  *p = m;
+}
+
+#define NUM_WAYS 3
+
+#define AES_OP_W(op, n) { \
+    const __m128i t = w[n]; \
+    m0 = op(m0, t); \
+    m1 = op(m1, t); \
+    m2 = op(m2, t); \
+    }
+
+#define AES_DEC(n) AES_OP_W(_mm_aesdec_si128, n)
+#define AES_DEC_LAST(n) AES_OP_W(_mm_aesdeclast_si128, n)
+#define AES_ENC(n) AES_OP_W(_mm_aesenc_si128, n)
+#define AES_ENC_LAST(n) AES_OP_W(_mm_aesenclast_si128, n)
+
+void MY_FAST_CALL AesCbc_Decode_Intel(__m128i *p, __m128i *data, size_t numBlocks)
+{
+  __m128i iv = *p;
+  for (; numBlocks >= NUM_WAYS; numBlocks -= NUM_WAYS, data += NUM_WAYS)
+  {
+    UInt32 numRounds2 = *(const UInt32 *)(p + 1);
+    const __m128i *w = p + numRounds2 * 2;
+    __m128i m0, m1, m2;
+    {
+      const __m128i t = w[2];
+      m0 = _mm_xor_si128(t, data[0]);
+      m1 = _mm_xor_si128(t, data[1]);
+      m2 = _mm_xor_si128(t, data[2]);
+    }
+    numRounds2--;
+    do
+    {
+      AES_DEC(1)
+      AES_DEC(0)
+      w -= 2;
+    }
+    while (--numRounds2 != 0);
+    AES_DEC(1)
+    AES_DEC_LAST(0)
+
+    {
+      __m128i t;
+      t = _mm_xor_si128(m0, iv); iv = data[0]; data[0] = t;
+      t = _mm_xor_si128(m1, iv); iv = data[1]; data[1] = t;
+      t = _mm_xor_si128(m2, iv); iv = data[2]; data[2] = t;
+    }
+  }
+  for (; numBlocks != 0; numBlocks--, data++)
+  {
+    UInt32 numRounds2 = *(const UInt32 *)(p + 1);
+    const __m128i *w = p + numRounds2 * 2;
+    __m128i m = _mm_xor_si128(w[2], *data);
+    numRounds2--;
+    do
+    {
+      m = _mm_aesdec_si128(m, w[1]);
+      m = _mm_aesdec_si128(m, w[0]);
+      w -= 2;
+    }
+    while (--numRounds2 != 0);
+    m = _mm_aesdec_si128(m, w[1]);
+    m = _mm_aesdeclast_si128(m, w[0]);
+
+    m = _mm_xor_si128(m, iv);
+    iv = *data;
+    *data = m;
+  }
+  *p = iv;
+}
+
+void MY_FAST_CALL AesCtr_Code_Intel(__m128i *p, __m128i *data, size_t numBlocks)
+{
+  __m128i ctr = *p;
+  __m128i one;
+  one.m128i_u64[0] = 1;
+  one.m128i_u64[1] = 0;
+  for (; numBlocks >= NUM_WAYS; numBlocks -= NUM_WAYS, data += NUM_WAYS)
+  {
+    UInt32 numRounds2 = *(const UInt32 *)(p + 1) - 1;
+    const __m128i *w = p;
+    __m128i m0, m1, m2;
+    {
+      const __m128i t = w[2];
+      ctr = _mm_add_epi64(ctr, one); m0 = _mm_xor_si128(ctr, t);
+      ctr = _mm_add_epi64(ctr, one); m1 = _mm_xor_si128(ctr, t);
+      ctr = _mm_add_epi64(ctr, one); m2 = _mm_xor_si128(ctr, t);
+    }
+    w += 3;
+    do
+    {
+      AES_ENC(0)
+      AES_ENC(1)
+      w += 2;
+    }
+    while (--numRounds2 != 0);
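+    /* descriptive note (editor's addition): the loop above leaves exactly two
+       rounds; the last normal round and the final AESENCLAST follow, after
+       which the three keystream blocks are XORed into the data blocks */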
+    AES_ENC(0)
+    AES_ENC_LAST(1)
+    data[0] = _mm_xor_si128(data[0], m0);
+    data[1] = _mm_xor_si128(data[1], m1);
+    data[2] = _mm_xor_si128(data[2], m2);
+  }
+  for (; numBlocks != 0; numBlocks--, data++)
+  {
+    UInt32 numRounds2 = *(const UInt32 *)(p + 1) - 1;
+    const __m128i *w = p;
+    __m128i m;
+    ctr = _mm_add_epi64(ctr, one);
+    m = _mm_xor_si128(ctr, p[2]);
+    w += 3;
+    do
+    {
+      m = _mm_aesenc_si128(m, w[0]);
+      m = _mm_aesenc_si128(m, w[1]);
+      w += 2;
+    }
+    while (--numRounds2 != 0);
+    m = _mm_aesenc_si128(m, w[0]);
+    m = _mm_aesenclast_si128(m, w[1]);
+    *data = _mm_xor_si128(*data, m);
+  }
+  *p = ctr;
+}
+
+#else
+
+void MY_FAST_CALL AesCbc_Encode(UInt32 *ivAes, Byte *data, size_t numBlocks);
+void MY_FAST_CALL AesCbc_Decode(UInt32 *ivAes, Byte *data, size_t numBlocks);
+void MY_FAST_CALL AesCtr_Code(UInt32 *ivAes, Byte *data, size_t numBlocks);
+
+void MY_FAST_CALL AesCbc_Encode_Intel(UInt32 *p, Byte *data, size_t numBlocks)
+{
+  AesCbc_Encode(p, data, numBlocks);
+}
+
+void MY_FAST_CALL AesCbc_Decode_Intel(UInt32 *p, Byte *data, size_t numBlocks)
+{
+  AesCbc_Decode(p, data, numBlocks);
+}
+
+void MY_FAST_CALL AesCtr_Code_Intel(UInt32 *p, Byte *data, size_t numBlocks)
+{
+  AesCtr_Code(p, data, numBlocks);
+}
+
+#endif
diff --git a/3rdparty/7z/src/Alloc.c b/3rdparty/7z/src/Alloc.c
new file mode 100644
index 0000000000..30b499e5ff
--- /dev/null
+++ b/3rdparty/7z/src/Alloc.c
@@ -0,0 +1,455 @@
+/* Alloc.c -- Memory allocation functions
+2018-04-27 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include <stdio.h>
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+#include <stdlib.h>
+
+#include "Alloc.h"
+
+/* #define _SZ_ALLOC_DEBUG */
+
+/* use _SZ_ALLOC_DEBUG to debug alloc/free operations */
+#ifdef _SZ_ALLOC_DEBUG
+
+#include <stdio.h>
+int g_allocCount = 0;
+int g_allocCountMid = 0;
+int g_allocCountBig = 0;
+
+
+#define CONVERT_INT_TO_STR(charType, tempSize) \
+    unsigned char temp[tempSize]; unsigned i = 0; \
+    while (val >= 10) { temp[i++] = (unsigned char)('0' + (unsigned)(val % 10)); val /= 10; } \
+    *s++ = (charType)('0' + (unsigned)val); \
+    while (i != 0) { i--; *s++ = temp[i]; } \
+    *s = 0;
+
+static void ConvertUInt64ToString(UInt64 val, char *s)
+{
+  CONVERT_INT_TO_STR(char, 24);
+}
+
+#define GET_HEX_CHAR(t) ((char)(((t < 10) ?
('0' + t) : ('A' + (t - 10))))) + +static void ConvertUInt64ToHex(UInt64 val, char *s) +{ + UInt64 v = val; + unsigned i; + for (i = 1;; i++) + { + v >>= 4; + if (v == 0) + break; + } + s[i] = 0; + do + { + unsigned t = (unsigned)(val & 0xF); + val >>= 4; + s[--i] = GET_HEX_CHAR(t); + } + while (i); +} + +#define DEBUG_OUT_STREAM stderr + +static void Print(const char *s) +{ + fputs(s, DEBUG_OUT_STREAM); +} + +static void PrintAligned(const char *s, size_t align) +{ + size_t len = strlen(s); + for(;;) + { + fputc(' ', DEBUG_OUT_STREAM); + if (len >= align) + break; + ++len; + } + Print(s); +} + +static void PrintLn() +{ + Print("\n"); +} + +static void PrintHex(UInt64 v, size_t align) +{ + char s[32]; + ConvertUInt64ToHex(v, s); + PrintAligned(s, align); +} + +static void PrintDec(UInt64 v, size_t align) +{ + char s[32]; + ConvertUInt64ToString(v, s); + PrintAligned(s, align); +} + +static void PrintAddr(void *p) +{ + PrintHex((UInt64)(size_t)(ptrdiff_t)p, 12); +} + + +#define PRINT_ALLOC(name, cnt, size, ptr) \ + Print(name " "); \ + PrintDec(cnt++, 10); \ + PrintHex(size, 10); \ + PrintAddr(ptr); \ + PrintLn(); + +#define PRINT_FREE(name, cnt, ptr) if (ptr) { \ + Print(name " "); \ + PrintDec(--cnt, 10); \ + PrintAddr(ptr); \ + PrintLn(); } + +#else + +#define PRINT_ALLOC(name, cnt, size, ptr) +#define PRINT_FREE(name, cnt, ptr) +#define Print(s) +#define PrintLn() +#define PrintHex(v, align) +#define PrintDec(v, align) +#define PrintAddr(p) + +#endif + + + +void *MyAlloc(size_t size) +{ + if (size == 0) + return NULL; + #ifdef _SZ_ALLOC_DEBUG + { + void *p = malloc(size); + PRINT_ALLOC("Alloc ", g_allocCount, size, p); + return p; + } + #else + return malloc(size); + #endif +} + +void MyFree(void *address) +{ + PRINT_FREE("Free ", g_allocCount, address); + + free(address); +} + +#ifdef _WIN32 + +void *MidAlloc(size_t size) +{ + if (size == 0) + return NULL; + + PRINT_ALLOC("Alloc-Mid", g_allocCountMid, size, NULL); + + return VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); +} + +void MidFree(void *address) +{ + PRINT_FREE("Free-Mid", g_allocCountMid, address); + + if (!address) + return; + VirtualFree(address, 0, MEM_RELEASE); +} + +#ifndef MEM_LARGE_PAGES +#undef _7ZIP_LARGE_PAGES +#endif + +#ifdef _7ZIP_LARGE_PAGES +SIZE_T g_LargePageSize = 0; +typedef SIZE_T (WINAPI *GetLargePageMinimumP)(); +#endif + +void SetLargePageSize() +{ + #ifdef _7ZIP_LARGE_PAGES + SIZE_T size; + GetLargePageMinimumP largePageMinimum = (GetLargePageMinimumP) + GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), "GetLargePageMinimum"); + if (!largePageMinimum) + return; + size = largePageMinimum(); + if (size == 0 || (size & (size - 1)) != 0) + return; + g_LargePageSize = size; + #endif +} + + +void *BigAlloc(size_t size) +{ + if (size == 0) + return NULL; + + PRINT_ALLOC("Alloc-Big", g_allocCountBig, size, NULL); + + #ifdef _7ZIP_LARGE_PAGES + { + SIZE_T ps = g_LargePageSize; + if (ps != 0 && ps <= (1 << 30) && size > (ps / 2)) + { + size_t size2; + ps--; + size2 = (size + ps) & ~ps; + if (size2 >= size) + { + void *res = VirtualAlloc(NULL, size2, MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE); + if (res) + return res; + } + } + } + #endif + + return VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); +} + +void BigFree(void *address) +{ + PRINT_FREE("Free-Big", g_allocCountBig, address); + + if (!address) + return; + VirtualFree(address, 0, MEM_RELEASE); +} + +#endif + + +static void *SzAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return MyAlloc(size); } +static void SzFree(ISzAllocPtr p, 
void *address) { UNUSED_VAR(p); MyFree(address); } +const ISzAlloc g_Alloc = { SzAlloc, SzFree }; + +static void *SzMidAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return MidAlloc(size); } +static void SzMidFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); MidFree(address); } +const ISzAlloc g_MidAlloc = { SzMidAlloc, SzMidFree }; + +static void *SzBigAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return BigAlloc(size); } +static void SzBigFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); BigFree(address); } +const ISzAlloc g_BigAlloc = { SzBigAlloc, SzBigFree }; + + +/* + uintptr_t : C99 (optional) + : unsupported in VS6 +*/ + +#ifdef _WIN32 + typedef UINT_PTR UIntPtr; +#else + /* + typedef uintptr_t UIntPtr; + */ + typedef ptrdiff_t UIntPtr; +#endif + + +#define ADJUST_ALLOC_SIZE 0 +/* +#define ADJUST_ALLOC_SIZE (sizeof(void *) - 1) +*/ +/* + Use (ADJUST_ALLOC_SIZE = (sizeof(void *) - 1)), if + MyAlloc() can return address that is NOT multiple of sizeof(void *). +*/ + + +/* +#define MY_ALIGN_PTR_DOWN(p, align) ((void *)((char *)(p) - ((size_t)(UIntPtr)(p) & ((align) - 1)))) +*/ +#define MY_ALIGN_PTR_DOWN(p, align) ((void *)((((UIntPtr)(p)) & ~((UIntPtr)(align) - 1)))) + +#define MY_ALIGN_PTR_UP_PLUS(p, align) MY_ALIGN_PTR_DOWN(((char *)(p) + (align) + ADJUST_ALLOC_SIZE), align) + + +#if (_POSIX_C_SOURCE >= 200112L) && !defined(_WIN32) + #define USE_posix_memalign +#endif + +/* + This posix_memalign() is for test purposes only. + We also need special Free() function instead of free(), + if this posix_memalign() is used. +*/ + +/* +static int posix_memalign(void **ptr, size_t align, size_t size) +{ + size_t newSize = size + align; + void *p; + void *pAligned; + *ptr = NULL; + if (newSize < size) + return 12; // ENOMEM + p = MyAlloc(newSize); + if (!p) + return 12; // ENOMEM + pAligned = MY_ALIGN_PTR_UP_PLUS(p, align); + ((void **)pAligned)[-1] = p; + *ptr = pAligned; + return 0; +} +*/ + +/* + ALLOC_ALIGN_SIZE >= sizeof(void *) + ALLOC_ALIGN_SIZE >= cache_line_size +*/ + +#define ALLOC_ALIGN_SIZE ((size_t)1 << 7) + +static void *SzAlignedAlloc(ISzAllocPtr pp, size_t size) +{ + #ifndef USE_posix_memalign + + void *p; + void *pAligned; + size_t newSize; + UNUSED_VAR(pp); + + /* also we can allocate additional dummy ALLOC_ALIGN_SIZE bytes after aligned + block to prevent cache line sharing with another allocated blocks */ + + newSize = size + ALLOC_ALIGN_SIZE * 1 + ADJUST_ALLOC_SIZE; + if (newSize < size) + return NULL; + + p = MyAlloc(newSize); + + if (!p) + return NULL; + pAligned = MY_ALIGN_PTR_UP_PLUS(p, ALLOC_ALIGN_SIZE); + + Print(" size="); PrintHex(size, 8); + Print(" a_size="); PrintHex(newSize, 8); + Print(" ptr="); PrintAddr(p); + Print(" a_ptr="); PrintAddr(pAligned); + PrintLn(); + + ((void **)pAligned)[-1] = p; + + return pAligned; + + #else + + void *p; + UNUSED_VAR(pp); + if (posix_memalign(&p, ALLOC_ALIGN_SIZE, size)) + return NULL; + + Print(" posix_memalign="); PrintAddr(p); + PrintLn(); + + return p; + + #endif +} + + +static void SzAlignedFree(ISzAllocPtr pp, void *address) +{ + UNUSED_VAR(pp); + #ifndef USE_posix_memalign + if (address) + MyFree(((void **)address)[-1]); + #else + free(address); + #endif +} + + +const ISzAlloc g_AlignedAlloc = { SzAlignedAlloc, SzAlignedFree }; + + + +#define MY_ALIGN_PTR_DOWN_1(p) MY_ALIGN_PTR_DOWN(p, sizeof(void *)) + +/* we align ptr to support cases where CAlignOffsetAlloc::offset is not multiply of sizeof(void *) */ +#define REAL_BLOCK_PTR_VAR(p) ((void **)MY_ALIGN_PTR_DOWN_1(p))[-1] +/* +#define REAL_BLOCK_PTR_VAR(p) 
((void **)(p))[-1] +*/ + +static void *AlignOffsetAlloc_Alloc(ISzAllocPtr pp, size_t size) +{ + CAlignOffsetAlloc *p = CONTAINER_FROM_VTBL(pp, CAlignOffsetAlloc, vt); + void *adr; + void *pAligned; + size_t newSize; + size_t extra; + size_t alignSize = (size_t)1 << p->numAlignBits; + + if (alignSize < sizeof(void *)) + alignSize = sizeof(void *); + + if (p->offset >= alignSize) + return NULL; + + /* also we can allocate additional dummy ALLOC_ALIGN_SIZE bytes after aligned + block to prevent cache line sharing with another allocated blocks */ + extra = p->offset & (sizeof(void *) - 1); + newSize = size + alignSize + extra + ADJUST_ALLOC_SIZE; + if (newSize < size) + return NULL; + + adr = ISzAlloc_Alloc(p->baseAlloc, newSize); + + if (!adr) + return NULL; + + pAligned = (char *)MY_ALIGN_PTR_DOWN((char *)adr + + alignSize - p->offset + extra + ADJUST_ALLOC_SIZE, alignSize) + p->offset; + + PrintLn(); + Print("- Aligned: "); + Print(" size="); PrintHex(size, 8); + Print(" a_size="); PrintHex(newSize, 8); + Print(" ptr="); PrintAddr(adr); + Print(" a_ptr="); PrintAddr(pAligned); + PrintLn(); + + REAL_BLOCK_PTR_VAR(pAligned) = adr; + + return pAligned; +} + + +static void AlignOffsetAlloc_Free(ISzAllocPtr pp, void *address) +{ + if (address) + { + CAlignOffsetAlloc *p = CONTAINER_FROM_VTBL(pp, CAlignOffsetAlloc, vt); + PrintLn(); + Print("- Aligned Free: "); + PrintLn(); + ISzAlloc_Free(p->baseAlloc, REAL_BLOCK_PTR_VAR(address)); + } +} + + +void AlignOffsetAlloc_CreateVTable(CAlignOffsetAlloc *p) +{ + p->vt.Alloc = AlignOffsetAlloc_Alloc; + p->vt.Free = AlignOffsetAlloc_Free; +} diff --git a/3rdparty/7z/src/Alloc.h b/3rdparty/7z/src/Alloc.h new file mode 100644 index 0000000000..3d796e5eee --- /dev/null +++ b/3rdparty/7z/src/Alloc.h @@ -0,0 +1,51 @@ +/* Alloc.h -- Memory allocation functions +2018-02-19 : Igor Pavlov : Public domain */ + +#ifndef __COMMON_ALLOC_H +#define __COMMON_ALLOC_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +void *MyAlloc(size_t size); +void MyFree(void *address); + +#ifdef _WIN32 + +void SetLargePageSize(); + +void *MidAlloc(size_t size); +void MidFree(void *address); +void *BigAlloc(size_t size); +void BigFree(void *address); + +#else + +#define MidAlloc(size) MyAlloc(size) +#define MidFree(address) MyFree(address) +#define BigAlloc(size) MyAlloc(size) +#define BigFree(address) MyFree(address) + +#endif + +extern const ISzAlloc g_Alloc; +extern const ISzAlloc g_BigAlloc; +extern const ISzAlloc g_MidAlloc; +extern const ISzAlloc g_AlignedAlloc; + + +typedef struct +{ + ISzAlloc vt; + ISzAllocPtr baseAlloc; + unsigned numAlignBits; /* ((1 << numAlignBits) >= sizeof(void *)) */ + size_t offset; /* (offset == (k * sizeof(void *)) && offset < (1 << numAlignBits) */ +} CAlignOffsetAlloc; + +void AlignOffsetAlloc_CreateVTable(CAlignOffsetAlloc *p); + + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/Bcj2.c b/3rdparty/7z/src/Bcj2.c new file mode 100644 index 0000000000..da93985cf6 --- /dev/null +++ b/3rdparty/7z/src/Bcj2.c @@ -0,0 +1,257 @@ +/* Bcj2.c -- BCJ2 Decoder (Converter for x86 code) +2018-04-28 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "Bcj2.h" +#include "CpuArch.h" + +#define CProb UInt16 + +#define kTopValue ((UInt32)1 << 24) +#define kNumModelBits 11 +#define kBitModelTotal (1 << kNumModelBits) +#define kNumMoveBits 5 + +#define _IF_BIT_0 ttt = *prob; bound = (p->range >> kNumModelBits) * ttt; if (p->code < bound) +#define _UPDATE_0 p->range = bound; *prob = (CProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits)); +#define 
_UPDATE_1 p->range -= bound; p->code -= bound; *prob = (CProb)(ttt - (ttt >> kNumMoveBits)); + +void Bcj2Dec_Init(CBcj2Dec *p) +{ + unsigned i; + + p->state = BCJ2_DEC_STATE_OK; + p->ip = 0; + p->temp[3] = 0; + p->range = 0; + p->code = 0; + for (i = 0; i < sizeof(p->probs) / sizeof(p->probs[0]); i++) + p->probs[i] = kBitModelTotal >> 1; +} + +SRes Bcj2Dec_Decode(CBcj2Dec *p) +{ + if (p->range <= 5) + { + p->state = BCJ2_DEC_STATE_OK; + for (; p->range != 5; p->range++) + { + if (p->range == 1 && p->code != 0) + return SZ_ERROR_DATA; + + if (p->bufs[BCJ2_STREAM_RC] == p->lims[BCJ2_STREAM_RC]) + { + p->state = BCJ2_STREAM_RC; + return SZ_OK; + } + + p->code = (p->code << 8) | *(p->bufs[BCJ2_STREAM_RC])++; + } + + if (p->code == 0xFFFFFFFF) + return SZ_ERROR_DATA; + + p->range = 0xFFFFFFFF; + } + else if (p->state >= BCJ2_DEC_STATE_ORIG_0) + { + while (p->state <= BCJ2_DEC_STATE_ORIG_3) + { + Byte *dest = p->dest; + if (dest == p->destLim) + return SZ_OK; + *dest = p->temp[(size_t)p->state - BCJ2_DEC_STATE_ORIG_0]; + p->state++; + p->dest = dest + 1; + } + } + + /* + if (BCJ2_IS_32BIT_STREAM(p->state)) + { + const Byte *cur = p->bufs[p->state]; + if (cur == p->lims[p->state]) + return SZ_OK; + p->bufs[p->state] = cur + 4; + + { + UInt32 val; + Byte *dest; + SizeT rem; + + p->ip += 4; + val = GetBe32(cur) - p->ip; + dest = p->dest; + rem = p->destLim - dest; + if (rem < 4) + { + SizeT i; + SetUi32(p->temp, val); + for (i = 0; i < rem; i++) + dest[i] = p->temp[i]; + p->dest = dest + rem; + p->state = BCJ2_DEC_STATE_ORIG_0 + (unsigned)rem; + return SZ_OK; + } + SetUi32(dest, val); + p->temp[3] = (Byte)(val >> 24); + p->dest = dest + 4; + p->state = BCJ2_DEC_STATE_OK; + } + } + */ + + for (;;) + { + if (BCJ2_IS_32BIT_STREAM(p->state)) + p->state = BCJ2_DEC_STATE_OK; + else + { + if (p->range < kTopValue) + { + if (p->bufs[BCJ2_STREAM_RC] == p->lims[BCJ2_STREAM_RC]) + { + p->state = BCJ2_STREAM_RC; + return SZ_OK; + } + p->range <<= 8; + p->code = (p->code << 8) | *(p->bufs[BCJ2_STREAM_RC])++; + } + + { + const Byte *src = p->bufs[BCJ2_STREAM_MAIN]; + const Byte *srcLim; + Byte *dest; + SizeT num = p->lims[BCJ2_STREAM_MAIN] - src; + + if (num == 0) + { + p->state = BCJ2_STREAM_MAIN; + return SZ_OK; + } + + dest = p->dest; + if (num > (SizeT)(p->destLim - dest)) + { + num = p->destLim - dest; + if (num == 0) + { + p->state = BCJ2_DEC_STATE_ORIG; + return SZ_OK; + } + } + + srcLim = src + num; + + if (p->temp[3] == 0x0F && (src[0] & 0xF0) == 0x80) + *dest = src[0]; + else for (;;) + { + Byte b = *src; + *dest = b; + if (b != 0x0F) + { + if ((b & 0xFE) == 0xE8) + break; + dest++; + if (++src != srcLim) + continue; + break; + } + dest++; + if (++src == srcLim) + break; + if ((*src & 0xF0) != 0x80) + continue; + *dest = *src; + break; + } + + num = src - p->bufs[BCJ2_STREAM_MAIN]; + + if (src == srcLim) + { + p->temp[3] = src[-1]; + p->bufs[BCJ2_STREAM_MAIN] = src; + p->ip += (UInt32)num; + p->dest += num; + p->state = + p->bufs[BCJ2_STREAM_MAIN] == + p->lims[BCJ2_STREAM_MAIN] ? + (unsigned)BCJ2_STREAM_MAIN : + (unsigned)BCJ2_DEC_STATE_ORIG; + return SZ_OK; + } + + { + UInt32 bound, ttt; + CProb *prob; + Byte b = src[0]; + Byte prev = (Byte)(num == 0 ? p->temp[3] : src[-1]); + + p->temp[3] = b; + p->bufs[BCJ2_STREAM_MAIN] = src + 1; + num++; + p->ip += (UInt32)num; + p->dest += num; + + prob = p->probs + (unsigned)(b == 0xE8 ? 2 + (unsigned)prev : (b == 0xE9 ? 1 : 0)); + + _IF_BIT_0 + { + _UPDATE_0 + continue; + } + _UPDATE_1 + + } + } + } + + { + UInt32 val; + unsigned cj = (p->temp[3] == 0xE8) ? 
BCJ2_STREAM_CALL : BCJ2_STREAM_JUMP;
+      const Byte *cur = p->bufs[cj];
+      Byte *dest;
+      SizeT rem;
+
+      if (cur == p->lims[cj])
+      {
+        p->state = cj;
+        break;
+      }
+
+      val = GetBe32(cur);
+      p->bufs[cj] = cur + 4;
+
+      p->ip += 4;
+      val -= p->ip;
+      dest = p->dest;
+      rem = p->destLim - dest;
+
+      if (rem < 4)
+      {
+        p->temp[0] = (Byte)val; if (rem > 0) dest[0] = (Byte)val; val >>= 8;
+        p->temp[1] = (Byte)val; if (rem > 1) dest[1] = (Byte)val; val >>= 8;
+        p->temp[2] = (Byte)val; if (rem > 2) dest[2] = (Byte)val; val >>= 8;
+        p->temp[3] = (Byte)val;
+        p->dest = dest + rem;
+        p->state = BCJ2_DEC_STATE_ORIG_0 + (unsigned)rem;
+        break;
+      }
+
+      SetUi32(dest, val);
+      p->temp[3] = (Byte)(val >> 24);
+      p->dest = dest + 4;
+    }
+  }
+
+  if (p->range < kTopValue && p->bufs[BCJ2_STREAM_RC] != p->lims[BCJ2_STREAM_RC])
+  {
+    p->range <<= 8;
+    p->code = (p->code << 8) | *(p->bufs[BCJ2_STREAM_RC])++;
+  }
+
+  return SZ_OK;
+}
diff --git a/3rdparty/7z/src/Bcj2.h b/3rdparty/7z/src/Bcj2.h
new file mode 100644
index 0000000000..68893d2d12
--- /dev/null
+++ b/3rdparty/7z/src/Bcj2.h
@@ -0,0 +1,146 @@
+/* Bcj2.h -- BCJ2 Converter for x86 code
+2014-11-10 : Igor Pavlov : Public domain */
+
+#ifndef __BCJ2_H
+#define __BCJ2_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+#define BCJ2_NUM_STREAMS 4
+
+enum
+{
+  BCJ2_STREAM_MAIN,
+  BCJ2_STREAM_CALL,
+  BCJ2_STREAM_JUMP,
+  BCJ2_STREAM_RC
+};
+
+enum
+{
+  BCJ2_DEC_STATE_ORIG_0 = BCJ2_NUM_STREAMS,
+  BCJ2_DEC_STATE_ORIG_1,
+  BCJ2_DEC_STATE_ORIG_2,
+  BCJ2_DEC_STATE_ORIG_3,
+
+  BCJ2_DEC_STATE_ORIG,
+  BCJ2_DEC_STATE_OK
+};
+
+enum
+{
+  BCJ2_ENC_STATE_ORIG = BCJ2_NUM_STREAMS,
+  BCJ2_ENC_STATE_OK
+};
+
+
+#define BCJ2_IS_32BIT_STREAM(s) ((s) == BCJ2_STREAM_CALL || (s) == BCJ2_STREAM_JUMP)
+
+/*
+CBcj2Dec / CBcj2Enc
+bufs sizes:
+  BUF_SIZE(n) = lims[n] - bufs[n]
+bufs sizes for BCJ2_STREAM_CALL and BCJ2_STREAM_JUMP must be a multiple of 4:
+    (BUF_SIZE(BCJ2_STREAM_CALL) & 3) == 0
+    (BUF_SIZE(BCJ2_STREAM_JUMP) & 3) == 0
+*/
+
+/*
+CBcj2Dec:
+dest is allowed to overlap with bufs[BCJ2_STREAM_MAIN], with the following conditions:
+  bufs[BCJ2_STREAM_MAIN] >= dest &&
+  bufs[BCJ2_STREAM_MAIN] - dest >= tempReserv +
+        BUF_SIZE(BCJ2_STREAM_CALL) +
+        BUF_SIZE(BCJ2_STREAM_JUMP)
+     tempReserv = 0 : for first call of Bcj2Dec_Decode
+     tempReserv = 4 : for any other calls of Bcj2Dec_Decode
+  overlap with offset = 1 is not allowed
+*/
+
+typedef struct
+{
+  const Byte *bufs[BCJ2_NUM_STREAMS];
+  const Byte *lims[BCJ2_NUM_STREAMS];
+  Byte *dest;
+  const Byte *destLim;
+
+  unsigned state; /* BCJ2_STREAM_MAIN has more priority than BCJ2_STATE_ORIG */
+
+  UInt32 ip;
+  Byte temp[4];
+  UInt32 range;
+  UInt32 code;
+  UInt16 probs[2 + 256];
+} CBcj2Dec;
+
+void Bcj2Dec_Init(CBcj2Dec *p);
+
+/* Returns: SZ_OK or SZ_ERROR_DATA */
+SRes Bcj2Dec_Decode(CBcj2Dec *p);
+
+#define Bcj2Dec_IsFinished(_p_) ((_p_)->code == 0)
+
+
+
+typedef enum
+{
+  BCJ2_ENC_FINISH_MODE_CONTINUE,
+  BCJ2_ENC_FINISH_MODE_END_BLOCK,
+  BCJ2_ENC_FINISH_MODE_END_STREAM
+} EBcj2Enc_FinishMode;
+
+typedef struct
+{
+  Byte *bufs[BCJ2_NUM_STREAMS];
+  const Byte *lims[BCJ2_NUM_STREAMS];
+  const Byte *src;
+  const Byte *srcLim;
+
+  unsigned state;
+  EBcj2Enc_FinishMode finishMode;
+
+  Byte prevByte;
+
+  Byte cache;
+  UInt32 range;
+  UInt64 low;
+  UInt64 cacheSize;
+
+  UInt32 ip;
+
+  /* 32-bit relative offset in JUMP/CALL commands is
+       - (mod 4 GB) in 32-bit mode
+       - signed Int32 in 64-bit mode
+     We use (mod 4 GB) check for fileSize.
+     Use fileSize up to 2 GB, if you want to support 32-bit and 64-bit code conversion.
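+     (Editor's note: the encoder converts a branch only when its target passes
+     both the fileSize window check and the relatLimit check; see the
+     needConvert logic in Bcj2Enc.c below.)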
+*/
+  UInt32 fileIp;
+  UInt32 fileSize;   /* (fileSize <= ((UInt32)1 << 31)), 0 means no_limit */
+  UInt32 relatLimit; /* (relatLimit <= ((UInt32)1 << 31)), 0 means disable_conversion */
+
+  UInt32 tempTarget;
+  unsigned tempPos;
+  Byte temp[4 * 2];
+
+  unsigned flushPos;
+
+  UInt16 probs[2 + 256];
+} CBcj2Enc;
+
+void Bcj2Enc_Init(CBcj2Enc *p);
+void Bcj2Enc_Encode(CBcj2Enc *p);
+
+#define Bcj2Enc_Get_InputData_Size(p) ((SizeT)((p)->srcLim - (p)->src) + (p)->tempPos)
+#define Bcj2Enc_IsFinished(p) ((p)->flushPos == 5)
+
+
+#define BCJ2_RELAT_LIMIT_NUM_BITS 26
+#define BCJ2_RELAT_LIMIT ((UInt32)1 << BCJ2_RELAT_LIMIT_NUM_BITS)
+
+/* limit for CBcj2Enc::fileSize variable */
+#define BCJ2_FileSize_MAX ((UInt32)1 << 31)
+
+EXTERN_C_END
+
+#endif
diff --git a/3rdparty/7z/src/Bcj2Enc.c b/3rdparty/7z/src/Bcj2Enc.c
new file mode 100644
index 0000000000..7a02ecde25
--- /dev/null
+++ b/3rdparty/7z/src/Bcj2Enc.c
@@ -0,0 +1,311 @@
+/* Bcj2Enc.c -- BCJ2 Encoder (Converter for x86 code)
+2019-02-02 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+/* #define SHOW_STAT */
+
+#ifdef SHOW_STAT
+#include <stdio.h>
+#define PRF(x) x
+#else
+#define PRF(x)
+#endif
+
+#include <string.h>
+
+#include "Bcj2.h"
+#include "CpuArch.h"
+
+#define CProb UInt16
+
+#define kTopValue ((UInt32)1 << 24)
+#define kNumModelBits 11
+#define kBitModelTotal (1 << kNumModelBits)
+#define kNumMoveBits 5
+
+void Bcj2Enc_Init(CBcj2Enc *p)
+{
+  unsigned i;
+
+  p->state = BCJ2_ENC_STATE_OK;
+  p->finishMode = BCJ2_ENC_FINISH_MODE_CONTINUE;
+
+  p->prevByte = 0;
+
+  p->cache = 0;
+  p->range = 0xFFFFFFFF;
+  p->low = 0;
+  p->cacheSize = 1;
+
+  p->ip = 0;
+
+  p->fileIp = 0;
+  p->fileSize = 0;
+  p->relatLimit = BCJ2_RELAT_LIMIT;
+
+  p->tempPos = 0;
+
+  p->flushPos = 0;
+
+  for (i = 0; i < sizeof(p->probs) / sizeof(p->probs[0]); i++)
+    p->probs[i] = kBitModelTotal >> 1;
+}
+
+static BoolInt MY_FAST_CALL RangeEnc_ShiftLow(CBcj2Enc *p)
+{
+  if ((UInt32)p->low < (UInt32)0xFF000000 || (UInt32)(p->low >> 32) != 0)
+  {
+    Byte *buf = p->bufs[BCJ2_STREAM_RC];
+    do
+    {
+      if (buf == p->lims[BCJ2_STREAM_RC])
+      {
+        p->state = BCJ2_STREAM_RC;
+        p->bufs[BCJ2_STREAM_RC] = buf;
+        return True;
+      }
+      *buf++ = (Byte)(p->cache + (Byte)(p->low >> 32));
+      p->cache = 0xFF;
+    }
+    while (--p->cacheSize);
+    p->bufs[BCJ2_STREAM_RC] = buf;
+    p->cache = (Byte)((UInt32)p->low >> 24);
+  }
+  p->cacheSize++;
+  p->low = (UInt32)p->low << 8;
+  return False;
+}
+
+static void Bcj2Enc_Encode_2(CBcj2Enc *p)
+{
+  if (BCJ2_IS_32BIT_STREAM(p->state))
+  {
+    Byte *cur = p->bufs[p->state];
+    if (cur == p->lims[p->state])
+      return;
+    SetBe32(cur, p->tempTarget);
+    p->bufs[p->state] = cur + 4;
+  }
+
+  p->state = BCJ2_ENC_STATE_ORIG;
+
+  for (;;)
+  {
+    if (p->range < kTopValue)
+    {
+      if (RangeEnc_ShiftLow(p))
+        return;
+      p->range <<= 8;
+    }
+
+    {
+      {
+        const Byte *src = p->src;
+        const Byte *srcLim;
+        Byte *dest;
+        SizeT num = p->srcLim - src;
+
+        if (p->finishMode == BCJ2_ENC_FINISH_MODE_CONTINUE)
+        {
+          if (num <= 4)
+            return;
+          num -= 4;
+        }
+        else if (num == 0)
+          break;
+
+        dest = p->bufs[BCJ2_STREAM_MAIN];
+        if (num > (SizeT)(p->lims[BCJ2_STREAM_MAIN] - dest))
+        {
+          num = p->lims[BCJ2_STREAM_MAIN] - dest;
+          if (num == 0)
+          {
+            p->state = BCJ2_STREAM_MAIN;
+            return;
+          }
+        }
+
+        srcLim = src + num;
+
+        if (p->prevByte == 0x0F && (src[0] & 0xF0) == 0x80)
+          *dest = src[0];
+        else for (;;)
+        {
+          Byte b = *src;
+          *dest = b;
+          if (b != 0x0F)
+          {
+            if ((b & 0xFE) == 0xE8)
+              break;
+            dest++;
+            if (++src != srcLim)
+              continue;
+            break;
+          }
+          dest++;
+          if (++src == srcLim)
+            break;
+          if ((*src & 0xF0) !=
0x80) + continue; + *dest = *src; + break; + } + + num = src - p->src; + + if (src == srcLim) + { + p->prevByte = src[-1]; + p->bufs[BCJ2_STREAM_MAIN] = dest; + p->src = src; + p->ip += (UInt32)num; + continue; + } + + { + Byte context = (Byte)(num == 0 ? p->prevByte : src[-1]); + BoolInt needConvert; + + p->bufs[BCJ2_STREAM_MAIN] = dest + 1; + p->ip += (UInt32)num + 1; + src++; + + needConvert = False; + + if ((SizeT)(p->srcLim - src) >= 4) + { + UInt32 relatVal = GetUi32(src); + if ((p->fileSize == 0 || (UInt32)(p->ip + 4 + relatVal - p->fileIp) < p->fileSize) + && ((relatVal + p->relatLimit) >> 1) < p->relatLimit) + needConvert = True; + } + + { + UInt32 bound; + unsigned ttt; + Byte b = src[-1]; + CProb *prob = p->probs + (unsigned)(b == 0xE8 ? 2 + (unsigned)context : (b == 0xE9 ? 1 : 0)); + + ttt = *prob; + bound = (p->range >> kNumModelBits) * ttt; + + if (!needConvert) + { + p->range = bound; + *prob = (CProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits)); + p->src = src; + p->prevByte = b; + continue; + } + + p->low += bound; + p->range -= bound; + *prob = (CProb)(ttt - (ttt >> kNumMoveBits)); + + { + UInt32 relatVal = GetUi32(src); + UInt32 absVal; + p->ip += 4; + absVal = p->ip + relatVal; + p->prevByte = src[3]; + src += 4; + p->src = src; + { + unsigned cj = (b == 0xE8) ? BCJ2_STREAM_CALL : BCJ2_STREAM_JUMP; + Byte *cur = p->bufs[cj]; + if (cur == p->lims[cj]) + { + p->state = cj; + p->tempTarget = absVal; + return; + } + SetBe32(cur, absVal); + p->bufs[cj] = cur + 4; + } + } + } + } + } + } + } + + if (p->finishMode != BCJ2_ENC_FINISH_MODE_END_STREAM) + return; + + for (; p->flushPos < 5; p->flushPos++) + if (RangeEnc_ShiftLow(p)) + return; + p->state = BCJ2_ENC_STATE_OK; +} + + +void Bcj2Enc_Encode(CBcj2Enc *p) +{ + PRF(printf("\n")); + PRF(printf("---- ip = %8d tempPos = %8d src = %8d\n", p->ip, p->tempPos, p->srcLim - p->src)); + + if (p->tempPos != 0) + { + unsigned extra = 0; + + for (;;) + { + const Byte *src = p->src; + const Byte *srcLim = p->srcLim; + EBcj2Enc_FinishMode finishMode = p->finishMode; + + p->src = p->temp; + p->srcLim = p->temp + p->tempPos; + if (src != srcLim) + p->finishMode = BCJ2_ENC_FINISH_MODE_CONTINUE; + + PRF(printf(" ip = %8d tempPos = %8d src = %8d\n", p->ip, p->tempPos, p->srcLim - p->src)); + + Bcj2Enc_Encode_2(p); + + { + unsigned num = (unsigned)(p->src - p->temp); + unsigned tempPos = p->tempPos - num; + unsigned i; + p->tempPos = tempPos; + for (i = 0; i < tempPos; i++) + p->temp[i] = p->temp[(size_t)i + num]; + + p->src = src; + p->srcLim = srcLim; + p->finishMode = finishMode; + + if (p->state != BCJ2_ENC_STATE_ORIG || src == srcLim) + return; + + if (extra >= tempPos) + { + p->src = src - tempPos; + p->tempPos = 0; + break; + } + + p->temp[tempPos] = src[0]; + p->tempPos = tempPos + 1; + p->src = src + 1; + extra++; + } + } + } + + PRF(printf("++++ ip = %8d tempPos = %8d src = %8d\n", p->ip, p->tempPos, p->srcLim - p->src)); + + Bcj2Enc_Encode_2(p); + + if (p->state == BCJ2_ENC_STATE_ORIG) + { + const Byte *src = p->src; + unsigned rem = (unsigned)(p->srcLim - src); + unsigned i; + for (i = 0; i < rem; i++) + p->temp[i] = src[i]; + p->tempPos = rem; + p->src = src + rem; + } +} diff --git a/3rdparty/7z/src/Bra.c b/3rdparty/7z/src/Bra.c new file mode 100644 index 0000000000..cbdcb290df --- /dev/null +++ b/3rdparty/7z/src/Bra.c @@ -0,0 +1,230 @@ +/* Bra.c -- Converters for RISC code +2017-04-04 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "CpuArch.h" +#include "Bra.h" + +SizeT ARM_Convert(Byte *data, SizeT 
size, UInt32 ip, int encoding) +{ + Byte *p; + const Byte *lim; + size &= ~(size_t)3; + ip += 4; + p = data; + lim = data + size; + + if (encoding) + + for (;;) + { + for (;;) + { + if (p >= lim) + return p - data; + p += 4; + if (p[-1] == 0xEB) + break; + } + { + UInt32 v = GetUi32(p - 4); + v <<= 2; + v += ip + (UInt32)(p - data); + v >>= 2; + v &= 0x00FFFFFF; + v |= 0xEB000000; + SetUi32(p - 4, v); + } + } + + for (;;) + { + for (;;) + { + if (p >= lim) + return p - data; + p += 4; + if (p[-1] == 0xEB) + break; + } + { + UInt32 v = GetUi32(p - 4); + v <<= 2; + v -= ip + (UInt32)(p - data); + v >>= 2; + v &= 0x00FFFFFF; + v |= 0xEB000000; + SetUi32(p - 4, v); + } + } +} + + +SizeT ARMT_Convert(Byte *data, SizeT size, UInt32 ip, int encoding) +{ + Byte *p; + const Byte *lim; + size &= ~(size_t)1; + p = data; + lim = data + size - 4; + + if (encoding) + + for (;;) + { + UInt32 b1; + for (;;) + { + UInt32 b3; + if (p > lim) + return p - data; + b1 = p[1]; + b3 = p[3]; + p += 2; + b1 ^= 8; + if ((b3 & b1) >= 0xF8) + break; + } + { + UInt32 v = + ((UInt32)b1 << 19) + + (((UInt32)p[1] & 0x7) << 8) + + (((UInt32)p[-2] << 11)) + + (p[0]); + + p += 2; + { + UInt32 cur = (ip + (UInt32)(p - data)) >> 1; + v += cur; + } + + p[-4] = (Byte)(v >> 11); + p[-3] = (Byte)(0xF0 | ((v >> 19) & 0x7)); + p[-2] = (Byte)v; + p[-1] = (Byte)(0xF8 | (v >> 8)); + } + } + + for (;;) + { + UInt32 b1; + for (;;) + { + UInt32 b3; + if (p > lim) + return p - data; + b1 = p[1]; + b3 = p[3]; + p += 2; + b1 ^= 8; + if ((b3 & b1) >= 0xF8) + break; + } + { + UInt32 v = + ((UInt32)b1 << 19) + + (((UInt32)p[1] & 0x7) << 8) + + (((UInt32)p[-2] << 11)) + + (p[0]); + + p += 2; + { + UInt32 cur = (ip + (UInt32)(p - data)) >> 1; + v -= cur; + } + + /* + SetUi16(p - 4, (UInt16)(((v >> 11) & 0x7FF) | 0xF000)); + SetUi16(p - 2, (UInt16)(v | 0xF800)); + */ + + p[-4] = (Byte)(v >> 11); + p[-3] = (Byte)(0xF0 | ((v >> 19) & 0x7)); + p[-2] = (Byte)v; + p[-1] = (Byte)(0xF8 | (v >> 8)); + } + } +} + + +SizeT PPC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding) +{ + Byte *p; + const Byte *lim; + size &= ~(size_t)3; + ip -= 4; + p = data; + lim = data + size; + + for (;;) + { + for (;;) + { + if (p >= lim) + return p - data; + p += 4; + /* if ((v & 0xFC000003) == 0x48000001) */ + if ((p[-4] & 0xFC) == 0x48 && (p[-1] & 3) == 1) + break; + } + { + UInt32 v = GetBe32(p - 4); + if (encoding) + v += ip + (UInt32)(p - data); + else + v -= ip + (UInt32)(p - data); + v &= 0x03FFFFFF; + v |= 0x48000000; + SetBe32(p - 4, v); + } + } +} + + +SizeT SPARC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding) +{ + Byte *p; + const Byte *lim; + size &= ~(size_t)3; + ip -= 4; + p = data; + lim = data + size; + + for (;;) + { + for (;;) + { + if (p >= lim) + return p - data; + /* + v = GetBe32(p); + p += 4; + m = v + ((UInt32)5 << 29); + m ^= (UInt32)7 << 29; + m += (UInt32)1 << 22; + if ((m & ((UInt32)0x1FF << 23)) == 0) + break; + */ + p += 4; + if ((p[-4] == 0x40 && (p[-3] & 0xC0) == 0) || + (p[-4] == 0x7F && (p[-3] >= 0xC0))) + break; + } + { + UInt32 v = GetBe32(p - 4); + v <<= 2; + if (encoding) + v += ip + (UInt32)(p - data); + else + v -= ip + (UInt32)(p - data); + + v &= 0x01FFFFFF; + v -= (UInt32)1 << 24; + v ^= 0xFF000000; + v >>= 2; + v |= 0x40000000; + SetBe32(p - 4, v); + } + } +} diff --git a/3rdparty/7z/src/Bra.h b/3rdparty/7z/src/Bra.h new file mode 100644 index 0000000000..aba8dce14f --- /dev/null +++ b/3rdparty/7z/src/Bra.h @@ -0,0 +1,64 @@ +/* Bra.h -- Branch converters for executables +2013-01-18 : Igor Pavlov : Public domain */ 
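Before the declarations, it is worth seeing the effect of one of the RISC converters defined above on a single instruction. A worked round-trip sketch (ArmDemo is hypothetical and the addresses are illustrative):

    #include "Bra.h"

    static void ArmDemo(void)
    {
      /* one little-endian ARM BL: 24-bit offset field = 2, opcode byte 0xEB */
      Byte code[4] = { 0x02, 0x00, 0x00, 0xEB };
      UInt32 ip = 0x1000;  /* virtual address of code[0] */

      ARM_Convert(code, sizeof(code), ip, 1);  /* encode: relative -> absolute */
      /* the offset field is now 0x404 == (0x1000 + 8 + 4*2) / 4,
         i.e. the absolute branch target divided by 4 */

      ARM_Convert(code, sizeof(code), ip, 0);  /* decode: absolute -> relative */
      /* code[] is back to 02 00 00 EB */
    }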
+
+#ifndef __BRA_H
+#define __BRA_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+/*
+These functions convert relative addresses to absolute addresses
+in CALL instructions to increase the compression ratio.
+
+  In:
+    data     - data buffer
+    size     - size of data
+    ip       - current virtual Instruction Pointer (IP) value
+    state    - state variable for x86 converter
+    encoding - 0 (for decoding), 1 (for encoding)
+
+  Out:
+    state    - state variable for x86 converter
+
+  Returns:
+    The number of processed bytes. If you call these functions multiple
+    times, each next call must start at the first byte after the
+    previously processed block.
+
+  Type   Endian  Alignment  LookAhead
+
+  x86    little      1          4
+  ARMT   little      2          2
+  ARM    little      4          0
+  PPC     big        4          0
+  SPARC   big        4          0
+  IA64   little     16          0
+
+  size must be >= Alignment + LookAhead, if it's not last block.
+  If (size < Alignment + LookAhead), converter returns 0.
+
+  Example:
+
+    UInt32 ip = 0;
+    for (;;)
+    {
+      ; size must be >= Alignment + LookAhead, if it's not last block
+      SizeT processed = Convert(data, size, ip, 1);
+      data += processed;
+      size -= processed;
+      ip += processed;
+    }
+*/
+
+#define x86_Convert_Init(state) { state = 0; }
+SizeT x86_Convert(Byte *data, SizeT size, UInt32 ip, UInt32 *state, int encoding);
+SizeT ARM_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+SizeT ARMT_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+SizeT PPC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+SizeT SPARC_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+SizeT IA64_Convert(Byte *data, SizeT size, UInt32 ip, int encoding);
+
+EXTERN_C_END
+
+#endif
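Bra86.c, next, implements the x86 (BCJ) variant declared above; unlike the RISC converters it needs the persistent state word. A round-trip sketch (BcjRoundTrip is hypothetical; the buffer is one CALL rel32 plus NOP padding):

    #include <string.h>
    #include "Bra.h"

    static int BcjRoundTrip(void)
    {
      Byte buf[8] = { 0xE8, 0x10, 0x00, 0x00, 0x00, 0x90, 0x90, 0x90 };
      Byte orig[8];
      UInt32 state;
      memcpy(orig, buf, sizeof(buf));

      x86_Convert_Init(state);
      x86_Convert(buf, sizeof(buf), 0, &state, 1);  /* encode */
      /* returns 5 here: the last 4 bytes are look-ahead and would be
         handed to the next call, per the contract in Bra.h above */

      x86_Convert_Init(state);
      x86_Convert(buf, sizeof(buf), 0, &state, 0);  /* decode */

      return memcmp(buf, orig, sizeof(buf)) == 0;   /* 1: restored exactly */
    }

diff --git a/3rdparty/7z/src/Bra86.c b/3rdparty/7z/src/Bra86.c
new file mode 100644
index 0000000000..a6463c63ba
--- /dev/null
+++ b/3rdparty/7z/src/Bra86.c
@@ -0,0 +1,82 @@
+/* Bra86.c -- Converter for x86 code (BCJ)
+2017-04-03 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include "Bra.h"
+
+#define Test86MSByte(b) ((((b) + 1) & 0xFE) == 0)
+
+SizeT x86_Convert(Byte *data, SizeT size, UInt32 ip, UInt32 *state, int encoding)
+{
+  SizeT pos = 0;
+  UInt32 mask = *state & 7;
+  if (size < 5)
+    return 0;
+  size -= 4;
+  ip += 5;
+
+  for (;;)
+  {
+    Byte *p = data + pos;
+    const Byte *limit = data + size;
+    for (; p < limit; p++)
+      if ((*p & 0xFE) == 0xE8)
+        break;
+
+    {
+      SizeT d = (SizeT)(p - data - pos);
+      pos = (SizeT)(p - data);
+      if (p >= limit)
+      {
+        *state = (d > 2 ? 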
0 : mask >> (unsigned)d); + return pos; + } + if (d > 2) + mask = 0; + else + { + mask >>= (unsigned)d; + if (mask != 0 && (mask > 4 || mask == 3 || Test86MSByte(p[(size_t)(mask >> 1) + 1]))) + { + mask = (mask >> 1) | 4; + pos++; + continue; + } + } + } + + if (Test86MSByte(p[4])) + { + UInt32 v = ((UInt32)p[4] << 24) | ((UInt32)p[3] << 16) | ((UInt32)p[2] << 8) | ((UInt32)p[1]); + UInt32 cur = ip + (UInt32)pos; + pos += 5; + if (encoding) + v += cur; + else + v -= cur; + if (mask != 0) + { + unsigned sh = (mask & 6) << 2; + if (Test86MSByte((Byte)(v >> sh))) + { + v ^= (((UInt32)0x100 << sh) - 1); + if (encoding) + v += cur; + else + v -= cur; + } + mask = 0; + } + p[1] = (Byte)v; + p[2] = (Byte)(v >> 8); + p[3] = (Byte)(v >> 16); + p[4] = (Byte)(0 - ((v >> 24) & 1)); + } + else + { + mask = (mask >> 1) | 4; + pos++; + } + } +} diff --git a/3rdparty/7z/src/BraIA64.c b/3rdparty/7z/src/BraIA64.c new file mode 100644 index 0000000000..2656907a0b --- /dev/null +++ b/3rdparty/7z/src/BraIA64.c @@ -0,0 +1,53 @@ +/* BraIA64.c -- Converter for IA-64 code +2017-01-26 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "CpuArch.h" +#include "Bra.h" + +SizeT IA64_Convert(Byte *data, SizeT size, UInt32 ip, int encoding) +{ + SizeT i; + if (size < 16) + return 0; + size -= 16; + i = 0; + do + { + unsigned m = ((UInt32)0x334B0000 >> (data[i] & 0x1E)) & 3; + if (m) + { + m++; + do + { + Byte *p = data + (i + (size_t)m * 5 - 8); + if (((p[3] >> m) & 15) == 5 + && (((p[-1] | ((UInt32)p[0] << 8)) >> m) & 0x70) == 0) + { + unsigned raw = GetUi32(p); + unsigned v = raw >> m; + v = (v & 0xFFFFF) | ((v & (1 << 23)) >> 3); + + v <<= 4; + if (encoding) + v += ip + (UInt32)i; + else + v -= ip + (UInt32)i; + v >>= 4; + + v &= 0x1FFFFF; + v += 0x700000; + v &= 0x8FFFFF; + raw &= ~((UInt32)0x8FFFFF << m); + raw |= (v << m); + SetUi32(p, raw); + } + } + while (++m <= 4); + } + i += 16; + } + while (i <= size); + return i; +} diff --git a/3rdparty/7z/src/Compiler.h b/3rdparty/7z/src/Compiler.h new file mode 100644 index 0000000000..c788648cd2 --- /dev/null +++ b/3rdparty/7z/src/Compiler.h @@ -0,0 +1,33 @@ +/* Compiler.h +2017-04-03 : Igor Pavlov : Public domain */ + +#ifndef __7Z_COMPILER_H +#define __7Z_COMPILER_H + +#ifdef _MSC_VER + + #ifdef UNDER_CE + #define RPC_NO_WINDOWS_H + /* #pragma warning(disable : 4115) // '_RPC_ASYNC_STATE' : named type definition in parentheses */ + #pragma warning(disable : 4201) // nonstandard extension used : nameless struct/union + #pragma warning(disable : 4214) // nonstandard extension used : bit field types other than int + #endif + + #if _MSC_VER >= 1300 + #pragma warning(disable : 4996) // This function or variable may be unsafe + #else + #pragma warning(disable : 4511) // copy constructor could not be generated + #pragma warning(disable : 4512) // assignment operator could not be generated + #pragma warning(disable : 4514) // unreferenced inline function has been removed + #pragma warning(disable : 4702) // unreachable code + #pragma warning(disable : 4710) // not inlined + #pragma warning(disable : 4714) // function marked as __forceinline not inlined + #pragma warning(disable : 4786) // identifier was truncated to '255' characters in the debug information + #endif + +#endif + +#define UNUSED_VAR(x) (void)x; +/* #define UNUSED_VAR(x) x=x; */ + +#endif diff --git a/3rdparty/7z/src/CpuArch.c b/3rdparty/7z/src/CpuArch.c new file mode 100644 index 0000000000..ff1890e7fe --- /dev/null +++ b/3rdparty/7z/src/CpuArch.c @@ -0,0 +1,218 @@ +/* CpuArch.c -- CPU specific 
code
+2018-02-18: Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include "CpuArch.h"
+
+#ifdef MY_CPU_X86_OR_AMD64
+
+#if (defined(_MSC_VER) && !defined(MY_CPU_AMD64)) || defined(__GNUC__)
+#define USE_ASM
+#endif
+
+#if !defined(USE_ASM) && _MSC_VER >= 1500
+#include <intrin.h>
+#endif
+
+#if defined(USE_ASM) && !defined(MY_CPU_AMD64)
+static UInt32 CheckFlag(UInt32 flag)
+{
+  #ifdef _MSC_VER
+  __asm pushfd;
+  __asm pop EAX;
+  __asm mov EDX, EAX;
+  __asm xor EAX, flag;
+  __asm push EAX;
+  __asm popfd;
+  __asm pushfd;
+  __asm pop EAX;
+  __asm xor EAX, EDX;
+  __asm push EDX;
+  __asm popfd;
+  __asm and flag, EAX;
+  #else
+  __asm__ __volatile__ (
+    "pushf\n\t"
+    "pop %%EAX\n\t"
+    "movl %%EAX,%%EDX\n\t"
+    "xorl %0,%%EAX\n\t"
+    "push %%EAX\n\t"
+    "popf\n\t"
+    "pushf\n\t"
+    "pop %%EAX\n\t"
+    "xorl %%EDX,%%EAX\n\t"
+    "push %%EDX\n\t"
+    "popf\n\t"
+    "andl %%EAX, %0\n\t":
+    "=c" (flag) : "c" (flag) :
+    "%eax", "%edx");
+  #endif
+  return flag;
+}
+#define CHECK_CPUID_IS_SUPPORTED if (CheckFlag(1 << 18) == 0 || CheckFlag(1 << 21) == 0) return False;
+#else
+#define CHECK_CPUID_IS_SUPPORTED
+#endif
+
+void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d)
+{
+  #ifdef USE_ASM
+
+  #ifdef _MSC_VER
+
+  UInt32 a2, b2, c2, d2;
+  __asm xor EBX, EBX;
+  __asm xor ECX, ECX;
+  __asm xor EDX, EDX;
+  __asm mov EAX, function;
+  __asm cpuid;
+  __asm mov a2, EAX;
+  __asm mov b2, EBX;
+  __asm mov c2, ECX;
+  __asm mov d2, EDX;
+
+  *a = a2;
+  *b = b2;
+  *c = c2;
+  *d = d2;
+
+  #else
+
+  __asm__ __volatile__ (
+  #if defined(MY_CPU_AMD64) && defined(__PIC__)
+    "mov %%rbx, %%rdi;"
+    "cpuid;"
+    "xchg %%rbx, %%rdi;"
+    : "=a" (*a) ,
+      "=D" (*b) ,
+  #elif defined(MY_CPU_X86) && defined(__PIC__)
+    "mov %%ebx, %%edi;"
+    "cpuid;"
+    "xchgl %%ebx, %%edi;"
+    : "=a" (*a) ,
+      "=D" (*b) ,
+  #else
+    "cpuid"
+    : "=a" (*a) ,
+      "=b" (*b) ,
+  #endif
+      "=c" (*c) ,
+      "=d" (*d)
+    : "0" (function)) ;
+
+  #endif
+
+  #else
+
+  int CPUInfo[4];
+  __cpuid(CPUInfo, function);
+  *a = CPUInfo[0];
+  *b = CPUInfo[1];
+  *c = CPUInfo[2];
+  *d = CPUInfo[3];
+
+  #endif
+}
+
+BoolInt x86cpuid_CheckAndRead(Cx86cpuid *p)
+{
+  CHECK_CPUID_IS_SUPPORTED
+  MyCPUID(0, &p->maxFunc, &p->vendor[0], &p->vendor[2], &p->vendor[1]);
+  MyCPUID(1, &p->ver, &p->b, &p->c, &p->d);
+  return True;
+}
+
+static const UInt32 kVendors[][3] =
+{
+  { 0x756E6547, 0x49656E69, 0x6C65746E},
+  { 0x68747541, 0x69746E65, 0x444D4163},
+  { 0x746E6543, 0x48727561, 0x736C7561}
+};
+
+int x86cpuid_GetFirm(const Cx86cpuid *p)
+{
+  unsigned i;
+  for (i = 0; i < sizeof(kVendors) / sizeof(kVendors[i]); i++)
+  {
+    const UInt32 *v = kVendors[i];
+    if (v[0] == p->vendor[0] &&
+        v[1] == p->vendor[1] &&
+        v[2] == p->vendor[2])
+      return (int)i;
+  }
+  return -1;
+}
+
+BoolInt CPU_Is_InOrder()
+{
+  Cx86cpuid p;
+  int firm;
+  UInt32 family, model;
+  if (!x86cpuid_CheckAndRead(&p))
+    return True;
+
+  family = x86cpuid_GetFamily(p.ver);
+  model = x86cpuid_GetModel(p.ver);
+
+  firm = x86cpuid_GetFirm(&p);
+
+  switch (firm)
+  {
+    case CPU_FIRM_INTEL: return (family < 6 || (family == 6 && (
+        /* In-Order Atom CPU */
+           model == 0x1C  /* 45 nm, N4xx, D4xx, N5xx, D5xx, 230, 330 */
+        || model == 0x26  /* 45 nm, Z6xx */
+        || model == 0x27  /* 32 nm, Z2460 */
+        || model == 0x35  /* 32 nm, Z2760 */
+        || model == 0x36  /* 32 nm, N2xxx, D2xxx */
+        )));
+    case CPU_FIRM_AMD: return (family < 5 || (family == 5 && (model < 6 || model == 0xA)));
+    case CPU_FIRM_VIA: return (family < 6 || (family == 6 && model < 0xF));
+  }
+  return True;
+}
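CpuArch.c continues below with the Windows SSE check and the feature probes. A sketch of how the CPUID helpers above combine (PrintVendor is hypothetical; note that x86cpuid_CheckAndRead stores EBX, EDX, ECX into vendor[0..2], so on this little-endian-only code path the 12 bytes read as a contiguous ASCII string such as "GenuineIntel"):

    #include <stdio.h>
    #include <string.h>
    #include "CpuArch.h"

    static void PrintVendor(void)
    {
      Cx86cpuid p;
      char name[13];
      if (!x86cpuid_CheckAndRead(&p))
        return;
      memcpy(name, p.vendor, 12);  /* vendor words in EBX/EDX/ECX order */
      name[12] = 0;
      printf("%s, family %u, model %u\n", name,
          (unsigned)x86cpuid_GetFamily(p.ver),
          (unsigned)x86cpuid_GetModel(p.ver));
    }

+
+#if !defined(MY_CPU_AMD64) && defined(_WIN32)
+#include <windows.h>
+static BoolInt 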
CPU_Sys_Is_SSE_Supported() +{ + OSVERSIONINFO vi; + vi.dwOSVersionInfoSize = sizeof(vi); + if (!GetVersionEx(&vi)) + return False; + return (vi.dwMajorVersion >= 5); +} +#define CHECK_SYS_SSE_SUPPORT if (!CPU_Sys_Is_SSE_Supported()) return False; +#else +#define CHECK_SYS_SSE_SUPPORT +#endif + +BoolInt CPU_Is_Aes_Supported() +{ + Cx86cpuid p; + CHECK_SYS_SSE_SUPPORT + if (!x86cpuid_CheckAndRead(&p)) + return False; + return (p.c >> 25) & 1; +} + +BoolInt CPU_IsSupported_PageGB() +{ + Cx86cpuid cpuid; + if (!x86cpuid_CheckAndRead(&cpuid)) + return False; + { + UInt32 d[4] = { 0 }; + MyCPUID(0x80000000, &d[0], &d[1], &d[2], &d[3]); + if (d[0] < 0x80000001) + return False; + } + { + UInt32 d[4] = { 0 }; + MyCPUID(0x80000001, &d[0], &d[1], &d[2], &d[3]); + return (d[3] >> 26) & 1; + } +} + +#endif diff --git a/3rdparty/7z/src/CpuArch.h b/3rdparty/7z/src/CpuArch.h new file mode 100644 index 0000000000..5f74c1c0cb --- /dev/null +++ b/3rdparty/7z/src/CpuArch.h @@ -0,0 +1,336 @@ +/* CpuArch.h -- CPU specific code +2018-02-18 : Igor Pavlov : Public domain */ + +#ifndef __CPU_ARCH_H +#define __CPU_ARCH_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +/* +MY_CPU_LE means that CPU is LITTLE ENDIAN. +MY_CPU_BE means that CPU is BIG ENDIAN. +If MY_CPU_LE and MY_CPU_BE are not defined, we don't know about ENDIANNESS of platform. + +MY_CPU_LE_UNALIGN means that CPU is LITTLE ENDIAN and CPU supports unaligned memory accesses. +*/ + +#if defined(_M_X64) \ + || defined(_M_AMD64) \ + || defined(__x86_64__) \ + || defined(__AMD64__) \ + || defined(__amd64__) + #define MY_CPU_AMD64 + #ifdef __ILP32__ + #define MY_CPU_NAME "x32" + #else + #define MY_CPU_NAME "x64" + #endif + #define MY_CPU_64BIT +#endif + + +#if defined(_M_IX86) \ + || defined(__i386__) + #define MY_CPU_X86 + #define MY_CPU_NAME "x86" + #define MY_CPU_32BIT +#endif + + +#if defined(_M_ARM64) \ + || defined(__AARCH64EL__) \ + || defined(__AARCH64EB__) \ + || defined(__aarch64__) + #define MY_CPU_ARM64 + #define MY_CPU_NAME "arm64" + #define MY_CPU_64BIT +#endif + + +#if defined(_M_ARM) \ + || defined(_M_ARM_NT) \ + || defined(_M_ARMT) \ + || defined(__arm__) \ + || defined(__thumb__) \ + || defined(__ARMEL__) \ + || defined(__ARMEB__) \ + || defined(__THUMBEL__) \ + || defined(__THUMBEB__) + #define MY_CPU_ARM + #define MY_CPU_NAME "arm" + #define MY_CPU_32BIT +#endif + + +#if defined(_M_IA64) \ + || defined(__ia64__) + #define MY_CPU_IA64 + #define MY_CPU_NAME "ia64" + #define MY_CPU_64BIT +#endif + + +#if defined(__mips64) \ + || defined(__mips64__) \ + || (defined(__mips) && (__mips == 64 || __mips == 4 || __mips == 3)) + #define MY_CPU_NAME "mips64" + #define MY_CPU_64BIT +#elif defined(__mips__) + #define MY_CPU_NAME "mips" + /* #define MY_CPU_32BIT */ +#endif + + +#if defined(__ppc64__) \ + || defined(__powerpc64__) + #ifdef __ILP32__ + #define MY_CPU_NAME "ppc64-32" + #else + #define MY_CPU_NAME "ppc64" + #endif + #define MY_CPU_64BIT +#elif defined(__ppc__) \ + || defined(__powerpc__) + #define MY_CPU_NAME "ppc" + #define MY_CPU_32BIT +#endif + + +#if defined(__sparc64__) + #define MY_CPU_NAME "sparc64" + #define MY_CPU_64BIT +#elif defined(__sparc__) + #define MY_CPU_NAME "sparc" + /* #define MY_CPU_32BIT */ +#endif + + +#if defined(MY_CPU_X86) || defined(MY_CPU_AMD64) +#define MY_CPU_X86_OR_AMD64 +#endif + + +#ifdef _WIN32 + + #ifdef MY_CPU_ARM + #define MY_CPU_ARM_LE + #endif + + #ifdef MY_CPU_ARM64 + #define MY_CPU_ARM64_LE + #endif + + #ifdef _M_IA64 + #define MY_CPU_IA64_LE + #endif + +#endif + + +#if defined(MY_CPU_X86_OR_AMD64) \ + 
|| defined(MY_CPU_ARM_LE) \
+  || defined(MY_CPU_ARM64_LE) \
+  || defined(MY_CPU_IA64_LE) \
+  || defined(__LITTLE_ENDIAN__) \
+  || defined(__ARMEL__) \
+  || defined(__THUMBEL__) \
+  || defined(__AARCH64EL__) \
+  || defined(__MIPSEL__) \
+  || defined(__MIPSEL) \
+  || defined(_MIPSEL) \
+  || defined(__BFIN__) \
+  || (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
+  #define MY_CPU_LE
+#endif
+
+#if defined(__BIG_ENDIAN__) \
+  || defined(__ARMEB__) \
+  || defined(__THUMBEB__) \
+  || defined(__AARCH64EB__) \
+  || defined(__MIPSEB__) \
+  || defined(__MIPSEB) \
+  || defined(_MIPSEB) \
+  || defined(__m68k__) \
+  || defined(__s390__) \
+  || defined(__s390x__) \
+  || defined(__zarch__) \
+  || (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
+  #define MY_CPU_BE
+#endif
+
+
+#if defined(MY_CPU_LE) && defined(MY_CPU_BE)
+  #error Stop_Compiling_Bad_Endian
+#endif
+
+
+#if defined(MY_CPU_32BIT) && defined(MY_CPU_64BIT)
+  #error Stop_Compiling_Bad_32_64_BIT
+#endif
+
+
+#ifndef MY_CPU_NAME
+  #ifdef MY_CPU_LE
+    #define MY_CPU_NAME "LE"
+  #elif defined(MY_CPU_BE)
+    #define MY_CPU_NAME "BE"
+  #else
+    /*
+    #define MY_CPU_NAME ""
+    */
+  #endif
+#endif
+
+
+#ifdef MY_CPU_LE
+  #if defined(MY_CPU_X86_OR_AMD64) \
+      || defined(MY_CPU_ARM64) \
+      || defined(__ARM_FEATURE_UNALIGNED)
+    #define MY_CPU_LE_UNALIGN
+  #endif
+#endif
+
+
+#ifdef MY_CPU_LE_UNALIGN
+
+#define GetUi16(p) (*(const UInt16 *)(const void *)(p))
+#define GetUi32(p) (*(const UInt32 *)(const void *)(p))
+#define GetUi64(p) (*(const UInt64 *)(const void *)(p))
+
+#define SetUi16(p, v) { *(UInt16 *)(p) = (v); }
+#define SetUi32(p, v) { *(UInt32 *)(p) = (v); }
+#define SetUi64(p, v) { *(UInt64 *)(p) = (v); }
+
+#else
+
+/* fallback accessors: byte-by-byte, valid for any alignment on any host */
+
+#define GetUi16(p) ( (UInt16) ( \
+             ((const Byte *)(p))[0] | \
+    ((UInt16)((const Byte *)(p))[1] << 8) ))
+
+#define GetUi32(p) ( \
+             ((const Byte *)(p))[0]        | \
+    ((UInt32)((const Byte *)(p))[1] <<  8) | \
+    ((UInt32)((const Byte *)(p))[2] << 16) | \
+    ((UInt32)((const Byte *)(p))[3] << 24))
+
+#define GetUi64(p) (GetUi32(p) | ((UInt64)GetUi32(((const Byte *)(p)) + 4) << 32))
+
+#define SetUi16(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
+    _ppp_[0] = (Byte)_vvv_; \
+    _ppp_[1] = (Byte)(_vvv_ >> 8); }
+
+#define SetUi32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \
+    _ppp_[0] = (Byte)_vvv_; \
+    _ppp_[1] = (Byte)(_vvv_ >> 8); \
+    _ppp_[2] = (Byte)(_vvv_ >> 16); \
+    _ppp_[3] = (Byte)(_vvv_ >> 24); }
+
+#define SetUi64(p, v) { Byte *_ppp2_ = (Byte *)(p); UInt64 _vvv2_ = (v); \
+    SetUi32(_ppp2_    , (UInt32)_vvv2_); \
+    SetUi32(_ppp2_ + 4, (UInt32)(_vvv2_ >> 32)); }
+
+#endif
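The big-endian accessors are selected below, using compiler byteswap intrinsics where the platform allows unaligned loads. Whichever branch the preprocessor picks, the contract is byte-order-defined access independent of host endianness; a sketch (ByteOrderDemo is hypothetical, GetBe32 is defined just below):

    #include <stdio.h>
    #include "CpuArch.h"

    static void ByteOrderDemo(void)
    {
      Byte b[4] = { 0x12, 0x34, 0x56, 0x78 };
      printf("%08X\n", (unsigned)GetUi32(b));  /* little-endian read: 78563412 */
      printf("%08X\n", (unsigned)GetBe32(b));  /* big-endian read:    12345678 */
    }

+#ifdef __has_builtin
+  #define MY__has_builtin(x) __has_builtin(x)
+#else
+  #define MY__has_builtin(x) 0
+#endif
+
+#if defined(MY_CPU_LE_UNALIGN) && /* defined(_WIN64) && */ (_MSC_VER >= 1300)
+
+/* Note: we use bswap instruction, that is unsupported in 386 cpu */
+
+#include <stdlib.h>
+
+#pragma intrinsic(_byteswap_ushort)
+#pragma intrinsic(_byteswap_ulong)
+#pragma intrinsic(_byteswap_uint64)
+
+/* #define GetBe16(p) _byteswap_ushort(*(const UInt16 *)(const Byte *)(p)) */
+#define GetBe32(p) _byteswap_ulong(*(const UInt32 *)(const Byte *)(p))
+#define GetBe64(p) _byteswap_uint64(*(const UInt64 *)(const Byte *)(p))
+
+#define SetBe32(p, v) (*(UInt32 *)(void *)(p)) = _byteswap_ulong(v)
+
+#elif defined(MY_CPU_LE_UNALIGN) && ( \
+       (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \
+    || (defined(__clang__) && MY__has_builtin(__builtin_bswap16)) )
+
+/* #define GetBe16(p) 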
__builtin_bswap16(*(const UInt16 *)(const Byte *)(p)) */ +#define GetBe32(p) __builtin_bswap32(*(const UInt32 *)(const Byte *)(p)) +#define GetBe64(p) __builtin_bswap64(*(const UInt64 *)(const Byte *)(p)) + +#define SetBe32(p, v) (*(UInt32 *)(void *)(p)) = __builtin_bswap32(v) + +#else + +#define GetBe32(p) ( \ + ((UInt32)((const Byte *)(p))[0] << 24) | \ + ((UInt32)((const Byte *)(p))[1] << 16) | \ + ((UInt32)((const Byte *)(p))[2] << 8) | \ + ((const Byte *)(p))[3] ) + +#define GetBe64(p) (((UInt64)GetBe32(p) << 32) | GetBe32(((const Byte *)(p)) + 4)) + +#define SetBe32(p, v) { Byte *_ppp_ = (Byte *)(p); UInt32 _vvv_ = (v); \ + _ppp_[0] = (Byte)(_vvv_ >> 24); \ + _ppp_[1] = (Byte)(_vvv_ >> 16); \ + _ppp_[2] = (Byte)(_vvv_ >> 8); \ + _ppp_[3] = (Byte)_vvv_; } + +#endif + + +#ifndef GetBe16 + +#define GetBe16(p) ( (UInt16) ( \ + ((UInt16)((const Byte *)(p))[0] << 8) | \ + ((const Byte *)(p))[1] )) + +#endif + + + +#ifdef MY_CPU_X86_OR_AMD64 + +typedef struct +{ + UInt32 maxFunc; + UInt32 vendor[3]; + UInt32 ver; + UInt32 b; + UInt32 c; + UInt32 d; +} Cx86cpuid; + +enum +{ + CPU_FIRM_INTEL, + CPU_FIRM_AMD, + CPU_FIRM_VIA +}; + +void MyCPUID(UInt32 function, UInt32 *a, UInt32 *b, UInt32 *c, UInt32 *d); + +BoolInt x86cpuid_CheckAndRead(Cx86cpuid *p); +int x86cpuid_GetFirm(const Cx86cpuid *p); + +#define x86cpuid_GetFamily(ver) (((ver >> 16) & 0xFF0) | ((ver >> 8) & 0xF)) +#define x86cpuid_GetModel(ver) (((ver >> 12) & 0xF0) | ((ver >> 4) & 0xF)) +#define x86cpuid_GetStepping(ver) (ver & 0xF) + +BoolInt CPU_Is_InOrder(); +BoolInt CPU_Is_Aes_Supported(); +BoolInt CPU_IsSupported_PageGB(); + +#endif + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/Delta.c b/3rdparty/7z/src/Delta.c new file mode 100644 index 0000000000..6cbbe46012 --- /dev/null +++ b/3rdparty/7z/src/Delta.c @@ -0,0 +1,64 @@ +/* Delta.c -- Delta converter +2009-05-26 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "Delta.h" + +void Delta_Init(Byte *state) +{ + unsigned i; + for (i = 0; i < DELTA_STATE_SIZE; i++) + state[i] = 0; +} + +static void MyMemCpy(Byte *dest, const Byte *src, unsigned size) +{ + unsigned i; + for (i = 0; i < size; i++) + dest[i] = src[i]; +} + +void Delta_Encode(Byte *state, unsigned delta, Byte *data, SizeT size) +{ + Byte buf[DELTA_STATE_SIZE]; + unsigned j = 0; + MyMemCpy(buf, state, delta); + { + SizeT i; + for (i = 0; i < size;) + { + for (j = 0; j < delta && i < size; i++, j++) + { + Byte b = data[i]; + data[i] = (Byte)(b - buf[j]); + buf[j] = b; + } + } + } + if (j == delta) + j = 0; + MyMemCpy(state, buf + j, delta - j); + MyMemCpy(state + delta - j, buf, j); +} + +void Delta_Decode(Byte *state, unsigned delta, Byte *data, SizeT size) +{ + Byte buf[DELTA_STATE_SIZE]; + unsigned j = 0; + MyMemCpy(buf, state, delta); + { + SizeT i; + for (i = 0; i < size;) + { + for (j = 0; j < delta && i < size; i++, j++) + { + buf[j] = data[i] = (Byte)(buf[j] + data[i]); + } + } + } + if (j == delta) + j = 0; + MyMemCpy(state, buf + j, delta - j); + MyMemCpy(state + delta - j, buf, j); +} diff --git a/3rdparty/7z/src/Delta.h b/3rdparty/7z/src/Delta.h new file mode 100644 index 0000000000..e59d5a252b --- /dev/null +++ b/3rdparty/7z/src/Delta.h @@ -0,0 +1,19 @@ +/* Delta.h -- Delta converter +2013-01-18 : Igor Pavlov : Public domain */ + +#ifndef __DELTA_H +#define __DELTA_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +#define DELTA_STATE_SIZE 256 + +void Delta_Init(Byte *state); +void Delta_Encode(Byte *state, unsigned delta, Byte *data, SizeT size); +void Delta_Decode(Byte *state, unsigned 
delta, Byte *data, SizeT size);
+
+EXTERN_C_END
+
+#endif
diff --git a/3rdparty/7z/src/DllSecur.c b/3rdparty/7z/src/DllSecur.c
new file mode 100644
index 0000000000..19a22a9f05
--- /dev/null
+++ b/3rdparty/7z/src/DllSecur.c
@@ -0,0 +1,108 @@
+/* DllSecur.c -- DLL loading security
+2018-02-21 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#ifdef _WIN32
+
+#include <windows.h>
+
+#include "DllSecur.h"
+
+#ifndef UNDER_CE
+
+typedef BOOL (WINAPI *Func_SetDefaultDllDirectories)(DWORD DirectoryFlags);
+
+#define MY_LOAD_LIBRARY_SEARCH_USER_DIRS 0x400
+#define MY_LOAD_LIBRARY_SEARCH_SYSTEM32 0x800
+
+static const char * const g_Dlls =
+  #ifndef _CONSOLE
+  "UXTHEME\0"
+  #endif
+  "USERENV\0"
+  "SETUPAPI\0"
+  "APPHELP\0"
+  "PROPSYS\0"
+  "DWMAPI\0"
+  "CRYPTBASE\0"
+  "OLEACC\0"
+  "CLBCATQ\0"
+  "VERSION\0"
+  ;
+
+#endif
+
+void My_SetDefaultDllDirectories()
+{
+  #ifndef UNDER_CE
+
+  OSVERSIONINFO vi;
+  vi.dwOSVersionInfoSize = sizeof(vi);
+  if (!GetVersionEx(&vi) || vi.dwMajorVersion != 6 || vi.dwMinorVersion != 0)
+  {
+    Func_SetDefaultDllDirectories setDllDirs = (Func_SetDefaultDllDirectories)
+        GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), "SetDefaultDllDirectories");
+    if (setDllDirs)
+      if (setDllDirs(MY_LOAD_LIBRARY_SEARCH_SYSTEM32 | MY_LOAD_LIBRARY_SEARCH_USER_DIRS))
+        return;
+  }
+
+  #endif
+}
+
+
+void LoadSecurityDlls()
+{
+  #ifndef UNDER_CE
+
+  wchar_t buf[MAX_PATH + 100];
+
+  {
+    // at Vista (ver 6.0) : CoCreateInstance(CLSID_ShellLink, ...) doesn't work after SetDefaultDllDirectories() : Check it ???
+    OSVERSIONINFO vi;
+    vi.dwOSVersionInfoSize = sizeof(vi);
+    if (!GetVersionEx(&vi) || vi.dwMajorVersion != 6 || vi.dwMinorVersion != 0)
+    {
+      Func_SetDefaultDllDirectories setDllDirs = (Func_SetDefaultDllDirectories)
+          GetProcAddress(GetModuleHandle(TEXT("kernel32.dll")), "SetDefaultDllDirectories");
+      if (setDllDirs)
+        if (setDllDirs(MY_LOAD_LIBRARY_SEARCH_SYSTEM32 | MY_LOAD_LIBRARY_SEARCH_USER_DIRS))
+          return;
+    }
+  }
+
+  {
+    unsigned len = GetSystemDirectoryW(buf, MAX_PATH + 2);
+    if (len == 0 || len > MAX_PATH)
+      return;
+  }
+  {
+    const char *dll;
+    unsigned pos = (unsigned)lstrlenW(buf);
+
+    if (buf[pos - 1] != '\\')
+      buf[pos++] = '\\';
+
+    for (dll = g_Dlls; dll[0] != 0;)
+    {
+      unsigned k = 0;
+      for (;;)
+      {
+        char c = *dll++;
+        buf[pos + k] = (Byte)c;
+        k++;
+        if (c == 0)
+          break;
+      }
+
+      lstrcatW(buf, L".dll");
+      LoadLibraryExW(buf, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
+    }
+  }
+
+  #endif
+}
+
+#endif
diff --git a/3rdparty/7z/src/DllSecur.h b/3rdparty/7z/src/DllSecur.h
new file mode 100644
index 0000000000..4c113568e6
--- /dev/null
+++ b/3rdparty/7z/src/DllSecur.h
@@ -0,0 +1,20 @@
+/* DllSecur.h -- DLL loading for security
+2018-02-19 : Igor Pavlov : Public domain */
+
+#ifndef __DLL_SECUR_H
+#define __DLL_SECUR_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+#ifdef _WIN32
+
+void My_SetDefaultDllDirectories();
+void LoadSecurityDlls();
+
+#endif
+
+EXTERN_C_END
+
+#endif
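DllSecur's two entry points are meant to run before anything else can trigger an implicit DLL load. A sketch of typical startup wiring (the main function here is hypothetical and not RPCS3's actual entry point):

    #include "DllSecur.h"

    int main(int argc, char *argv[])
    {
      #ifdef _WIN32
      /* restrict the DLL search path where the OS supports it; on Vista,
         where the comment in DllSecur.c notes SetDefaultDllDirectories is
         avoided, the known system DLLs are pre-loaded instead */
      LoadSecurityDlls();
      #endif
      (void)argc; (void)argv;
      /* ... application code ... */
      return 0;
    }

LzFind.c, the match finder used by the LZ encoders, follows.

diff --git a/3rdparty/7z/src/LzFind.c b/3rdparty/7z/src/LzFind.c
new file mode 100644
index 0000000000..4eefc17dd2
--- /dev/null
+++ b/3rdparty/7z/src/LzFind.c
@@ -0,0 +1,1127 @@
+/* LzFind.c -- Match finder for LZ algorithms
+2018-07-08 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include <string.h>
+
+#include "LzFind.h"
+#include "LzHash.h"
+
+#define kEmptyHashValue 0
+#define kMaxValForNormalize ((UInt32)0xFFFFFFFF)
+#define kNormalizeStepMin (1 << 10) /* it must be power of 2 */
+#define kNormalizeMask (~(UInt32)(kNormalizeStepMin - 1))
+#define kMaxHistorySize ((UInt32)7 << 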
29) + +#define kStartMaxLen 3 + +static void LzInWindow_Free(CMatchFinder *p, ISzAllocPtr alloc) +{ + if (!p->directInput) + { + ISzAlloc_Free(alloc, p->bufferBase); + p->bufferBase = NULL; + } +} + +/* keepSizeBefore + keepSizeAfter + keepSizeReserv must be < 4G) */ + +static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAllocPtr alloc) +{ + UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv; + if (p->directInput) + { + p->blockSize = blockSize; + return 1; + } + if (!p->bufferBase || p->blockSize != blockSize) + { + LzInWindow_Free(p, alloc); + p->blockSize = blockSize; + p->bufferBase = (Byte *)ISzAlloc_Alloc(alloc, (size_t)blockSize); + } + return (p->bufferBase != NULL); +} + +Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; } + +UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return p->streamPos - p->pos; } + +void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue) +{ + p->posLimit -= subValue; + p->pos -= subValue; + p->streamPos -= subValue; +} + +static void MatchFinder_ReadBlock(CMatchFinder *p) +{ + if (p->streamEndWasReached || p->result != SZ_OK) + return; + + /* We use (p->streamPos - p->pos) value. (p->streamPos < p->pos) is allowed. */ + + if (p->directInput) + { + UInt32 curSize = 0xFFFFFFFF - (p->streamPos - p->pos); + if (curSize > p->directInputRem) + curSize = (UInt32)p->directInputRem; + p->directInputRem -= curSize; + p->streamPos += curSize; + if (p->directInputRem == 0) + p->streamEndWasReached = 1; + return; + } + + for (;;) + { + Byte *dest = p->buffer + (p->streamPos - p->pos); + size_t size = (p->bufferBase + p->blockSize - dest); + if (size == 0) + return; + + p->result = ISeqInStream_Read(p->stream, dest, &size); + if (p->result != SZ_OK) + return; + if (size == 0) + { + p->streamEndWasReached = 1; + return; + } + p->streamPos += (UInt32)size; + if (p->streamPos - p->pos > p->keepSizeAfter) + return; + } +} + +void MatchFinder_MoveBlock(CMatchFinder *p) +{ + memmove(p->bufferBase, + p->buffer - p->keepSizeBefore, + (size_t)(p->streamPos - p->pos) + p->keepSizeBefore); + p->buffer = p->bufferBase + p->keepSizeBefore; +} + +int MatchFinder_NeedMove(CMatchFinder *p) +{ + if (p->directInput) + return 0; + /* if (p->streamEndWasReached) return 0; */ + return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter); +} + +void MatchFinder_ReadIfRequired(CMatchFinder *p) +{ + if (p->streamEndWasReached) + return; + if (p->keepSizeAfter >= p->streamPos - p->pos) + MatchFinder_ReadBlock(p); +} + +static void MatchFinder_CheckAndMoveAndRead(CMatchFinder *p) +{ + if (MatchFinder_NeedMove(p)) + MatchFinder_MoveBlock(p); + MatchFinder_ReadBlock(p); +} + +static void MatchFinder_SetDefaultSettings(CMatchFinder *p) +{ + p->cutValue = 32; + p->btMode = 1; + p->numHashBytes = 4; + p->bigHash = 0; +} + +#define kCrcPoly 0xEDB88320 + +void MatchFinder_Construct(CMatchFinder *p) +{ + unsigned i; + p->bufferBase = NULL; + p->directInput = 0; + p->hash = NULL; + p->expectedDataSize = (UInt64)(Int64)-1; + MatchFinder_SetDefaultSettings(p); + + for (i = 0; i < 256; i++) + { + UInt32 r = (UInt32)i; + unsigned j; + for (j = 0; j < 8; j++) + r = (r >> 1) ^ (kCrcPoly & ((UInt32)0 - (r & 1))); + p->crc[i] = r; + } +} + +static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->hash); + p->hash = NULL; +} + +void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc) +{ + MatchFinder_FreeThisClassMemory(p, alloc); + LzInWindow_Free(p, 
alloc); +} + +static CLzRef* AllocRefs(size_t num, ISzAllocPtr alloc) +{ + size_t sizeInBytes = (size_t)num * sizeof(CLzRef); + if (sizeInBytes / sizeof(CLzRef) != num) + return NULL; + return (CLzRef *)ISzAlloc_Alloc(alloc, sizeInBytes); +} + +int MatchFinder_Create(CMatchFinder *p, UInt32 historySize, + UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter, + ISzAllocPtr alloc) +{ + UInt32 sizeReserv; + + if (historySize > kMaxHistorySize) + { + MatchFinder_Free(p, alloc); + return 0; + } + + sizeReserv = historySize >> 1; + if (historySize >= ((UInt32)3 << 30)) sizeReserv = historySize >> 3; + else if (historySize >= ((UInt32)2 << 30)) sizeReserv = historySize >> 2; + + sizeReserv += (keepAddBufferBefore + matchMaxLen + keepAddBufferAfter) / 2 + (1 << 19); + + p->keepSizeBefore = historySize + keepAddBufferBefore + 1; + p->keepSizeAfter = matchMaxLen + keepAddBufferAfter; + + /* we need one additional byte, since we use MoveBlock after pos++ and before dictionary using */ + + if (LzInWindow_Create(p, sizeReserv, alloc)) + { + UInt32 newCyclicBufferSize = historySize + 1; + UInt32 hs; + p->matchMaxLen = matchMaxLen; + { + p->fixedHashSize = 0; + if (p->numHashBytes == 2) + hs = (1 << 16) - 1; + else + { + hs = historySize; + if (hs > p->expectedDataSize) + hs = (UInt32)p->expectedDataSize; + if (hs != 0) + hs--; + hs |= (hs >> 1); + hs |= (hs >> 2); + hs |= (hs >> 4); + hs |= (hs >> 8); + hs >>= 1; + hs |= 0xFFFF; /* don't change it! It's required for Deflate */ + if (hs > (1 << 24)) + { + if (p->numHashBytes == 3) + hs = (1 << 24) - 1; + else + hs >>= 1; + /* if (bigHash) mode, GetHeads4b() in LzFindMt.c needs (hs >= ((1 << 24) - 1))) */ + } + } + p->hashMask = hs; + hs++; + if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size; + if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size; + if (p->numHashBytes > 4) p->fixedHashSize += kHash4Size; + hs += p->fixedHashSize; + } + + { + size_t newSize; + size_t numSons; + p->historySize = historySize; + p->hashSizeSum = hs; + p->cyclicBufferSize = newCyclicBufferSize; + + numSons = newCyclicBufferSize; + if (p->btMode) + numSons <<= 1; + newSize = hs + numSons; + + if (p->hash && p->numRefs == newSize) + return 1; + + MatchFinder_FreeThisClassMemory(p, alloc); + p->numRefs = newSize; + p->hash = AllocRefs(newSize, alloc); + + if (p->hash) + { + p->son = p->hash + p->hashSizeSum; + return 1; + } + } + } + + MatchFinder_Free(p, alloc); + return 0; +} + +static void MatchFinder_SetLimits(CMatchFinder *p) +{ + UInt32 limit = kMaxValForNormalize - p->pos; + UInt32 limit2 = p->cyclicBufferSize - p->cyclicBufferPos; + + if (limit2 < limit) + limit = limit2; + limit2 = p->streamPos - p->pos; + + if (limit2 <= p->keepSizeAfter) + { + if (limit2 > 0) + limit2 = 1; + } + else + limit2 -= p->keepSizeAfter; + + if (limit2 < limit) + limit = limit2; + + { + UInt32 lenLimit = p->streamPos - p->pos; + if (lenLimit > p->matchMaxLen) + lenLimit = p->matchMaxLen; + p->lenLimit = lenLimit; + } + p->posLimit = p->pos + limit; +} + + +void MatchFinder_Init_LowHash(CMatchFinder *p) +{ + size_t i; + CLzRef *items = p->hash; + size_t numItems = p->fixedHashSize; + for (i = 0; i < numItems; i++) + items[i] = kEmptyHashValue; +} + + +void MatchFinder_Init_HighHash(CMatchFinder *p) +{ + size_t i; + CLzRef *items = p->hash + p->fixedHashSize; + size_t numItems = (size_t)p->hashMask + 1; + for (i = 0; i < numItems; i++) + items[i] = kEmptyHashValue; +} + + +void MatchFinder_Init_3(CMatchFinder *p, int readData) +{ + p->cyclicBufferPos = 0; + 
p->buffer = p->bufferBase; + p->pos = + p->streamPos = p->cyclicBufferSize; + p->result = SZ_OK; + p->streamEndWasReached = 0; + + if (readData) + MatchFinder_ReadBlock(p); + + MatchFinder_SetLimits(p); +} + + +void MatchFinder_Init(CMatchFinder *p) +{ + MatchFinder_Init_HighHash(p); + MatchFinder_Init_LowHash(p); + MatchFinder_Init_3(p, True); +} + + +static UInt32 MatchFinder_GetSubValue(CMatchFinder *p) +{ + return (p->pos - p->historySize - 1) & kNormalizeMask; +} + +void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems) +{ + size_t i; + for (i = 0; i < numItems; i++) + { + UInt32 value = items[i]; + if (value <= subValue) + value = kEmptyHashValue; + else + value -= subValue; + items[i] = value; + } +} + +static void MatchFinder_Normalize(CMatchFinder *p) +{ + UInt32 subValue = MatchFinder_GetSubValue(p); + MatchFinder_Normalize3(subValue, p->hash, p->numRefs); + MatchFinder_ReduceOffsets(p, subValue); +} + + +MY_NO_INLINE +static void MatchFinder_CheckLimits(CMatchFinder *p) +{ + if (p->pos == kMaxValForNormalize) + MatchFinder_Normalize(p); + if (!p->streamEndWasReached && p->keepSizeAfter == p->streamPos - p->pos) + MatchFinder_CheckAndMoveAndRead(p); + if (p->cyclicBufferPos == p->cyclicBufferSize) + p->cyclicBufferPos = 0; + MatchFinder_SetLimits(p); +} + + +/* + (lenLimit > maxLen) +*/ +MY_FORCE_INLINE +static UInt32 * Hc_GetMatchesSpec(unsigned lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, + UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue, + UInt32 *distances, unsigned maxLen) +{ + /* + son[_cyclicBufferPos] = curMatch; + for (;;) + { + UInt32 delta = pos - curMatch; + if (cutValue-- == 0 || delta >= _cyclicBufferSize) + return distances; + { + const Byte *pb = cur - delta; + curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)]; + if (pb[maxLen] == cur[maxLen] && *pb == *cur) + { + UInt32 len = 0; + while (++len != lenLimit) + if (pb[len] != cur[len]) + break; + if (maxLen < len) + { + maxLen = len; + *distances++ = len; + *distances++ = delta - 1; + if (len == lenLimit) + return distances; + } + } + } + } + */ + + const Byte *lim = cur + lenLimit; + son[_cyclicBufferPos] = curMatch; + do + { + UInt32 delta = pos - curMatch; + if (delta >= _cyclicBufferSize) + break; + { + ptrdiff_t diff; + curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)]; + diff = (ptrdiff_t)0 - delta; + if (cur[maxLen] == cur[maxLen + diff]) + { + const Byte *c = cur; + while (*c == c[diff]) + { + if (++c == lim) + { + distances[0] = (UInt32)(lim - cur); + distances[1] = delta - 1; + return distances + 2; + } + } + { + unsigned len = (unsigned)(c - cur); + if (maxLen < len) + { + maxLen = len; + distances[0] = (UInt32)len; + distances[1] = delta - 1; + distances += 2; + } + } + } + } + } + while (--cutValue); + + return distances; +} + + +MY_FORCE_INLINE +UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, + UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue, + UInt32 *distances, UInt32 maxLen) +{ + CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1; + CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1); + unsigned len0 = 0, len1 = 0; + for (;;) + { + UInt32 delta = pos - curMatch; + if (cutValue-- == 0 || delta >= _cyclicBufferSize) + { + *ptr0 = *ptr1 = kEmptyHashValue; + return distances; + } + { + CLzRef *pair = son + ((size_t)(_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? 
_cyclicBufferSize : 0)) << 1); + const Byte *pb = cur - delta; + unsigned len = (len0 < len1 ? len0 : len1); + UInt32 pair0 = pair[0]; + if (pb[len] == cur[len]) + { + if (++len != lenLimit && pb[len] == cur[len]) + while (++len != lenLimit) + if (pb[len] != cur[len]) + break; + if (maxLen < len) + { + maxLen = (UInt32)len; + *distances++ = (UInt32)len; + *distances++ = delta - 1; + if (len == lenLimit) + { + *ptr1 = pair0; + *ptr0 = pair[1]; + return distances; + } + } + } + if (pb[len] < cur[len]) + { + *ptr1 = curMatch; + ptr1 = pair + 1; + curMatch = *ptr1; + len1 = len; + } + else + { + *ptr0 = curMatch; + ptr0 = pair; + curMatch = *ptr0; + len0 = len; + } + } + } +} + +static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son, + UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue) +{ + CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1; + CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1); + unsigned len0 = 0, len1 = 0; + for (;;) + { + UInt32 delta = pos - curMatch; + if (cutValue-- == 0 || delta >= _cyclicBufferSize) + { + *ptr0 = *ptr1 = kEmptyHashValue; + return; + } + { + CLzRef *pair = son + ((size_t)(_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1); + const Byte *pb = cur - delta; + unsigned len = (len0 < len1 ? len0 : len1); + if (pb[len] == cur[len]) + { + while (++len != lenLimit) + if (pb[len] != cur[len]) + break; + { + if (len == lenLimit) + { + *ptr1 = pair[0]; + *ptr0 = pair[1]; + return; + } + } + } + if (pb[len] < cur[len]) + { + *ptr1 = curMatch; + ptr1 = pair + 1; + curMatch = *ptr1; + len1 = len; + } + else + { + *ptr0 = curMatch; + ptr0 = pair; + curMatch = *ptr0; + len0 = len; + } + } + } +} + +#define MOVE_POS \ + ++p->cyclicBufferPos; \ + p->buffer++; \ + if (++p->pos == p->posLimit) MatchFinder_CheckLimits(p); + +#define MOVE_POS_RET MOVE_POS return (UInt32)offset; + +static void MatchFinder_MovePos(CMatchFinder *p) { MOVE_POS; } + +#define GET_MATCHES_HEADER2(minLen, ret_op) \ + unsigned lenLimit; UInt32 hv; const Byte *cur; UInt32 curMatch; \ + lenLimit = (unsigned)p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \ + cur = p->buffer; + +#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return 0) +#define SKIP_HEADER(minLen) GET_MATCHES_HEADER2(minLen, continue) + +#define MF_PARAMS(p) p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue + +#define GET_MATCHES_FOOTER(offset, maxLen) \ + offset = (unsigned)(GetMatchesSpec1((UInt32)lenLimit, curMatch, MF_PARAMS(p), \ + distances + offset, (UInt32)maxLen) - distances); MOVE_POS_RET; + +#define SKIP_FOOTER \ + SkipMatchesSpec((UInt32)lenLimit, curMatch, MF_PARAMS(p)); MOVE_POS; + +#define UPDATE_maxLen { \ + ptrdiff_t diff = (ptrdiff_t)0 - d2; \ + const Byte *c = cur + maxLen; \ + const Byte *lim = cur + lenLimit; \ + for (; c != lim; c++) if (*(c + diff) != *c) break; \ + maxLen = (unsigned)(c - cur); } + +static UInt32 Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) +{ + unsigned offset; + GET_MATCHES_HEADER(2) + HASH2_CALC; + curMatch = p->hash[hv]; + p->hash[hv] = p->pos; + offset = 0; + GET_MATCHES_FOOTER(offset, 1) +} + +UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) +{ + unsigned offset; + GET_MATCHES_HEADER(3) + HASH_ZIP_CALC; + curMatch = p->hash[hv]; + p->hash[hv] = p->pos; + offset = 0; + GET_MATCHES_FOOTER(offset, 2) +} + +static UInt32 Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 
*distances) +{ + UInt32 h2, d2, pos; + unsigned maxLen, offset; + UInt32 *hash; + GET_MATCHES_HEADER(3) + + HASH3_CALC; + + hash = p->hash; + pos = p->pos; + + d2 = pos - hash[h2]; + + curMatch = (hash + kFix3HashSize)[hv]; + + hash[h2] = pos; + (hash + kFix3HashSize)[hv] = pos; + + maxLen = 2; + offset = 0; + + if (d2 < p->cyclicBufferSize && *(cur - d2) == *cur) + { + UPDATE_maxLen + distances[0] = (UInt32)maxLen; + distances[1] = d2 - 1; + offset = 2; + if (maxLen == lenLimit) + { + SkipMatchesSpec((UInt32)lenLimit, curMatch, MF_PARAMS(p)); + MOVE_POS_RET; + } + } + + GET_MATCHES_FOOTER(offset, maxLen) +} + +static UInt32 Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) +{ + UInt32 h2, h3, d2, d3, pos; + unsigned maxLen, offset; + UInt32 *hash; + GET_MATCHES_HEADER(4) + + HASH4_CALC; + + hash = p->hash; + pos = p->pos; + + d2 = pos - hash [h2]; + d3 = pos - (hash + kFix3HashSize)[h3]; + + curMatch = (hash + kFix4HashSize)[hv]; + + hash [h2] = pos; + (hash + kFix3HashSize)[h3] = pos; + (hash + kFix4HashSize)[hv] = pos; + + maxLen = 0; + offset = 0; + + if (d2 < p->cyclicBufferSize && *(cur - d2) == *cur) + { + maxLen = 2; + distances[0] = 2; + distances[1] = d2 - 1; + offset = 2; + } + + if (d2 != d3 && d3 < p->cyclicBufferSize && *(cur - d3) == *cur) + { + maxLen = 3; + distances[(size_t)offset + 1] = d3 - 1; + offset += 2; + d2 = d3; + } + + if (offset != 0) + { + UPDATE_maxLen + distances[(size_t)offset - 2] = (UInt32)maxLen; + if (maxLen == lenLimit) + { + SkipMatchesSpec((UInt32)lenLimit, curMatch, MF_PARAMS(p)); + MOVE_POS_RET; + } + } + + if (maxLen < 3) + maxLen = 3; + + GET_MATCHES_FOOTER(offset, maxLen) +} + +/* +static UInt32 Bt5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) +{ + UInt32 h2, h3, h4, d2, d3, d4, maxLen, offset, pos; + UInt32 *hash; + GET_MATCHES_HEADER(5) + + HASH5_CALC; + + hash = p->hash; + pos = p->pos; + + d2 = pos - hash [h2]; + d3 = pos - (hash + kFix3HashSize)[h3]; + d4 = pos - (hash + kFix4HashSize)[h4]; + + curMatch = (hash + kFix5HashSize)[hv]; + + hash [h2] = pos; + (hash + kFix3HashSize)[h3] = pos; + (hash + kFix4HashSize)[h4] = pos; + (hash + kFix5HashSize)[hv] = pos; + + maxLen = 0; + offset = 0; + + if (d2 < p->cyclicBufferSize && *(cur - d2) == *cur) + { + distances[0] = maxLen = 2; + distances[1] = d2 - 1; + offset = 2; + if (*(cur - d2 + 2) == cur[2]) + distances[0] = maxLen = 3; + else if (d3 < p->cyclicBufferSize && *(cur - d3) == *cur) + { + distances[2] = maxLen = 3; + distances[3] = d3 - 1; + offset = 4; + d2 = d3; + } + } + else if (d3 < p->cyclicBufferSize && *(cur - d3) == *cur) + { + distances[0] = maxLen = 3; + distances[1] = d3 - 1; + offset = 2; + d2 = d3; + } + + if (d2 != d4 && d4 < p->cyclicBufferSize + && *(cur - d4) == *cur + && *(cur - d4 + 3) == *(cur + 3)) + { + maxLen = 4; + distances[(size_t)offset + 1] = d4 - 1; + offset += 2; + d2 = d4; + } + + if (offset != 0) + { + UPDATE_maxLen + distances[(size_t)offset - 2] = maxLen; + if (maxLen == lenLimit) + { + SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p)); + MOVE_POS_RET; + } + } + + if (maxLen < 4) + maxLen = 4; + + GET_MATCHES_FOOTER(offset, maxLen) +} +*/ + +static UInt32 Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) +{ + UInt32 h2, h3, d2, d3, pos; + unsigned maxLen, offset; + UInt32 *hash; + GET_MATCHES_HEADER(4) + + HASH4_CALC; + + hash = p->hash; + pos = p->pos; + + d2 = pos - hash [h2]; + d3 = pos - (hash + kFix3HashSize)[h3]; + curMatch = (hash + kFix4HashSize)[hv]; + + hash [h2] = pos; + (hash + kFix3HashSize)[h3] = pos; 
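+  /* the 2- and 3-byte hash heads were just rolled forward to the current
+     position, and the 4-byte head follows below; d2 and d3 above are the
+     distances back to the previous 2- and 3-byte prefix occurrences */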
+ (hash + kFix4HashSize)[hv] = pos; + + maxLen = 0; + offset = 0; + + if (d2 < p->cyclicBufferSize && *(cur - d2) == *cur) + { + maxLen = 2; + distances[0] = 2; + distances[1] = d2 - 1; + offset = 2; + } + + if (d2 != d3 && d3 < p->cyclicBufferSize && *(cur - d3) == *cur) + { + maxLen = 3; + distances[(size_t)offset + 1] = d3 - 1; + offset += 2; + d2 = d3; + } + + if (offset != 0) + { + UPDATE_maxLen + distances[(size_t)offset - 2] = (UInt32)maxLen; + if (maxLen == lenLimit) + { + p->son[p->cyclicBufferPos] = curMatch; + MOVE_POS_RET; + } + } + + if (maxLen < 3) + maxLen = 3; + + offset = (unsigned)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p), + distances + offset, maxLen) - (distances)); + MOVE_POS_RET +} + +/* +static UInt32 Hc5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) +{ + UInt32 h2, h3, h4, d2, d3, d4, maxLen, offset, pos + UInt32 *hash; + GET_MATCHES_HEADER(5) + + HASH5_CALC; + + hash = p->hash; + pos = p->pos; + + d2 = pos - hash [h2]; + d3 = pos - (hash + kFix3HashSize)[h3]; + d4 = pos - (hash + kFix4HashSize)[h4]; + + curMatch = (hash + kFix5HashSize)[hv]; + + hash [h2] = pos; + (hash + kFix3HashSize)[h3] = pos; + (hash + kFix4HashSize)[h4] = pos; + (hash + kFix5HashSize)[hv] = pos; + + maxLen = 0; + offset = 0; + + if (d2 < p->cyclicBufferSize && *(cur - d2) == *cur) + { + distances[0] = maxLen = 2; + distances[1] = d2 - 1; + offset = 2; + if (*(cur - d2 + 2) == cur[2]) + distances[0] = maxLen = 3; + else if (d3 < p->cyclicBufferSize && *(cur - d3) == *cur) + { + distances[2] = maxLen = 3; + distances[3] = d3 - 1; + offset = 4; + d2 = d3; + } + } + else if (d3 < p->cyclicBufferSize && *(cur - d3) == *cur) + { + distances[0] = maxLen = 3; + distances[1] = d3 - 1; + offset = 2; + d2 = d3; + } + + if (d2 != d4 && d4 < p->cyclicBufferSize + && *(cur - d4) == *cur + && *(cur - d4 + 3) == *(cur + 3)) + { + maxLen = 4; + distances[(size_t)offset + 1] = d4 - 1; + offset += 2; + d2 = d4; + } + + if (offset != 0) + { + UPDATE_maxLen + distances[(size_t)offset - 2] = maxLen; + if (maxLen == lenLimit) + { + p->son[p->cyclicBufferPos] = curMatch; + MOVE_POS_RET; + } + } + + if (maxLen < 4) + maxLen = 4; + + offset = (UInt32)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p), + distances + offset, maxLen) - (distances)); + MOVE_POS_RET +} +*/ + +UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances) +{ + unsigned offset; + GET_MATCHES_HEADER(3) + HASH_ZIP_CALC; + curMatch = p->hash[hv]; + p->hash[hv] = p->pos; + offset = (unsigned)(Hc_GetMatchesSpec(lenLimit, curMatch, MF_PARAMS(p), + distances, 2) - (distances)); + MOVE_POS_RET +} + +static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num) +{ + do + { + SKIP_HEADER(2) + HASH2_CALC; + curMatch = p->hash[hv]; + p->hash[hv] = p->pos; + SKIP_FOOTER + } + while (--num != 0); +} + +void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num) +{ + do + { + SKIP_HEADER(3) + HASH_ZIP_CALC; + curMatch = p->hash[hv]; + p->hash[hv] = p->pos; + SKIP_FOOTER + } + while (--num != 0); +} + +static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num) +{ + do + { + UInt32 h2; + UInt32 *hash; + SKIP_HEADER(3) + HASH3_CALC; + hash = p->hash; + curMatch = (hash + kFix3HashSize)[hv]; + hash[h2] = + (hash + kFix3HashSize)[hv] = p->pos; + SKIP_FOOTER + } + while (--num != 0); +} + +static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num) +{ + do + { + UInt32 h2, h3; + UInt32 *hash; + SKIP_HEADER(4) + HASH4_CALC; + hash = p->hash; + curMatch = (hash + kFix4HashSize)[hv]; + hash [h2] = + (hash + kFix3HashSize)[h3] = 
+ (hash + kFix4HashSize)[hv] = p->pos; + SKIP_FOOTER + } + while (--num != 0); +} + +/* +static void Bt5_MatchFinder_Skip(CMatchFinder *p, UInt32 num) +{ + do + { + UInt32 h2, h3, h4; + UInt32 *hash; + SKIP_HEADER(5) + HASH5_CALC; + hash = p->hash; + curMatch = (hash + kFix5HashSize)[hv]; + hash [h2] = + (hash + kFix3HashSize)[h3] = + (hash + kFix4HashSize)[h4] = + (hash + kFix5HashSize)[hv] = p->pos; + SKIP_FOOTER + } + while (--num != 0); +} +*/ + +static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num) +{ + do + { + UInt32 h2, h3; + UInt32 *hash; + SKIP_HEADER(4) + HASH4_CALC; + hash = p->hash; + curMatch = (hash + kFix4HashSize)[hv]; + hash [h2] = + (hash + kFix3HashSize)[h3] = + (hash + kFix4HashSize)[hv] = p->pos; + p->son[p->cyclicBufferPos] = curMatch; + MOVE_POS + } + while (--num != 0); +} + +/* +static void Hc5_MatchFinder_Skip(CMatchFinder *p, UInt32 num) +{ + do + { + UInt32 h2, h3, h4; + UInt32 *hash; + SKIP_HEADER(5) + HASH5_CALC; + hash = p->hash; + curMatch = hash + kFix5HashSize)[hv]; + hash [h2] = + (hash + kFix3HashSize)[h3] = + (hash + kFix4HashSize)[h4] = + (hash + kFix5HashSize)[hv] = p->pos; + p->son[p->cyclicBufferPos] = curMatch; + MOVE_POS + } + while (--num != 0); +} +*/ + +void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num) +{ + do + { + SKIP_HEADER(3) + HASH_ZIP_CALC; + curMatch = p->hash[hv]; + p->hash[hv] = p->pos; + p->son[p->cyclicBufferPos] = curMatch; + MOVE_POS + } + while (--num != 0); +} + +void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable) +{ + vTable->Init = (Mf_Init_Func)MatchFinder_Init; + vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes; + vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos; + if (!p->btMode) + { + /* if (p->numHashBytes <= 4) */ + { + vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches; + vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip; + } + /* + else + { + vTable->GetMatches = (Mf_GetMatches_Func)Hc5_MatchFinder_GetMatches; + vTable->Skip = (Mf_Skip_Func)Hc5_MatchFinder_Skip; + } + */ + } + else if (p->numHashBytes == 2) + { + vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches; + vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip; + } + else if (p->numHashBytes == 3) + { + vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches; + vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip; + } + else /* if (p->numHashBytes == 4) */ + { + vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches; + vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip; + } + /* + else + { + vTable->GetMatches = (Mf_GetMatches_Func)Bt5_MatchFinder_GetMatches; + vTable->Skip = (Mf_Skip_Func)Bt5_MatchFinder_Skip; + } + */ +} diff --git a/3rdparty/7z/src/LzFind.h b/3rdparty/7z/src/LzFind.h new file mode 100644 index 0000000000..c77added7b --- /dev/null +++ b/3rdparty/7z/src/LzFind.h @@ -0,0 +1,121 @@ +/* LzFind.h -- Match finder for LZ algorithms +2017-06-10 : Igor Pavlov : Public domain */ + +#ifndef __LZ_FIND_H +#define __LZ_FIND_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +typedef UInt32 CLzRef; + +typedef struct _CMatchFinder +{ + Byte *buffer; + UInt32 pos; + UInt32 posLimit; + UInt32 streamPos; + UInt32 lenLimit; + + UInt32 cyclicBufferPos; + UInt32 cyclicBufferSize; /* it must be = (historySize + 1) */ + + Byte streamEndWasReached; + Byte btMode; + Byte bigHash; + Byte directInput; + + UInt32 matchMaxLen; + CLzRef *hash; + CLzRef *son; + UInt32 hashMask; + UInt32 cutValue; + + 
Byte *bufferBase; + ISeqInStream *stream; + + UInt32 blockSize; + UInt32 keepSizeBefore; + UInt32 keepSizeAfter; + + UInt32 numHashBytes; + size_t directInputRem; + UInt32 historySize; + UInt32 fixedHashSize; + UInt32 hashSizeSum; + SRes result; + UInt32 crc[256]; + size_t numRefs; + + UInt64 expectedDataSize; +} CMatchFinder; + +#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer) + +#define Inline_MatchFinder_GetNumAvailableBytes(p) ((p)->streamPos - (p)->pos) + +#define Inline_MatchFinder_IsFinishedOK(p) \ + ((p)->streamEndWasReached \ + && (p)->streamPos == (p)->pos \ + && (!(p)->directInput || (p)->directInputRem == 0)) + +int MatchFinder_NeedMove(CMatchFinder *p); +Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p); +void MatchFinder_MoveBlock(CMatchFinder *p); +void MatchFinder_ReadIfRequired(CMatchFinder *p); + +void MatchFinder_Construct(CMatchFinder *p); + +/* Conditions: + historySize <= 3 GB + keepAddBufferBefore + matchMaxLen + keepAddBufferAfter < 511MB +*/ +int MatchFinder_Create(CMatchFinder *p, UInt32 historySize, + UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter, + ISzAllocPtr alloc); +void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc); +void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems); +void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue); + +UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *buffer, CLzRef *son, + UInt32 _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue, + UInt32 *distances, UInt32 maxLen); + +/* +Conditions: + Mf_GetNumAvailableBytes_Func must be called before each Mf_GetMatchLen_Func. + Mf_GetPointerToCurrentPos_Func's result must be used only before any other function +*/ + +typedef void (*Mf_Init_Func)(void *object); +typedef UInt32 (*Mf_GetNumAvailableBytes_Func)(void *object); +typedef const Byte * (*Mf_GetPointerToCurrentPos_Func)(void *object); +typedef UInt32 (*Mf_GetMatches_Func)(void *object, UInt32 *distances); +typedef void (*Mf_Skip_Func)(void *object, UInt32); + +typedef struct _IMatchFinder +{ + Mf_Init_Func Init; + Mf_GetNumAvailableBytes_Func GetNumAvailableBytes; + Mf_GetPointerToCurrentPos_Func GetPointerToCurrentPos; + Mf_GetMatches_Func GetMatches; + Mf_Skip_Func Skip; +} IMatchFinder; + +void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable); + +void MatchFinder_Init_LowHash(CMatchFinder *p); +void MatchFinder_Init_HighHash(CMatchFinder *p); +void MatchFinder_Init_3(CMatchFinder *p, int readData); +void MatchFinder_Init(CMatchFinder *p); + +UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances); +UInt32 Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances); + +void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num); +void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/LzFindMt.c b/3rdparty/7z/src/LzFindMt.c new file mode 100644 index 0000000000..df32146f92 --- /dev/null +++ b/3rdparty/7z/src/LzFindMt.c @@ -0,0 +1,853 @@ +/* LzFindMt.c -- multithreaded Match finder for LZ algorithms +2018-12-29 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "LzHash.h" + +#include "LzFindMt.h" + +static void MtSync_Construct(CMtSync *p) +{ + p->wasCreated = False; + p->csWasInitialized = False; + p->csWasEntered = False; + Thread_Construct(&p->thread); + Event_Construct(&p->canStart); + Event_Construct(&p->wasStarted); + Event_Construct(&p->wasStopped); + 
Semaphore_Construct(&p->freeSemaphore); + Semaphore_Construct(&p->filledSemaphore); +} + +static void MtSync_GetNextBlock(CMtSync *p) +{ + if (p->needStart) + { + p->numProcessedBlocks = 1; + p->needStart = False; + p->stopWriting = False; + p->exit = False; + Event_Reset(&p->wasStarted); + Event_Reset(&p->wasStopped); + + Event_Set(&p->canStart); + Event_Wait(&p->wasStarted); + + // if (mt) MatchFinder_Init_LowHash(mt->MatchFinder); + } + else + { + CriticalSection_Leave(&p->cs); + p->csWasEntered = False; + p->numProcessedBlocks++; + Semaphore_Release1(&p->freeSemaphore); + } + Semaphore_Wait(&p->filledSemaphore); + CriticalSection_Enter(&p->cs); + p->csWasEntered = True; +} + +/* MtSync_StopWriting must be called if Writing was started */ + +static void MtSync_StopWriting(CMtSync *p) +{ + UInt32 myNumBlocks = p->numProcessedBlocks; + if (!Thread_WasCreated(&p->thread) || p->needStart) + return; + p->stopWriting = True; + if (p->csWasEntered) + { + CriticalSection_Leave(&p->cs); + p->csWasEntered = False; + } + Semaphore_Release1(&p->freeSemaphore); + + Event_Wait(&p->wasStopped); + + while (myNumBlocks++ != p->numProcessedBlocks) + { + Semaphore_Wait(&p->filledSemaphore); + Semaphore_Release1(&p->freeSemaphore); + } + p->needStart = True; +} + +static void MtSync_Destruct(CMtSync *p) +{ + if (Thread_WasCreated(&p->thread)) + { + MtSync_StopWriting(p); + p->exit = True; + if (p->needStart) + Event_Set(&p->canStart); + Thread_Wait(&p->thread); + Thread_Close(&p->thread); + } + if (p->csWasInitialized) + { + CriticalSection_Delete(&p->cs); + p->csWasInitialized = False; + } + + Event_Close(&p->canStart); + Event_Close(&p->wasStarted); + Event_Close(&p->wasStopped); + Semaphore_Close(&p->freeSemaphore); + Semaphore_Close(&p->filledSemaphore); + + p->wasCreated = False; +} + +#define RINOK_THREAD(x) { if ((x) != 0) return SZ_ERROR_THREAD; } + +static SRes MtSync_Create2(CMtSync *p, THREAD_FUNC_TYPE startAddress, void *obj, UInt32 numBlocks) +{ + if (p->wasCreated) + return SZ_OK; + + RINOK_THREAD(CriticalSection_Init(&p->cs)); + p->csWasInitialized = True; + + RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->canStart)); + RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->wasStarted)); + RINOK_THREAD(AutoResetEvent_CreateNotSignaled(&p->wasStopped)); + + RINOK_THREAD(Semaphore_Create(&p->freeSemaphore, numBlocks, numBlocks)); + RINOK_THREAD(Semaphore_Create(&p->filledSemaphore, 0, numBlocks)); + + p->needStart = True; + + RINOK_THREAD(Thread_Create(&p->thread, startAddress, obj)); + p->wasCreated = True; + return SZ_OK; +} + +static SRes MtSync_Create(CMtSync *p, THREAD_FUNC_TYPE startAddress, void *obj, UInt32 numBlocks) +{ + SRes res = MtSync_Create2(p, startAddress, obj, numBlocks); + if (res != SZ_OK) + MtSync_Destruct(p); + return res; +} + +void MtSync_Init(CMtSync *p) { p->needStart = True; } + +#define kMtMaxValForNormalize 0xFFFFFFFF + +#define DEF_GetHeads2(name, v, action) \ + static void GetHeads ## name(const Byte *p, UInt32 pos, \ + UInt32 *hash, UInt32 hashMask, UInt32 *heads, UInt32 numHeads, const UInt32 *crc) \ + { action; for (; numHeads != 0; numHeads--) { \ + const UInt32 value = (v); p++; *heads++ = pos - hash[value]; hash[value] = pos++; } } + +#define DEF_GetHeads(name, v) DEF_GetHeads2(name, v, ;) + +DEF_GetHeads2(2, (p[0] | ((UInt32)p[1] << 8)), UNUSED_VAR(hashMask); UNUSED_VAR(crc); ) +DEF_GetHeads(3, (crc[p[0]] ^ p[1] ^ ((UInt32)p[2] << 8)) & hashMask) +DEF_GetHeads(4, (crc[p[0]] ^ p[1] ^ ((UInt32)p[2] << 8) ^ (crc[p[3]] << 5)) & hashMask) +DEF_GetHeads(4b, 
(crc[p[0]] ^ p[1] ^ ((UInt32)p[2] << 8) ^ ((UInt32)p[3] << 16)) & hashMask) +/* DEF_GetHeads(5, (crc[p[0]] ^ p[1] ^ ((UInt32)p[2] << 8) ^ (crc[p[3]] << 5) ^ (crc[p[4]] << 3)) & hashMask) */ + +static void HashThreadFunc(CMatchFinderMt *mt) +{ + CMtSync *p = &mt->hashSync; + for (;;) + { + UInt32 numProcessedBlocks = 0; + Event_Wait(&p->canStart); + Event_Set(&p->wasStarted); + + MatchFinder_Init_HighHash(mt->MatchFinder); + + for (;;) + { + if (p->exit) + return; + if (p->stopWriting) + { + p->numProcessedBlocks = numProcessedBlocks; + Event_Set(&p->wasStopped); + break; + } + + { + CMatchFinder *mf = mt->MatchFinder; + if (MatchFinder_NeedMove(mf)) + { + CriticalSection_Enter(&mt->btSync.cs); + CriticalSection_Enter(&mt->hashSync.cs); + { + const Byte *beforePtr = Inline_MatchFinder_GetPointerToCurrentPos(mf); + ptrdiff_t offset; + MatchFinder_MoveBlock(mf); + offset = beforePtr - Inline_MatchFinder_GetPointerToCurrentPos(mf); + mt->pointerToCurPos -= offset; + mt->buffer -= offset; + } + CriticalSection_Leave(&mt->btSync.cs); + CriticalSection_Leave(&mt->hashSync.cs); + continue; + } + + Semaphore_Wait(&p->freeSemaphore); + + MatchFinder_ReadIfRequired(mf); + if (mf->pos > (kMtMaxValForNormalize - kMtHashBlockSize)) + { + UInt32 subValue = (mf->pos - mf->historySize - 1); + MatchFinder_ReduceOffsets(mf, subValue); + MatchFinder_Normalize3(subValue, mf->hash + mf->fixedHashSize, (size_t)mf->hashMask + 1); + } + { + UInt32 *heads = mt->hashBuf + ((numProcessedBlocks++) & kMtHashNumBlocksMask) * kMtHashBlockSize; + UInt32 num = mf->streamPos - mf->pos; + heads[0] = 2; + heads[1] = num; + if (num >= mf->numHashBytes) + { + num = num - mf->numHashBytes + 1; + if (num > kMtHashBlockSize - 2) + num = kMtHashBlockSize - 2; + mt->GetHeadsFunc(mf->buffer, mf->pos, mf->hash + mf->fixedHashSize, mf->hashMask, heads + 2, num, mf->crc); + heads[0] = 2 + num; + } + mf->pos += num; + mf->buffer += num; + } + } + + Semaphore_Release1(&p->filledSemaphore); + } + } +} + +static void MatchFinderMt_GetNextBlock_Hash(CMatchFinderMt *p) +{ + MtSync_GetNextBlock(&p->hashSync); + p->hashBufPosLimit = p->hashBufPos = ((p->hashSync.numProcessedBlocks - 1) & kMtHashNumBlocksMask) * kMtHashBlockSize; + p->hashBufPosLimit += p->hashBuf[p->hashBufPos++]; + p->hashNumAvail = p->hashBuf[p->hashBufPos++]; +} + +#define kEmptyHashValue 0 + +#define MFMT_GM_INLINE + +#ifdef MFMT_GM_INLINE + +/* + we use size_t for _cyclicBufferPos instead of UInt32 + to eliminate "movsx" BUG in old MSVC x64 compiler. +*/ + +MY_NO_INLINE +static UInt32 *GetMatchesSpecN(UInt32 lenLimit, UInt32 pos, const Byte *cur, CLzRef *son, + size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 _cutValue, + UInt32 *distances, UInt32 _maxLen, const UInt32 *hash, const UInt32 *limit, UInt32 size, UInt32 *posRes) +{ + do + { + UInt32 *_distances = ++distances; + UInt32 delta = *hash++; + + CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1; + CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1); + unsigned len0 = 0, len1 = 0; + UInt32 cutValue = _cutValue; + unsigned maxLen = (unsigned)_maxLen; + + /* + if (size > 1) + { + UInt32 delta = *hash; + if (delta < _cyclicBufferSize) + { + UInt32 cyc1 = _cyclicBufferPos + 1; + CLzRef *pair = son + ((size_t)(cyc1 - delta + ((delta > cyc1) ? 
_cyclicBufferSize : 0)) << 1); + Byte b = *(cur + 1 - delta); + _distances[0] = pair[0]; + _distances[1] = b; + } + } + */ + if (cutValue == 0 || delta >= _cyclicBufferSize) + { + *ptr0 = *ptr1 = kEmptyHashValue; + } + else + for(;;) + { + { + CLzRef *pair = son + ((size_t)(_cyclicBufferPos - delta + ((_cyclicBufferPos < delta) ? _cyclicBufferSize : 0)) << 1); + const Byte *pb = cur - delta; + unsigned len = (len0 < len1 ? len0 : len1); + UInt32 pair0 = *pair; + if (pb[len] == cur[len]) + { + if (++len != lenLimit && pb[len] == cur[len]) + while (++len != lenLimit) + if (pb[len] != cur[len]) + break; + if (maxLen < len) + { + maxLen = len; + *distances++ = (UInt32)len; + *distances++ = delta - 1; + if (len == lenLimit) + { + UInt32 pair1 = pair[1]; + *ptr1 = pair0; + *ptr0 = pair1; + break; + } + } + } + { + UInt32 curMatch = pos - delta; + // delta = pos - *pair; + // delta = pos - pair[((UInt32)pb[len] - (UInt32)cur[len]) >> 31]; + if (pb[len] < cur[len]) + { + delta = pos - pair[1]; + *ptr1 = curMatch; + ptr1 = pair + 1; + len1 = len; + } + else + { + delta = pos - *pair; + *ptr0 = curMatch; + ptr0 = pair; + len0 = len; + } + } + } + if (--cutValue == 0 || delta >= _cyclicBufferSize) + { + *ptr0 = *ptr1 = kEmptyHashValue; + break; + } + } + pos++; + _cyclicBufferPos++; + cur++; + { + UInt32 num = (UInt32)(distances - _distances); + _distances[-1] = num; + } + } + while (distances < limit && --size != 0); + *posRes = pos; + return distances; +} + +#endif + + + +static void BtGetMatches(CMatchFinderMt *p, UInt32 *distances) +{ + UInt32 numProcessed = 0; + UInt32 curPos = 2; + UInt32 limit = kMtBtBlockSize - (p->matchMaxLen * 2); // * 2 + + distances[1] = p->hashNumAvail; + + while (curPos < limit) + { + if (p->hashBufPos == p->hashBufPosLimit) + { + MatchFinderMt_GetNextBlock_Hash(p); + distances[1] = numProcessed + p->hashNumAvail; + if (p->hashNumAvail >= p->numHashBytes) + continue; + distances[0] = curPos + p->hashNumAvail; + distances += curPos; + for (; p->hashNumAvail != 0; p->hashNumAvail--) + *distances++ = 0; + return; + } + { + UInt32 size = p->hashBufPosLimit - p->hashBufPos; + UInt32 lenLimit = p->matchMaxLen; + UInt32 pos = p->pos; + UInt32 cyclicBufferPos = p->cyclicBufferPos; + if (lenLimit >= p->hashNumAvail) + lenLimit = p->hashNumAvail; + { + UInt32 size2 = p->hashNumAvail - lenLimit + 1; + if (size2 < size) + size = size2; + size2 = p->cyclicBufferSize - cyclicBufferPos; + if (size2 < size) + size = size2; + } + + #ifndef MFMT_GM_INLINE + while (curPos < limit && size-- != 0) + { + UInt32 *startDistances = distances + curPos; + UInt32 num = (UInt32)(GetMatchesSpec1(lenLimit, pos - p->hashBuf[p->hashBufPos++], + pos, p->buffer, p->son, cyclicBufferPos, p->cyclicBufferSize, p->cutValue, + startDistances + 1, p->numHashBytes - 1) - startDistances); + *startDistances = num - 1; + curPos += num; + cyclicBufferPos++; + pos++; + p->buffer++; + } + #else + { + UInt32 posRes; + curPos = (UInt32)(GetMatchesSpecN(lenLimit, pos, p->buffer, p->son, cyclicBufferPos, p->cyclicBufferSize, p->cutValue, + distances + curPos, p->numHashBytes - 1, p->hashBuf + p->hashBufPos, + distances + limit, + size, &posRes) - distances); + p->hashBufPos += posRes - pos; + cyclicBufferPos += posRes - pos; + p->buffer += posRes - pos; + pos = posRes; + } + #endif + + numProcessed += pos - p->pos; + p->hashNumAvail -= pos - p->pos; + p->pos = pos; + if (cyclicBufferPos == p->cyclicBufferSize) + cyclicBufferPos = 0; + p->cyclicBufferPos = cyclicBufferPos; + } + } + + distances[0] = curPos; +} + +static 
void BtFillBlock(CMatchFinderMt *p, UInt32 globalBlockIndex) +{ + CMtSync *sync = &p->hashSync; + if (!sync->needStart) + { + CriticalSection_Enter(&sync->cs); + sync->csWasEntered = True; + } + + BtGetMatches(p, p->btBuf + (globalBlockIndex & kMtBtNumBlocksMask) * kMtBtBlockSize); + + if (p->pos > kMtMaxValForNormalize - kMtBtBlockSize) + { + UInt32 subValue = p->pos - p->cyclicBufferSize; + MatchFinder_Normalize3(subValue, p->son, (size_t)p->cyclicBufferSize * 2); + p->pos -= subValue; + } + + if (!sync->needStart) + { + CriticalSection_Leave(&sync->cs); + sync->csWasEntered = False; + } +} + +void BtThreadFunc(CMatchFinderMt *mt) +{ + CMtSync *p = &mt->btSync; + for (;;) + { + UInt32 blockIndex = 0; + Event_Wait(&p->canStart); + Event_Set(&p->wasStarted); + for (;;) + { + if (p->exit) + return; + if (p->stopWriting) + { + p->numProcessedBlocks = blockIndex; + MtSync_StopWriting(&mt->hashSync); + Event_Set(&p->wasStopped); + break; + } + Semaphore_Wait(&p->freeSemaphore); + BtFillBlock(mt, blockIndex++); + Semaphore_Release1(&p->filledSemaphore); + } + } +} + +void MatchFinderMt_Construct(CMatchFinderMt *p) +{ + p->hashBuf = NULL; + MtSync_Construct(&p->hashSync); + MtSync_Construct(&p->btSync); +} + +static void MatchFinderMt_FreeMem(CMatchFinderMt *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->hashBuf); + p->hashBuf = NULL; +} + +void MatchFinderMt_Destruct(CMatchFinderMt *p, ISzAllocPtr alloc) +{ + MtSync_Destruct(&p->hashSync); + MtSync_Destruct(&p->btSync); + MatchFinderMt_FreeMem(p, alloc); +} + +#define kHashBufferSize (kMtHashBlockSize * kMtHashNumBlocks) +#define kBtBufferSize (kMtBtBlockSize * kMtBtNumBlocks) + +static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE HashThreadFunc2(void *p) { HashThreadFunc((CMatchFinderMt *)p); return 0; } +static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE BtThreadFunc2(void *p) +{ + Byte allocaDummy[0x180]; + unsigned i = 0; + for (i = 0; i < 16; i++) + allocaDummy[i] = (Byte)0; + if (allocaDummy[0] == 0) + BtThreadFunc((CMatchFinderMt *)p); + return 0; +} + +SRes MatchFinderMt_Create(CMatchFinderMt *p, UInt32 historySize, UInt32 keepAddBufferBefore, + UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ISzAllocPtr alloc) +{ + CMatchFinder *mf = p->MatchFinder; + p->historySize = historySize; + if (kMtBtBlockSize <= matchMaxLen * 4) + return SZ_ERROR_PARAM; + if (!p->hashBuf) + { + p->hashBuf = (UInt32 *)ISzAlloc_Alloc(alloc, (kHashBufferSize + kBtBufferSize) * sizeof(UInt32)); + if (!p->hashBuf) + return SZ_ERROR_MEM; + p->btBuf = p->hashBuf + kHashBufferSize; + } + keepAddBufferBefore += (kHashBufferSize + kBtBufferSize); + keepAddBufferAfter += kMtHashBlockSize; + if (!MatchFinder_Create(mf, historySize, keepAddBufferBefore, matchMaxLen, keepAddBufferAfter, alloc)) + return SZ_ERROR_MEM; + + RINOK(MtSync_Create(&p->hashSync, HashThreadFunc2, p, kMtHashNumBlocks)); + RINOK(MtSync_Create(&p->btSync, BtThreadFunc2, p, kMtBtNumBlocks)); + return SZ_OK; +} + +/* Call it after ReleaseStream / SetStream */ +static void MatchFinderMt_Init(CMatchFinderMt *p) +{ + CMatchFinder *mf = p->MatchFinder; + + p->btBufPos = + p->btBufPosLimit = 0; + p->hashBufPos = + p->hashBufPosLimit = 0; + + /* Init without data reading. 
We don't want to read data in this thread */ + MatchFinder_Init_3(mf, False); + MatchFinder_Init_LowHash(mf); + + p->pointerToCurPos = Inline_MatchFinder_GetPointerToCurrentPos(mf); + p->btNumAvailBytes = 0; + p->lzPos = p->historySize + 1; + + p->hash = mf->hash; + p->fixedHashSize = mf->fixedHashSize; + p->crc = mf->crc; + + p->son = mf->son; + p->matchMaxLen = mf->matchMaxLen; + p->numHashBytes = mf->numHashBytes; + p->pos = mf->pos; + p->buffer = mf->buffer; + p->cyclicBufferPos = mf->cyclicBufferPos; + p->cyclicBufferSize = mf->cyclicBufferSize; + p->cutValue = mf->cutValue; +} + +/* ReleaseStream is required to finish multithreading */ +void MatchFinderMt_ReleaseStream(CMatchFinderMt *p) +{ + MtSync_StopWriting(&p->btSync); + /* p->MatchFinder->ReleaseStream(); */ +} + +static void MatchFinderMt_Normalize(CMatchFinderMt *p) +{ + MatchFinder_Normalize3(p->lzPos - p->historySize - 1, p->hash, p->fixedHashSize); + p->lzPos = p->historySize + 1; +} + +static void MatchFinderMt_GetNextBlock_Bt(CMatchFinderMt *p) +{ + UInt32 blockIndex; + MtSync_GetNextBlock(&p->btSync); + blockIndex = ((p->btSync.numProcessedBlocks - 1) & kMtBtNumBlocksMask); + p->btBufPosLimit = p->btBufPos = blockIndex * kMtBtBlockSize; + p->btBufPosLimit += p->btBuf[p->btBufPos++]; + p->btNumAvailBytes = p->btBuf[p->btBufPos++]; + if (p->lzPos >= kMtMaxValForNormalize - kMtBtBlockSize) + MatchFinderMt_Normalize(p); +} + +static const Byte * MatchFinderMt_GetPointerToCurrentPos(CMatchFinderMt *p) +{ + return p->pointerToCurPos; +} + +#define GET_NEXT_BLOCK_IF_REQUIRED if (p->btBufPos == p->btBufPosLimit) MatchFinderMt_GetNextBlock_Bt(p); + +static UInt32 MatchFinderMt_GetNumAvailableBytes(CMatchFinderMt *p) +{ + GET_NEXT_BLOCK_IF_REQUIRED; + return p->btNumAvailBytes; +} + +static UInt32 * MixMatches2(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *distances) +{ + UInt32 h2, curMatch2; + UInt32 *hash = p->hash; + const Byte *cur = p->pointerToCurPos; + UInt32 lzPos = p->lzPos; + MT_HASH2_CALC + + curMatch2 = hash[h2]; + hash[h2] = lzPos; + + if (curMatch2 >= matchMinPos) + if (cur[(ptrdiff_t)curMatch2 - lzPos] == cur[0]) + { + *distances++ = 2; + *distances++ = lzPos - curMatch2 - 1; + } + + return distances; +} + +static UInt32 * MixMatches3(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *distances) +{ + UInt32 h2, h3, curMatch2, curMatch3; + UInt32 *hash = p->hash; + const Byte *cur = p->pointerToCurPos; + UInt32 lzPos = p->lzPos; + MT_HASH3_CALC + + curMatch2 = hash[ h2]; + curMatch3 = (hash + kFix3HashSize)[h3]; + + hash[ h2] = lzPos; + (hash + kFix3HashSize)[h3] = lzPos; + + if (curMatch2 >= matchMinPos && cur[(ptrdiff_t)curMatch2 - lzPos] == cur[0]) + { + distances[1] = lzPos - curMatch2 - 1; + if (cur[(ptrdiff_t)curMatch2 - lzPos + 2] == cur[2]) + { + distances[0] = 3; + return distances + 2; + } + distances[0] = 2; + distances += 2; + } + + if (curMatch3 >= matchMinPos && cur[(ptrdiff_t)curMatch3 - lzPos] == cur[0]) + { + *distances++ = 3; + *distances++ = lzPos - curMatch3 - 1; + } + + return distances; +} + +/* +static UInt32 *MixMatches4(CMatchFinderMt *p, UInt32 matchMinPos, UInt32 *distances) +{ + UInt32 h2, h3, h4, curMatch2, curMatch3, curMatch4; + UInt32 *hash = p->hash; + const Byte *cur = p->pointerToCurPos; + UInt32 lzPos = p->lzPos; + MT_HASH4_CALC + + curMatch2 = hash[ h2]; + curMatch3 = (hash + kFix3HashSize)[h3]; + curMatch4 = (hash + kFix4HashSize)[h4]; + + hash[ h2] = lzPos; + (hash + kFix3HashSize)[h3] = lzPos; + (hash + kFix4HashSize)[h4] = lzPos; + + if (curMatch2 >= matchMinPos && 
cur[(ptrdiff_t)curMatch2 - lzPos] == cur[0]) + { + distances[1] = lzPos - curMatch2 - 1; + if (cur[(ptrdiff_t)curMatch2 - lzPos + 2] == cur[2]) + { + distances[0] = (cur[(ptrdiff_t)curMatch2 - lzPos + 3] == cur[3]) ? 4 : 3; + return distances + 2; + } + distances[0] = 2; + distances += 2; + } + + if (curMatch3 >= matchMinPos && cur[(ptrdiff_t)curMatch3 - lzPos] == cur[0]) + { + distances[1] = lzPos - curMatch3 - 1; + if (cur[(ptrdiff_t)curMatch3 - lzPos + 3] == cur[3]) + { + distances[0] = 4; + return distances + 2; + } + distances[0] = 3; + distances += 2; + } + + if (curMatch4 >= matchMinPos) + if ( + cur[(ptrdiff_t)curMatch4 - lzPos] == cur[0] && + cur[(ptrdiff_t)curMatch4 - lzPos + 3] == cur[3] + ) + { + *distances++ = 4; + *distances++ = lzPos - curMatch4 - 1; + } + + return distances; +} +*/ + +#define INCREASE_LZ_POS p->lzPos++; p->pointerToCurPos++; + +static UInt32 MatchFinderMt2_GetMatches(CMatchFinderMt *p, UInt32 *distances) +{ + const UInt32 *btBuf = p->btBuf + p->btBufPos; + UInt32 len = *btBuf++; + p->btBufPos += 1 + len; + p->btNumAvailBytes--; + { + UInt32 i; + for (i = 0; i < len; i += 2) + { + UInt32 v0 = btBuf[0]; + UInt32 v1 = btBuf[1]; + btBuf += 2; + distances[0] = v0; + distances[1] = v1; + distances += 2; + } + } + INCREASE_LZ_POS + return len; +} + +static UInt32 MatchFinderMt_GetMatches(CMatchFinderMt *p, UInt32 *distances) +{ + const UInt32 *btBuf = p->btBuf + p->btBufPos; + UInt32 len = *btBuf++; + p->btBufPos += 1 + len; + + if (len == 0) + { + /* change for bt5 ! */ + if (p->btNumAvailBytes-- >= 4) + len = (UInt32)(p->MixMatchesFunc(p, p->lzPos - p->historySize, distances) - (distances)); + } + else + { + /* Condition: there are matches in btBuf with length < p->numHashBytes */ + UInt32 *distances2; + p->btNumAvailBytes--; + distances2 = p->MixMatchesFunc(p, p->lzPos - btBuf[1], distances); + do + { + UInt32 v0 = btBuf[0]; + UInt32 v1 = btBuf[1]; + btBuf += 2; + distances2[0] = v0; + distances2[1] = v1; + distances2 += 2; + } + while ((len -= 2) != 0); + len = (UInt32)(distances2 - (distances)); + } + INCREASE_LZ_POS + return len; +} + +#define SKIP_HEADER2_MT do { GET_NEXT_BLOCK_IF_REQUIRED +#define SKIP_HEADER_MT(n) SKIP_HEADER2_MT if (p->btNumAvailBytes-- >= (n)) { const Byte *cur = p->pointerToCurPos; UInt32 *hash = p->hash; +#define SKIP_FOOTER_MT } INCREASE_LZ_POS p->btBufPos += p->btBuf[p->btBufPos] + 1; } while (--num != 0); + +static void MatchFinderMt0_Skip(CMatchFinderMt *p, UInt32 num) +{ + SKIP_HEADER2_MT { p->btNumAvailBytes--; + SKIP_FOOTER_MT +} + +static void MatchFinderMt2_Skip(CMatchFinderMt *p, UInt32 num) +{ + SKIP_HEADER_MT(2) + UInt32 h2; + MT_HASH2_CALC + hash[h2] = p->lzPos; + SKIP_FOOTER_MT +} + +static void MatchFinderMt3_Skip(CMatchFinderMt *p, UInt32 num) +{ + SKIP_HEADER_MT(3) + UInt32 h2, h3; + MT_HASH3_CALC + (hash + kFix3HashSize)[h3] = + hash[ h2] = + p->lzPos; + SKIP_FOOTER_MT +} + +/* +static void MatchFinderMt4_Skip(CMatchFinderMt *p, UInt32 num) +{ + SKIP_HEADER_MT(4) + UInt32 h2, h3, h4; + MT_HASH4_CALC + (hash + kFix4HashSize)[h4] = + (hash + kFix3HashSize)[h3] = + hash[ h2] = + p->lzPos; + SKIP_FOOTER_MT +} +*/ + +void MatchFinderMt_CreateVTable(CMatchFinderMt *p, IMatchFinder *vTable) +{ + vTable->Init = (Mf_Init_Func)MatchFinderMt_Init; + vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinderMt_GetNumAvailableBytes; + vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinderMt_GetPointerToCurrentPos; + vTable->GetMatches = (Mf_GetMatches_Func)MatchFinderMt_GetMatches; + + switch 
(p->MatchFinder->numHashBytes) + { + case 2: + p->GetHeadsFunc = GetHeads2; + p->MixMatchesFunc = (Mf_Mix_Matches)NULL; + vTable->Skip = (Mf_Skip_Func)MatchFinderMt0_Skip; + vTable->GetMatches = (Mf_GetMatches_Func)MatchFinderMt2_GetMatches; + break; + case 3: + p->GetHeadsFunc = GetHeads3; + p->MixMatchesFunc = (Mf_Mix_Matches)MixMatches2; + vTable->Skip = (Mf_Skip_Func)MatchFinderMt2_Skip; + break; + default: + /* case 4: */ + p->GetHeadsFunc = p->MatchFinder->bigHash ? GetHeads4b : GetHeads4; + p->MixMatchesFunc = (Mf_Mix_Matches)MixMatches3; + vTable->Skip = (Mf_Skip_Func)MatchFinderMt3_Skip; + break; + /* + default: + p->GetHeadsFunc = GetHeads5; + p->MixMatchesFunc = (Mf_Mix_Matches)MixMatches4; + vTable->Skip = (Mf_Skip_Func)MatchFinderMt4_Skip; + break; + */ + } +} diff --git a/3rdparty/7z/src/LzFindMt.h b/3rdparty/7z/src/LzFindMt.h new file mode 100644 index 0000000000..fdd17008c2 --- /dev/null +++ b/3rdparty/7z/src/LzFindMt.h @@ -0,0 +1,101 @@ +/* LzFindMt.h -- multithreaded Match finder for LZ algorithms +2018-07-04 : Igor Pavlov : Public domain */ + +#ifndef __LZ_FIND_MT_H +#define __LZ_FIND_MT_H + +#include "LzFind.h" +#include "Threads.h" + +EXTERN_C_BEGIN + +#define kMtHashBlockSize (1 << 13) +#define kMtHashNumBlocks (1 << 3) +#define kMtHashNumBlocksMask (kMtHashNumBlocks - 1) + +#define kMtBtBlockSize (1 << 14) +#define kMtBtNumBlocks (1 << 6) +#define kMtBtNumBlocksMask (kMtBtNumBlocks - 1) + +typedef struct _CMtSync +{ + BoolInt wasCreated; + BoolInt needStart; + BoolInt exit; + BoolInt stopWriting; + + CThread thread; + CAutoResetEvent canStart; + CAutoResetEvent wasStarted; + CAutoResetEvent wasStopped; + CSemaphore freeSemaphore; + CSemaphore filledSemaphore; + BoolInt csWasInitialized; + BoolInt csWasEntered; + CCriticalSection cs; + UInt32 numProcessedBlocks; +} CMtSync; + +typedef UInt32 * (*Mf_Mix_Matches)(void *p, UInt32 matchMinPos, UInt32 *distances); + +/* kMtCacheLineDummy must be >= size_of_CPU_cache_line */ +#define kMtCacheLineDummy 128 + +typedef void (*Mf_GetHeads)(const Byte *buffer, UInt32 pos, + UInt32 *hash, UInt32 hashMask, UInt32 *heads, UInt32 numHeads, const UInt32 *crc); + +typedef struct _CMatchFinderMt +{ + /* LZ */ + const Byte *pointerToCurPos; + UInt32 *btBuf; + UInt32 btBufPos; + UInt32 btBufPosLimit; + UInt32 lzPos; + UInt32 btNumAvailBytes; + + UInt32 *hash; + UInt32 fixedHashSize; + UInt32 historySize; + const UInt32 *crc; + + Mf_Mix_Matches MixMatchesFunc; + + /* LZ + BT */ + CMtSync btSync; + Byte btDummy[kMtCacheLineDummy]; + + /* BT */ + UInt32 *hashBuf; + UInt32 hashBufPos; + UInt32 hashBufPosLimit; + UInt32 hashNumAvail; + + CLzRef *son; + UInt32 matchMaxLen; + UInt32 numHashBytes; + UInt32 pos; + const Byte *buffer; + UInt32 cyclicBufferPos; + UInt32 cyclicBufferSize; /* it must be historySize + 1 */ + UInt32 cutValue; + + /* BT + Hash */ + CMtSync hashSync; + /* Byte hashDummy[kMtCacheLineDummy]; */ + + /* Hash */ + Mf_GetHeads GetHeadsFunc; + CMatchFinder *MatchFinder; +} CMatchFinderMt; + +void MatchFinderMt_Construct(CMatchFinderMt *p); +void MatchFinderMt_Destruct(CMatchFinderMt *p, ISzAllocPtr alloc); +SRes MatchFinderMt_Create(CMatchFinderMt *p, UInt32 historySize, UInt32 keepAddBufferBefore, + UInt32 matchMaxLen, UInt32 keepAddBufferAfter, ISzAllocPtr alloc); +void MatchFinderMt_CreateVTable(CMatchFinderMt *p, IMatchFinder *vTable); +void MatchFinderMt_ReleaseStream(CMatchFinderMt *p); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/LzHash.h b/3rdparty/7z/src/LzHash.h new file mode 100644 index 
0000000000..2191444072 --- /dev/null +++ b/3rdparty/7z/src/LzHash.h @@ -0,0 +1,57 @@ +/* LzHash.h -- HASH functions for LZ algorithms +2015-04-12 : Igor Pavlov : Public domain */ + +#ifndef __LZ_HASH_H +#define __LZ_HASH_H + +#define kHash2Size (1 << 10) +#define kHash3Size (1 << 16) +#define kHash4Size (1 << 20) + +#define kFix3HashSize (kHash2Size) +#define kFix4HashSize (kHash2Size + kHash3Size) +#define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size) + +#define HASH2_CALC hv = cur[0] | ((UInt32)cur[1] << 8); + +#define HASH3_CALC { \ + UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ + h2 = temp & (kHash2Size - 1); \ + hv = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; } + +#define HASH4_CALC { \ + UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ + h2 = temp & (kHash2Size - 1); \ + temp ^= ((UInt32)cur[2] << 8); \ + h3 = temp & (kHash3Size - 1); \ + hv = (temp ^ (p->crc[cur[3]] << 5)) & p->hashMask; } + +#define HASH5_CALC { \ + UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ + h2 = temp & (kHash2Size - 1); \ + temp ^= ((UInt32)cur[2] << 8); \ + h3 = temp & (kHash3Size - 1); \ + temp ^= (p->crc[cur[3]] << 5); \ + h4 = temp & (kHash4Size - 1); \ + hv = (temp ^ (p->crc[cur[4]] << 3)) & p->hashMask; } + +/* #define HASH_ZIP_CALC hv = ((cur[0] | ((UInt32)cur[1] << 8)) ^ p->crc[cur[2]]) & 0xFFFF; */ +#define HASH_ZIP_CALC hv = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF; + + +#define MT_HASH2_CALC \ + h2 = (p->crc[cur[0]] ^ cur[1]) & (kHash2Size - 1); + +#define MT_HASH3_CALC { \ + UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ + h2 = temp & (kHash2Size - 1); \ + h3 = (temp ^ ((UInt32)cur[2] << 8)) & (kHash3Size - 1); } + +#define MT_HASH4_CALC { \ + UInt32 temp = p->crc[cur[0]] ^ cur[1]; \ + h2 = temp & (kHash2Size - 1); \ + temp ^= ((UInt32)cur[2] << 8); \ + h3 = temp & (kHash3Size - 1); \ + h4 = (temp ^ (p->crc[cur[3]] << 5)) & (kHash4Size - 1); } + +#endif diff --git a/3rdparty/7z/src/Lzma2Dec.c b/3rdparty/7z/src/Lzma2Dec.c new file mode 100644 index 0000000000..2e631051ba --- /dev/null +++ b/3rdparty/7z/src/Lzma2Dec.c @@ -0,0 +1,488 @@ +/* Lzma2Dec.c -- LZMA2 Decoder +2019-02-02 : Igor Pavlov : Public domain */ + +/* #define SHOW_DEBUG_INFO */ + +#include "Precomp.h" + +#ifdef SHOW_DEBUG_INFO +#include <stdio.h> +#endif + +#include <string.h> + +#include "Lzma2Dec.h" + +/* +00000000 - End of data +00000001 U U - Uncompressed, reset dic, need reset state and set new prop +00000010 U U - Uncompressed, no reset +100uuuuu U U P P - LZMA, no reset +101uuuuu U U P P - LZMA, reset state +110uuuuu U U P P S - LZMA, reset state + set new prop +111uuuuu U U P P S - LZMA, reset state + set new prop, reset dic + + u, U - Unpack Size + P - Pack Size + S - Props +*/
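/*
  Editor's note: worked example added for this review; it is not part of the
  upstream Lzma2Dec.c. Reading the table above together with
  Lzma2Dec_UpdateState() below: a chunk whose control byte is
  0xE5 = 111 00101 (binary) is an LZMA chunk with "reset state + set new prop,
  reset dic". Its low 5 bits are the top bits of the unpack size, so after the
  two following size bytes U1 and U0 the decoder computes

    unpackSize = (((UInt32)(0xE5 & 0x1F) << 16) | ((UInt32)U1 << 8) | U0) + 1;

  i.e. 0x50000 + (U1 << 8) + U0 + 1 bytes of uncompressed data, and the header
  continues with the two pack-size bytes P P and the props byte S.
*/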
+#define LZMA2_CONTROL_COPY_RESET_DIC 1 + +#define LZMA2_IS_UNCOMPRESSED_STATE(p) (((p)->control & (1 << 7)) == 0) + +#define LZMA2_LCLP_MAX 4 +#define LZMA2_DIC_SIZE_FROM_PROP(p) (((UInt32)2 | ((p) & 1)) << ((p) / 2 + 11)) + +#ifdef SHOW_DEBUG_INFO +#define PRF(x) x +#else +#define PRF(x) +#endif + +typedef enum +{ + LZMA2_STATE_CONTROL, + LZMA2_STATE_UNPACK0, + LZMA2_STATE_UNPACK1, + LZMA2_STATE_PACK0, + LZMA2_STATE_PACK1, + LZMA2_STATE_PROP, + LZMA2_STATE_DATA, + LZMA2_STATE_DATA_CONT, + LZMA2_STATE_FINISHED, + LZMA2_STATE_ERROR +} ELzma2State; + +static SRes Lzma2Dec_GetOldProps(Byte prop, Byte *props) +{ + UInt32 dicSize; + if (prop > 40) + return SZ_ERROR_UNSUPPORTED; + dicSize = (prop == 40) ? 0xFFFFFFFF : LZMA2_DIC_SIZE_FROM_PROP(prop); + props[0] = (Byte)LZMA2_LCLP_MAX; + props[1] = (Byte)(dicSize); + props[2] = (Byte)(dicSize >> 8); + props[3] = (Byte)(dicSize >> 16); + props[4] = (Byte)(dicSize >> 24); + return SZ_OK; +} + +SRes Lzma2Dec_AllocateProbs(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc) +{ + Byte props[LZMA_PROPS_SIZE]; + RINOK(Lzma2Dec_GetOldProps(prop, props)); + return LzmaDec_AllocateProbs(&p->decoder, props, LZMA_PROPS_SIZE, alloc); +} + +SRes Lzma2Dec_Allocate(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc) +{ + Byte props[LZMA_PROPS_SIZE]; + RINOK(Lzma2Dec_GetOldProps(prop, props)); + return LzmaDec_Allocate(&p->decoder, props, LZMA_PROPS_SIZE, alloc); +} + +void Lzma2Dec_Init(CLzma2Dec *p) +{ + p->state = LZMA2_STATE_CONTROL; + p->needInitLevel = 0xE0; + p->isExtraMode = False; + p->unpackSize = 0; + + // p->decoder.dicPos = 0; // we can use it instead of full init + LzmaDec_Init(&p->decoder); +} + +static ELzma2State Lzma2Dec_UpdateState(CLzma2Dec *p, Byte b) +{ + switch (p->state) + { + case LZMA2_STATE_CONTROL: + p->isExtraMode = False; + p->control = b; + PRF(printf("\n %8X", (unsigned)p->decoder.dicPos)); + PRF(printf(" %02X", (unsigned)b)); + if (b == 0) + return LZMA2_STATE_FINISHED; + if (LZMA2_IS_UNCOMPRESSED_STATE(p)) + { + if (b == LZMA2_CONTROL_COPY_RESET_DIC) + p->needInitLevel = 0xC0; + else if (b > 2 || p->needInitLevel == 0xE0) + return LZMA2_STATE_ERROR; + } + else + { + if (b < p->needInitLevel) + return LZMA2_STATE_ERROR; + p->needInitLevel = 0; + p->unpackSize = (UInt32)(b & 0x1F) << 16; + } + return LZMA2_STATE_UNPACK0; + + case LZMA2_STATE_UNPACK0: + p->unpackSize |= (UInt32)b << 8; + return LZMA2_STATE_UNPACK1; + + case LZMA2_STATE_UNPACK1: + p->unpackSize |= (UInt32)b; + p->unpackSize++; + PRF(printf(" %7u", (unsigned)p->unpackSize)); + return LZMA2_IS_UNCOMPRESSED_STATE(p) ? LZMA2_STATE_DATA : LZMA2_STATE_PACK0; + + case LZMA2_STATE_PACK0: + p->packSize = (UInt32)b << 8; + return LZMA2_STATE_PACK1; + + case LZMA2_STATE_PACK1: + p->packSize |= (UInt32)b; + p->packSize++; + // if (p->packSize < 5) return LZMA2_STATE_ERROR; + PRF(printf(" %5u", (unsigned)p->packSize)); + return (p->control & 0x40) ?
LZMA2_STATE_PROP : LZMA2_STATE_DATA; + + case LZMA2_STATE_PROP: + { + unsigned lc, lp; + if (b >= (9 * 5 * 5)) + return LZMA2_STATE_ERROR; + lc = b % 9; + b /= 9; + p->decoder.prop.pb = (Byte)(b / 5); + lp = b % 5; + if (lc + lp > LZMA2_LCLP_MAX) + return LZMA2_STATE_ERROR; + p->decoder.prop.lc = (Byte)lc; + p->decoder.prop.lp = (Byte)lp; + return LZMA2_STATE_DATA; + } + } + return LZMA2_STATE_ERROR; +} + +static void LzmaDec_UpdateWithUncompressed(CLzmaDec *p, const Byte *src, SizeT size) +{ + memcpy(p->dic + p->dicPos, src, size); + p->dicPos += size; + if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= size) + p->checkDicSize = p->prop.dicSize; + p->processedPos += (UInt32)size; +} + +void LzmaDec_InitDicAndState(CLzmaDec *p, BoolInt initDic, BoolInt initState); + + +SRes Lzma2Dec_DecodeToDic(CLzma2Dec *p, SizeT dicLimit, + const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status) +{ + SizeT inSize = *srcLen; + *srcLen = 0; + *status = LZMA_STATUS_NOT_SPECIFIED; + + while (p->state != LZMA2_STATE_ERROR) + { + SizeT dicPos; + + if (p->state == LZMA2_STATE_FINISHED) + { + *status = LZMA_STATUS_FINISHED_WITH_MARK; + return SZ_OK; + } + + dicPos = p->decoder.dicPos; + + if (dicPos == dicLimit && finishMode == LZMA_FINISH_ANY) + { + *status = LZMA_STATUS_NOT_FINISHED; + return SZ_OK; + } + + if (p->state != LZMA2_STATE_DATA && p->state != LZMA2_STATE_DATA_CONT) + { + if (*srcLen == inSize) + { + *status = LZMA_STATUS_NEEDS_MORE_INPUT; + return SZ_OK; + } + (*srcLen)++; + p->state = Lzma2Dec_UpdateState(p, *src++); + if (dicPos == dicLimit && p->state != LZMA2_STATE_FINISHED) + break; + continue; + } + + { + SizeT inCur = inSize - *srcLen; + SizeT outCur = dicLimit - dicPos; + ELzmaFinishMode curFinishMode = LZMA_FINISH_ANY; + + if (outCur >= p->unpackSize) + { + outCur = (SizeT)p->unpackSize; + curFinishMode = LZMA_FINISH_END; + } + + if (LZMA2_IS_UNCOMPRESSED_STATE(p)) + { + if (inCur == 0) + { + *status = LZMA_STATUS_NEEDS_MORE_INPUT; + return SZ_OK; + } + + if (p->state == LZMA2_STATE_DATA) + { + BoolInt initDic = (p->control == LZMA2_CONTROL_COPY_RESET_DIC); + LzmaDec_InitDicAndState(&p->decoder, initDic, False); + } + + if (inCur > outCur) + inCur = outCur; + if (inCur == 0) + break; + + LzmaDec_UpdateWithUncompressed(&p->decoder, src, inCur); + + src += inCur; + *srcLen += inCur; + p->unpackSize -= (UInt32)inCur; + p->state = (p->unpackSize == 0) ? 
LZMA2_STATE_CONTROL : LZMA2_STATE_DATA_CONT; + } + else + { + SRes res; + + if (p->state == LZMA2_STATE_DATA) + { + BoolInt initDic = (p->control >= 0xE0); + BoolInt initState = (p->control >= 0xA0); + LzmaDec_InitDicAndState(&p->decoder, initDic, initState); + p->state = LZMA2_STATE_DATA_CONT; + } + + if (inCur > p->packSize) + inCur = (SizeT)p->packSize; + + res = LzmaDec_DecodeToDic(&p->decoder, dicPos + outCur, src, &inCur, curFinishMode, status); + + src += inCur; + *srcLen += inCur; + p->packSize -= (UInt32)inCur; + outCur = p->decoder.dicPos - dicPos; + p->unpackSize -= (UInt32)outCur; + + if (res != 0) + break; + + if (*status == LZMA_STATUS_NEEDS_MORE_INPUT) + { + if (p->packSize == 0) + break; + return SZ_OK; + } + + if (inCur == 0 && outCur == 0) + { + if (*status != LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK + || p->unpackSize != 0 + || p->packSize != 0) + break; + p->state = LZMA2_STATE_CONTROL; + } + + *status = LZMA_STATUS_NOT_SPECIFIED; + } + } + } + + *status = LZMA_STATUS_NOT_SPECIFIED; + p->state = LZMA2_STATE_ERROR; + return SZ_ERROR_DATA; +} + + + + +ELzma2ParseStatus Lzma2Dec_Parse(CLzma2Dec *p, + SizeT outSize, + const Byte *src, SizeT *srcLen, + int checkFinishBlock) +{ + SizeT inSize = *srcLen; + *srcLen = 0; + + while (p->state != LZMA2_STATE_ERROR) + { + if (p->state == LZMA2_STATE_FINISHED) + return (ELzma2ParseStatus)LZMA_STATUS_FINISHED_WITH_MARK; + + if (outSize == 0 && !checkFinishBlock) + return (ELzma2ParseStatus)LZMA_STATUS_NOT_FINISHED; + + if (p->state != LZMA2_STATE_DATA && p->state != LZMA2_STATE_DATA_CONT) + { + if (*srcLen == inSize) + return (ELzma2ParseStatus)LZMA_STATUS_NEEDS_MORE_INPUT; + (*srcLen)++; + + p->state = Lzma2Dec_UpdateState(p, *src++); + + if (p->state == LZMA2_STATE_UNPACK0) + { + // if (p->decoder.dicPos != 0) + if (p->control == LZMA2_CONTROL_COPY_RESET_DIC || p->control >= 0xE0) + return LZMA2_PARSE_STATUS_NEW_BLOCK; + // if (outSize == 0) return LZMA_STATUS_NOT_FINISHED; + } + + // The following code can be commented out. + // It's not a big problem if we read additional input bytes; + // reading will be stopped later in the LZMA2_STATE_DATA / LZMA2_STATE_DATA_CONT state. + + if (outSize == 0 && p->state != LZMA2_STATE_FINISHED) + { + // checkFinishBlock is true, so we expect the block to be finished here; + // we can return LZMA_STATUS_NOT_SPECIFIED or LZMA_STATUS_NOT_FINISHED here + // break; + return (ELzma2ParseStatus)LZMA_STATUS_NOT_FINISHED; + } + + if (p->state == LZMA2_STATE_DATA) + return LZMA2_PARSE_STATUS_NEW_CHUNK; + + continue; + } + + if (outSize == 0) + return (ELzma2ParseStatus)LZMA_STATUS_NOT_FINISHED; + + { + SizeT inCur = inSize - *srcLen; + + if (LZMA2_IS_UNCOMPRESSED_STATE(p)) + { + if (inCur == 0) + return (ELzma2ParseStatus)LZMA_STATUS_NEEDS_MORE_INPUT; + if (inCur > p->unpackSize) + inCur = p->unpackSize; + if (inCur > outSize) + inCur = outSize; + p->decoder.dicPos += inCur; + src += inCur; + *srcLen += inCur; + outSize -= inCur; + p->unpackSize -= (UInt32)inCur; + p->state = (p->unpackSize == 0) ?
LZMA2_STATE_CONTROL : LZMA2_STATE_DATA_CONT; + } + else + { + p->isExtraMode = True; + + if (inCur == 0) + { + if (p->packSize != 0) + return (ELzma2ParseStatus)LZMA_STATUS_NEEDS_MORE_INPUT; + } + else if (p->state == LZMA2_STATE_DATA) + { + p->state = LZMA2_STATE_DATA_CONT; + if (*src != 0) + { + // first byte of lzma chunk must be Zero + *srcLen += 1; + p->packSize--; + break; + } + } + + if (inCur > p->packSize) + inCur = (SizeT)p->packSize; + + src += inCur; + *srcLen += inCur; + p->packSize -= (UInt32)inCur; + + if (p->packSize == 0) + { + SizeT rem = outSize; + if (rem > p->unpackSize) + rem = p->unpackSize; + p->decoder.dicPos += rem; + p->unpackSize -= (UInt32)rem; + outSize -= rem; + if (p->unpackSize == 0) + p->state = LZMA2_STATE_CONTROL; + } + } + } + } + + p->state = LZMA2_STATE_ERROR; + return (ELzma2ParseStatus)LZMA_STATUS_NOT_SPECIFIED; +} + + + + +SRes Lzma2Dec_DecodeToBuf(CLzma2Dec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status) +{ + SizeT outSize = *destLen, inSize = *srcLen; + *srcLen = *destLen = 0; + + for (;;) + { + SizeT inCur = inSize, outCur, dicPos; + ELzmaFinishMode curFinishMode; + SRes res; + + if (p->decoder.dicPos == p->decoder.dicBufSize) + p->decoder.dicPos = 0; + dicPos = p->decoder.dicPos; + curFinishMode = LZMA_FINISH_ANY; + outCur = p->decoder.dicBufSize - dicPos; + + if (outCur >= outSize) + { + outCur = outSize; + curFinishMode = finishMode; + } + + res = Lzma2Dec_DecodeToDic(p, dicPos + outCur, src, &inCur, curFinishMode, status); + + src += inCur; + inSize -= inCur; + *srcLen += inCur; + outCur = p->decoder.dicPos - dicPos; + memcpy(dest, p->decoder.dic + dicPos, outCur); + dest += outCur; + outSize -= outCur; + *destLen += outCur; + if (res != 0) + return res; + if (outCur == 0 || outSize == 0) + return SZ_OK; + } +} + + +SRes Lzma2Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, + Byte prop, ELzmaFinishMode finishMode, ELzmaStatus *status, ISzAllocPtr alloc) +{ + CLzma2Dec p; + SRes res; + SizeT outSize = *destLen, inSize = *srcLen; + *destLen = *srcLen = 0; + *status = LZMA_STATUS_NOT_SPECIFIED; + Lzma2Dec_Construct(&p); + RINOK(Lzma2Dec_AllocateProbs(&p, prop, alloc)); + p.decoder.dic = dest; + p.decoder.dicBufSize = outSize; + Lzma2Dec_Init(&p); + *srcLen = inSize; + res = Lzma2Dec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status); + *destLen = p.decoder.dicPos; + if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT) + res = SZ_ERROR_INPUT_EOF; + Lzma2Dec_FreeProbs(&p, alloc); + return res; +} diff --git a/3rdparty/7z/src/Lzma2Dec.h b/3rdparty/7z/src/Lzma2Dec.h new file mode 100644 index 0000000000..da50387250 --- /dev/null +++ b/3rdparty/7z/src/Lzma2Dec.h @@ -0,0 +1,120 @@ +/* Lzma2Dec.h -- LZMA2 Decoder +2018-02-19 : Igor Pavlov : Public domain */ + +#ifndef __LZMA2_DEC_H +#define __LZMA2_DEC_H + +#include "LzmaDec.h" + +EXTERN_C_BEGIN + +/* ---------- State Interface ---------- */ + +typedef struct +{ + unsigned state; + Byte control; + Byte needInitLevel; + Byte isExtraMode; + Byte _pad_; + UInt32 packSize; + UInt32 unpackSize; + CLzmaDec decoder; +} CLzma2Dec; + +#define Lzma2Dec_Construct(p) LzmaDec_Construct(&(p)->decoder) +#define Lzma2Dec_FreeProbs(p, alloc) LzmaDec_FreeProbs(&(p)->decoder, alloc) +#define Lzma2Dec_Free(p, alloc) LzmaDec_Free(&(p)->decoder, alloc) + +SRes Lzma2Dec_AllocateProbs(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc); +SRes Lzma2Dec_Allocate(CLzma2Dec *p, Byte prop, ISzAllocPtr alloc); +void Lzma2Dec_Init(CLzma2Dec 
*p); + +/* +finishMode: + It has meaning only if the decoding reaches output limit (*destLen or dicLimit). + LZMA_FINISH_ANY - use smallest number of input bytes + LZMA_FINISH_END - read EndOfStream marker after decoding + +Returns: + SZ_OK + status: + LZMA_STATUS_FINISHED_WITH_MARK + LZMA_STATUS_NOT_FINISHED + LZMA_STATUS_NEEDS_MORE_INPUT + SZ_ERROR_DATA - Data error +*/ + +SRes Lzma2Dec_DecodeToDic(CLzma2Dec *p, SizeT dicLimit, + const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); + +SRes Lzma2Dec_DecodeToBuf(CLzma2Dec *p, Byte *dest, SizeT *destLen, + const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); + + +/* ---------- LZMA2 block and chunk parsing ---------- */ + +/* +Lzma2Dec_Parse() parses the compressed data stream up to the next independent block or the next chunk of data. +It can return a LZMA_STATUS_* code or a LZMA2_PARSE_STATUS_* code: + - LZMA2_PARSE_STATUS_NEW_BLOCK - there is a new block, and 1 additional byte (the control byte of the next block header) was read from the input. + - LZMA2_PARSE_STATUS_NEW_CHUNK - there is a new chunk, and only the lzma2 header of the new chunk was read. + CLzma2Dec::unpackSize contains the unpack size of that chunk +*/ + +typedef enum +{ +/* + LZMA_STATUS_NOT_SPECIFIED // data error + LZMA_STATUS_FINISHED_WITH_MARK + LZMA_STATUS_NOT_FINISHED // + LZMA_STATUS_NEEDS_MORE_INPUT + LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK // unused +*/ + LZMA2_PARSE_STATUS_NEW_BLOCK = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK + 1, + LZMA2_PARSE_STATUS_NEW_CHUNK +} ELzma2ParseStatus; + +ELzma2ParseStatus Lzma2Dec_Parse(CLzma2Dec *p, + SizeT outSize, // output size + const Byte *src, SizeT *srcLen, + int checkFinishBlock // set checkFinishBlock = 1 if full input data must be read when decoder.dicPos reaches the blockMax position. + ); + +/* +The LZMA2 parser doesn't decode LZMA chunks, so we must read + a full input LZMA chunk to decode any part of that chunk. + +Lzma2Dec_GetUnpackExtra() returns the maximum possible number + of output bytes that the decoder can produce + at the current input position. +*/ + +#define Lzma2Dec_GetUnpackExtra(p) ((p)->isExtraMode ? (p)->unpackSize : 0); + + +/* ---------- One Call Interface ---------- */ + +/* +finishMode: + It has meaning only if the decoding reaches output limit (*destLen). + LZMA_FINISH_ANY - use smallest number of input bytes + LZMA_FINISH_END - read EndOfStream marker after decoding + +Returns: + SZ_OK + status: + LZMA_STATUS_FINISHED_WITH_MARK + LZMA_STATUS_NOT_FINISHED + SZ_ERROR_DATA - Data error + SZ_ERROR_MEM - Memory allocation error + SZ_ERROR_UNSUPPORTED - Unsupported properties + SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src). +*/
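/*
  Editor's note: minimal usage sketch of the one-call interface declared below;
  it is illustrative and not part of the upstream header. `propByte`, `inBuf`,
  `inSize`, `outBuf` and `outBufSize` are hypothetical caller-provided values;
  g_Alloc is assumed to be the default allocator from Alloc.h.

    SizeT destLen = outBufSize;
    SizeT srcLen = inSize;
    ELzmaStatus status;
    SRes res = Lzma2Decode(outBuf, &destLen, inBuf, &srcLen,
        propByte, LZMA_FINISH_END, &status, &g_Alloc);
    // on SZ_OK with status == LZMA_STATUS_FINISHED_WITH_MARK,
    // destLen holds the number of bytes decoded into outBuf

  LZMA_FINISH_END is used here because the whole stream is expected to fit the
  given buffers; LZMA_FINISH_ANY would allow partial decoding instead.
*/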
+SRes Lzma2Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, + Byte prop, ELzmaFinishMode finishMode, ELzmaStatus *status, ISzAllocPtr alloc); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/Lzma2DecMt.c b/3rdparty/7z/src/Lzma2DecMt.c new file mode 100644 index 0000000000..87d5567ad4 --- /dev/null +++ b/3rdparty/7z/src/Lzma2DecMt.c @@ -0,0 +1,1082 @@ +/* Lzma2DecMt.c -- LZMA2 Decoder Multi-thread +2019-02-02 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +// #define SHOW_DEBUG_INFO + +#ifdef SHOW_DEBUG_INFO +#include <stdio.h> +#endif + +#ifdef SHOW_DEBUG_INFO +#define PRF(x) x +#else +#define PRF(x) +#endif + +#define PRF_STR(s) PRF(printf("\n" s "\n")) +#define PRF_STR_INT(s, d) PRF(printf("\n" s " %d\n", (unsigned)d)) +#define PRF_STR_INT_2(s, d1, d2) PRF(printf("\n" s " %d %d\n", (unsigned)d1, (unsigned)d2)) + +// #define _7ZIP_ST + +#include "Alloc.h" + +#include "Lzma2Dec.h" +#include "Lzma2DecMt.h" + +#ifndef _7ZIP_ST +#include "MtDec.h" +#endif + + +#define LZMA2DECMT_OUT_BLOCK_MAX_DEFAULT (1 << 28) + +void Lzma2DecMtProps_Init(CLzma2DecMtProps *p) +{ + p->inBufSize_ST = 1 << 20; + p->outStep_ST = 1 << 20; + + #ifndef _7ZIP_ST + p->numThreads = 1; + p->inBufSize_MT = 1 << 18; + p->outBlockMax = LZMA2DECMT_OUT_BLOCK_MAX_DEFAULT; + p->inBlockMax = p->outBlockMax + p->outBlockMax / 16; + #endif +} + + + +#ifndef _7ZIP_ST + +/* ---------- CLzma2DecMtThread ---------- */ + +typedef struct +{ + CLzma2Dec dec; + Byte dec_created; + Byte needInit; + + Byte *outBuf; + size_t outBufSize; + + EMtDecParseState state; + ELzma2ParseStatus parseStatus; + + size_t inPreSize; + size_t outPreSize; + + size_t inCodeSize; + size_t outCodeSize; + SRes codeRes; + + CAlignOffsetAlloc alloc; + + Byte mtPad[1 << 7]; +} CLzma2DecMtThread; + +#endif + + +/* ---------- CLzma2DecMt ---------- */ + +typedef struct +{ + // ISzAllocPtr alloc; + ISzAllocPtr allocMid; + + CAlignOffsetAlloc alignOffsetAlloc; + CLzma2DecMtProps props; + Byte prop; + + ISeqInStream *inStream; + ISeqOutStream *outStream; + ICompressProgress *progress; + + BoolInt finishMode; + BoolInt outSize_Defined; + UInt64 outSize; + + UInt64 outProcessed; + UInt64 inProcessed; + BoolInt readWasFinished; + SRes readRes; + + Byte *inBuf; + size_t inBufSize; + Byte dec_created; + CLzma2Dec dec; + + size_t inPos; + size_t inLim; + + #ifndef _7ZIP_ST + UInt64 outProcessed_Parse; + BoolInt mtc_WasConstructed; + CMtDec mtc; + CLzma2DecMtThread coders[MTDEC__THREADS_MAX]; + #endif + +} CLzma2DecMt; + + + +CLzma2DecMtHandle Lzma2DecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid) +{ + CLzma2DecMt *p = (CLzma2DecMt *)ISzAlloc_Alloc(alloc, sizeof(CLzma2DecMt)); + if (!p) + return NULL; + + // p->alloc = alloc; + p->allocMid = allocMid; + + AlignOffsetAlloc_CreateVTable(&p->alignOffsetAlloc); + p->alignOffsetAlloc.numAlignBits = 7; + p->alignOffsetAlloc.offset = 0; + p->alignOffsetAlloc.baseAlloc = alloc; + + p->inBuf = NULL; + p->inBufSize = 0; + p->dec_created = False; + + // Lzma2DecMtProps_Init(&p->props); + + #ifndef _7ZIP_ST + p->mtc_WasConstructed = False; + { + unsigned i; + for (i = 0; i < MTDEC__THREADS_MAX; i++) + { + CLzma2DecMtThread *t = &p->coders[i]; + t->dec_created = False; + t->outBuf = NULL; + t->outBufSize = 0; + } + } + #endif + + return p; +} + + +#ifndef _7ZIP_ST + +static void Lzma2DecMt_FreeOutBufs(CLzma2DecMt *p) +{ + unsigned i; + for (i = 0; i < MTDEC__THREADS_MAX; i++) + { + CLzma2DecMtThread *t = &p->coders[i]; + if (t->outBuf) + { + ISzAlloc_Free(p->allocMid, t->outBuf); + t->outBuf = NULL; +
t->outBufSize = 0; + } + } +} + +#endif + + +static void Lzma2DecMt_FreeSt(CLzma2DecMt *p) +{ + if (p->dec_created) + { + Lzma2Dec_Free(&p->dec, &p->alignOffsetAlloc.vt); + p->dec_created = False; + } + if (p->inBuf) + { + ISzAlloc_Free(p->allocMid, p->inBuf); + p->inBuf = NULL; + } + p->inBufSize = 0; +} + + +void Lzma2DecMt_Destroy(CLzma2DecMtHandle pp) +{ + CLzma2DecMt *p = (CLzma2DecMt *)pp; + + Lzma2DecMt_FreeSt(p); + + #ifndef _7ZIP_ST + + if (p->mtc_WasConstructed) + { + MtDec_Destruct(&p->mtc); + p->mtc_WasConstructed = False; + } + { + unsigned i; + for (i = 0; i < MTDEC__THREADS_MAX; i++) + { + CLzma2DecMtThread *t = &p->coders[i]; + if (t->dec_created) + { + // we don't need to free dict here + Lzma2Dec_FreeProbs(&t->dec, &t->alloc.vt); // p->alloc !!! + t->dec_created = False; + } + } + } + Lzma2DecMt_FreeOutBufs(p); + + #endif + + ISzAlloc_Free(p->alignOffsetAlloc.baseAlloc, pp); +} + + + +#ifndef _7ZIP_ST + +static void Lzma2DecMt_MtCallback_Parse(void *obj, unsigned coderIndex, CMtDecCallbackInfo *cc) +{ + CLzma2DecMt *me = (CLzma2DecMt *)obj; + CLzma2DecMtThread *t = &me->coders[coderIndex]; + + PRF_STR_INT_2("Parse", coderIndex, cc->srcSize); + + cc->state = MTDEC_PARSE_CONTINUE; + + if (cc->startCall) + { + if (!t->dec_created) + { + Lzma2Dec_Construct(&t->dec); + t->dec_created = True; + AlignOffsetAlloc_CreateVTable(&t->alloc); + { + /* (1 << 12) is expected size of one way in data cache. + We optimize alignment for cache line size of 128 bytes and smaller */ + const unsigned kNumAlignBits = 12; + const unsigned kNumCacheLineBits = 7; /* <= kNumAlignBits */ + t->alloc.numAlignBits = kNumAlignBits; + t->alloc.offset = ((UInt32)coderIndex * ((1 << 11) + (1 << 8) + (1 << 6))) & ((1 << kNumAlignBits) - (1 << kNumCacheLineBits)); + t->alloc.baseAlloc = me->alignOffsetAlloc.baseAlloc; + } + } + Lzma2Dec_Init(&t->dec); + + t->inPreSize = 0; + t->outPreSize = 0; + // t->blockWasFinished = False; + // t->finishedWithMark = False; + t->parseStatus = (ELzma2ParseStatus)LZMA_STATUS_NOT_SPECIFIED; + t->state = MTDEC_PARSE_CONTINUE; + + t->inCodeSize = 0; + t->outCodeSize = 0; + t->codeRes = SZ_OK; + + // (cc->srcSize == 0) is allowed + } + + { + ELzma2ParseStatus status; + BoolInt overflow; + UInt32 unpackRem = 0; + + int checkFinishBlock = True; + size_t limit = me->props.outBlockMax; + if (me->outSize_Defined) + { + UInt64 rem = me->outSize - me->outProcessed_Parse; + if (limit >= rem) + { + limit = (size_t)rem; + if (!me->finishMode) + checkFinishBlock = False; + } + } + + // checkFinishBlock = False, if we want to decode partial data + // that must be finished at position <= outBlockMax. 
+ + { + const SizeT srcOrig = cc->srcSize; + SizeT srcSize_Point = 0; + SizeT dicPos_Point = 0; + + cc->srcSize = 0; + overflow = False; + + for (;;) + { + SizeT srcCur = srcOrig - cc->srcSize; + + status = Lzma2Dec_Parse(&t->dec, + limit - t->dec.decoder.dicPos, + cc->src + cc->srcSize, &srcCur, + checkFinishBlock); + + cc->srcSize += srcCur; + + if (status == LZMA2_PARSE_STATUS_NEW_CHUNK) + { + if (t->dec.unpackSize > me->props.outBlockMax - t->dec.decoder.dicPos) + { + overflow = True; + break; + } + continue; + } + + if (status == LZMA2_PARSE_STATUS_NEW_BLOCK) + { + if (t->dec.decoder.dicPos == 0) + continue; + // we decode small blocks in one thread + if (t->dec.decoder.dicPos >= (1 << 14)) + break; + dicPos_Point = t->dec.decoder.dicPos; + srcSize_Point = cc->srcSize; + continue; + } + + if ((int)status == LZMA_STATUS_NOT_FINISHED && checkFinishBlock + // && limit == t->dec.decoder.dicPos + // && limit == me->props.outBlockMax + ) + { + overflow = True; + break; + } + + unpackRem = Lzma2Dec_GetUnpackExtra(&t->dec); + break; + } + + if (dicPos_Point != 0 + && (int)status != LZMA2_PARSE_STATUS_NEW_BLOCK + && (int)status != LZMA_STATUS_FINISHED_WITH_MARK + && (int)status != LZMA_STATUS_NOT_SPECIFIED) + { + // we revert to latest newBlock state + status = LZMA2_PARSE_STATUS_NEW_BLOCK; + unpackRem = 0; + t->dec.decoder.dicPos = dicPos_Point; + cc->srcSize = srcSize_Point; + overflow = False; + } + } + + t->inPreSize += cc->srcSize; + t->parseStatus = status; + + if (overflow) + cc->state = MTDEC_PARSE_OVERFLOW; + else + { + size_t dicPos = t->dec.decoder.dicPos; + + if ((int)status != LZMA_STATUS_NEEDS_MORE_INPUT) + { + if (status == LZMA2_PARSE_STATUS_NEW_BLOCK) + { + cc->state = MTDEC_PARSE_NEW; + cc->srcSize--; // we don't need control byte of next block + t->inPreSize--; + } + else + { + cc->state = MTDEC_PARSE_END; + if ((int)status != LZMA_STATUS_FINISHED_WITH_MARK) + { + // (status == LZMA_STATUS_NOT_SPECIFIED) + // (status == LZMA_STATUS_NOT_FINISHED) + if (unpackRem != 0) + { + /* we also reserve space for max possible number of output bytes of current LZMA chunk */ + SizeT rem = limit - dicPos; + if (rem > unpackRem) + rem = unpackRem; + dicPos += rem; + } + } + } + + me->outProcessed_Parse += dicPos; + } + + cc->outPos = dicPos; + t->outPreSize = (size_t)dicPos; + } + + t->state = cc->state; + return; + } +} + + +static SRes Lzma2DecMt_MtCallback_PreCode(void *pp, unsigned coderIndex) +{ + CLzma2DecMt *me = (CLzma2DecMt *)pp; + CLzma2DecMtThread *t = &me->coders[coderIndex]; + Byte *dest = t->outBuf; + + if (t->inPreSize == 0) + { + t->codeRes = SZ_ERROR_DATA; + return t->codeRes; + } + + if (!dest || t->outBufSize < t->outPreSize) + { + if (dest) + { + ISzAlloc_Free(me->allocMid, dest); + t->outBuf = NULL; + t->outBufSize = 0; + } + + dest = (Byte *)ISzAlloc_Alloc(me->allocMid, t->outPreSize + // + (1 << 28) + ); + // Sleep(200); + if (!dest) + return SZ_ERROR_MEM; + t->outBuf = dest; + t->outBufSize = t->outPreSize; + } + + t->dec.decoder.dic = dest; + t->dec.decoder.dicBufSize = t->outPreSize; + + t->needInit = True; + + return Lzma2Dec_AllocateProbs(&t->dec, me->prop, &t->alloc.vt); // alloc.vt +} + + +static SRes Lzma2DecMt_MtCallback_Code(void *pp, unsigned coderIndex, + const Byte *src, size_t srcSize, int srcFinished, + // int finished, int blockFinished, + UInt64 *inCodePos, UInt64 *outCodePos, int *stop) +{ + CLzma2DecMt *me = (CLzma2DecMt *)pp; + CLzma2DecMtThread *t = &me->coders[coderIndex]; + + UNUSED_VAR(srcFinished) + + PRF_STR_INT_2("Code", coderIndex, 
srcSize); + + *inCodePos = t->inCodeSize; + *outCodePos = 0; + *stop = True; + + if (t->needInit) + { + Lzma2Dec_Init(&t->dec); + t->needInit = False; + } + + { + ELzmaStatus status; + size_t srcProcessed = srcSize; + BoolInt blockWasFinished = + ((int)t->parseStatus == LZMA_STATUS_FINISHED_WITH_MARK + || t->parseStatus == LZMA2_PARSE_STATUS_NEW_BLOCK); + + SRes res = Lzma2Dec_DecodeToDic(&t->dec, + t->outPreSize, + src, &srcProcessed, + blockWasFinished ? LZMA_FINISH_END : LZMA_FINISH_ANY, + &status); + + t->codeRes = res; + + t->inCodeSize += srcProcessed; + *inCodePos = t->inCodeSize; + t->outCodeSize = t->dec.decoder.dicPos; + *outCodePos = t->dec.decoder.dicPos; + + if (res != SZ_OK) + return res; + + if (srcProcessed == srcSize) + *stop = False; + + if (blockWasFinished) + { + if (srcSize != srcProcessed) + return SZ_ERROR_FAIL; + + if (t->inPreSize == t->inCodeSize) + { + if (t->outPreSize != t->outCodeSize) + return SZ_ERROR_FAIL; + *stop = True; + } + } + else + { + if (t->outPreSize == t->outCodeSize) + *stop = True; + } + + return SZ_OK; + } +} + + +#define LZMA2DECMT_STREAM_WRITE_STEP (1 << 24) + +static SRes Lzma2DecMt_MtCallback_Write(void *pp, unsigned coderIndex, + BoolInt needWriteToStream, + const Byte *src, size_t srcSize, + BoolInt *needContinue, BoolInt *canRecode) +{ + CLzma2DecMt *me = (CLzma2DecMt *)pp; + const CLzma2DecMtThread *t = &me->coders[coderIndex]; + size_t size = t->outCodeSize; + const Byte *data = t->outBuf; + BoolInt needContinue2 = True; + + PRF_STR_INT_2("Write", coderIndex, srcSize); + + *needContinue = False; + *canRecode = True; + UNUSED_VAR(src) + UNUSED_VAR(srcSize) + + if ( + // t->parseStatus == LZMA_STATUS_FINISHED_WITH_MARK + t->state == MTDEC_PARSE_OVERFLOW + || t->state == MTDEC_PARSE_END) + needContinue2 = False; + + + if (!needWriteToStream) + return SZ_OK; + + me->mtc.inProcessed += t->inCodeSize; + + if (t->codeRes == SZ_OK) + if ((int)t->parseStatus == LZMA_STATUS_FINISHED_WITH_MARK + || t->parseStatus == LZMA2_PARSE_STATUS_NEW_BLOCK) + if (t->outPreSize != t->outCodeSize + || t->inPreSize != t->inCodeSize) + return SZ_ERROR_FAIL; + + *canRecode = False; + + if (me->outStream) + { + for (;;) + { + size_t cur = size; + size_t written; + if (cur > LZMA2DECMT_STREAM_WRITE_STEP) + cur = LZMA2DECMT_STREAM_WRITE_STEP; + + written = ISeqOutStream_Write(me->outStream, data, cur); + + me->outProcessed += written; + // me->mtc.writtenTotal += written; + if (written != cur) + return SZ_ERROR_WRITE; + data += cur; + size -= cur; + if (size == 0) + { + *needContinue = needContinue2; + return SZ_OK; + } + RINOK(MtProgress_ProgressAdd(&me->mtc.mtProgress, 0, 0)); + } + } + + return SZ_ERROR_FAIL; + /* + if (size > me->outBufSize) + return SZ_ERROR_OUTPUT_EOF; + memcpy(me->outBuf, data, size); + me->outBufSize -= size; + me->outBuf += size; + *needContinue = needContinue2; + return SZ_OK; + */ +} + +#endif + + +static SRes Lzma2Dec_Prepare_ST(CLzma2DecMt *p) +{ + if (!p->dec_created) + { + Lzma2Dec_Construct(&p->dec); + p->dec_created = True; + } + + RINOK(Lzma2Dec_Allocate(&p->dec, p->prop, &p->alignOffsetAlloc.vt)); + + if (!p->inBuf || p->inBufSize != p->props.inBufSize_ST) + { + ISzAlloc_Free(p->allocMid, p->inBuf); + p->inBufSize = 0; + p->inBuf = (Byte *)ISzAlloc_Alloc(p->allocMid, p->props.inBufSize_ST); + if (!p->inBuf) + return SZ_ERROR_MEM; + p->inBufSize = p->props.inBufSize_ST; + } + + Lzma2Dec_Init(&p->dec); + + return SZ_OK; +} + + +static SRes Lzma2Dec_Decode_ST(CLzma2DecMt *p + #ifndef _7ZIP_ST + , BoolInt tMode + #endif + ) +{ + 
SizeT wrPos; + size_t inPos, inLim; + const Byte *inData; + UInt64 inPrev, outPrev; + + CLzma2Dec *dec; + + #ifndef _7ZIP_ST + if (tMode) + { + Lzma2DecMt_FreeOutBufs(p); + tMode = MtDec_PrepareRead(&p->mtc); + } + #endif + + RINOK(Lzma2Dec_Prepare_ST(p)); + + dec = &p->dec; + + inPrev = p->inProcessed; + outPrev = p->outProcessed; + + inPos = 0; + inLim = 0; + inData = NULL; + wrPos = dec->decoder.dicPos; + + for (;;) + { + SizeT dicPos; + SizeT size; + ELzmaFinishMode finishMode; + SizeT inProcessed; + ELzmaStatus status; + SRes res; + + SizeT outProcessed; + BoolInt outFinished; + BoolInt needStop; + + if (inPos == inLim) + { + #ifndef _7ZIP_ST + if (tMode) + { + inData = MtDec_Read(&p->mtc, &inLim); + inPos = 0; + if (inData) + continue; + tMode = False; + inLim = 0; + } + #endif + + if (!p->readWasFinished) + { + inPos = 0; + inLim = p->inBufSize; + inData = p->inBuf; + p->readRes = ISeqInStream_Read(p->inStream, (void *)inData, &inLim); + // p->readProcessed += inLim; + // inLim -= 5; p->readWasFinished = True; // for test + if (inLim == 0 || p->readRes != SZ_OK) + p->readWasFinished = True; + } + } + + dicPos = dec->decoder.dicPos; + { + SizeT next = dec->decoder.dicBufSize; + if (next - wrPos > p->props.outStep_ST) + next = wrPos + p->props.outStep_ST; + size = next - dicPos; + } + + finishMode = LZMA_FINISH_ANY; + if (p->outSize_Defined) + { + const UInt64 rem = p->outSize - p->outProcessed; + if (size >= rem) + { + size = (SizeT)rem; + if (p->finishMode) + finishMode = LZMA_FINISH_END; + } + } + + inProcessed = inLim - inPos; + + res = Lzma2Dec_DecodeToDic(dec, dicPos + size, inData + inPos, &inProcessed, finishMode, &status); + + inPos += inProcessed; + p->inProcessed += inProcessed; + outProcessed = dec->decoder.dicPos - dicPos; + p->outProcessed += outProcessed; + + outFinished = (p->outSize_Defined && p->outSize <= p->outProcessed); + + needStop = (res != SZ_OK + || (inProcessed == 0 && outProcessed == 0) + || status == LZMA_STATUS_FINISHED_WITH_MARK + || (!p->finishMode && outFinished)); + + if (needStop || outProcessed >= size) + { + SRes res2; + { + size_t writeSize = dec->decoder.dicPos - wrPos; + size_t written = ISeqOutStream_Write(p->outStream, dec->decoder.dic + wrPos, writeSize); + res2 = (written == writeSize) ? 
SZ_OK : SZ_ERROR_WRITE; + } + + if (dec->decoder.dicPos == dec->decoder.dicBufSize) + dec->decoder.dicPos = 0; + wrPos = dec->decoder.dicPos; + + RINOK(res2); + + if (needStop) + { + if (res != SZ_OK) + return res; + + if (status == LZMA_STATUS_FINISHED_WITH_MARK) + { + if (p->finishMode) + { + if (p->outSize_Defined && p->outSize != p->outProcessed) + return SZ_ERROR_DATA; + } + return SZ_OK; + } + + if (!p->finishMode && outFinished) + return SZ_OK; + + if (status == LZMA_STATUS_NEEDS_MORE_INPUT) + return SZ_ERROR_INPUT_EOF; + + return SZ_ERROR_DATA; + } + } + + if (p->progress) + { + UInt64 inDelta = p->inProcessed - inPrev; + UInt64 outDelta = p->outProcessed - outPrev; + if (inDelta >= (1 << 22) || outDelta >= (1 << 22)) + { + RINOK(ICompressProgress_Progress(p->progress, p->inProcessed, p->outProcessed)); + inPrev = p->inProcessed; + outPrev = p->outProcessed; + } + } + } +} + + + +SRes Lzma2DecMt_Decode(CLzma2DecMtHandle pp, + Byte prop, + const CLzma2DecMtProps *props, + ISeqOutStream *outStream, const UInt64 *outDataSize, int finishMode, + // Byte *outBuf, size_t *outBufSize, + ISeqInStream *inStream, + // const Byte *inData, size_t inDataSize, + UInt64 *inProcessed, + // UInt64 *outProcessed, + int *isMT, + ICompressProgress *progress) +{ + CLzma2DecMt *p = (CLzma2DecMt *)pp; + #ifndef _7ZIP_ST + BoolInt tMode; + #endif + + *inProcessed = 0; + + if (prop > 40) + return SZ_ERROR_UNSUPPORTED; + + p->prop = prop; + p->props = *props; + + p->inStream = inStream; + p->outStream = outStream; + p->progress = progress; + + p->outSize = 0; + p->outSize_Defined = False; + if (outDataSize) + { + p->outSize_Defined = True; + p->outSize = *outDataSize; + } + p->finishMode = finishMode; + + p->outProcessed = 0; + p->inProcessed = 0; + + p->readWasFinished = False; + + *isMT = False; + + + #ifndef _7ZIP_ST + + tMode = False; + + // p->mtc.parseRes = SZ_OK; + + // p->mtc.numFilledThreads = 0; + // p->mtc.crossStart = 0; + // p->mtc.crossEnd = 0; + // p->mtc.allocError_for_Read_BlockIndex = 0; + // p->mtc.isAllocError = False; + + if (p->props.numThreads > 1) + { + IMtDecCallback vt; + + Lzma2DecMt_FreeSt(p); + + p->outProcessed_Parse = 0; + + if (!p->mtc_WasConstructed) + { + p->mtc_WasConstructed = True; + MtDec_Construct(&p->mtc); + } + + p->mtc.progress = progress; + p->mtc.inStream = inStream; + + // p->outBuf = NULL; + // p->outBufSize = 0; + /* + if (!outStream) + { + // p->outBuf = outBuf; + // p->outBufSize = *outBufSize; + // *outBufSize = 0; + return SZ_ERROR_PARAM; + } + */ + + // p->mtc.inBlockMax = p->props.inBlockMax; + p->mtc.alloc = &p->alignOffsetAlloc.vt; + // p->alignOffsetAlloc.baseAlloc; + // p->mtc.inData = inData; + // p->mtc.inDataSize = inDataSize; + p->mtc.mtCallback = &vt; + p->mtc.mtCallbackObject = p; + + p->mtc.inBufSize = p->props.inBufSize_MT; + + p->mtc.numThreadsMax = p->props.numThreads; + + *isMT = True; + + vt.Parse = Lzma2DecMt_MtCallback_Parse; + vt.PreCode = Lzma2DecMt_MtCallback_PreCode; + vt.Code = Lzma2DecMt_MtCallback_Code; + vt.Write = Lzma2DecMt_MtCallback_Write; + + { + BoolInt needContinue = False; + + SRes res = MtDec_Code(&p->mtc); + + /* + if (!outStream) + *outBufSize = p->outBuf - outBuf; + */ + + *inProcessed = p->mtc.inProcessed; + + needContinue = False; + + if (res == SZ_OK) + { + if (p->mtc.mtProgress.res != SZ_OK) + res = p->mtc.mtProgress.res; + else + needContinue = p->mtc.needContinue; + } + + if (!needContinue) + { + if (res == SZ_OK) + return p->mtc.readRes; + return res; + } + + tMode = True; + p->readRes = p->mtc.readRes; + 
p->readWasFinished = p->mtc.readWasFinished; + p->inProcessed = p->mtc.inProcessed; + + PRF_STR("----- decoding ST -----"); + } + } + + #endif + + + *isMT = False; + + { + SRes res = Lzma2Dec_Decode_ST(p + #ifndef _7ZIP_ST + , tMode + #endif + ); + + *inProcessed = p->inProcessed; + + // res = SZ_OK; // for test + if (res == SZ_OK && p->readRes != SZ_OK) + res = p->readRes; + + /* + #ifndef _7ZIP_ST + if (res == SZ_OK && tMode && p->mtc.parseRes != SZ_OK) + res = p->mtc.parseRes; + #endif + */ + + return res; + } +} + + +/* ---------- Read from CLzma2DecMtHandle Interface ---------- */ + +SRes Lzma2DecMt_Init(CLzma2DecMtHandle pp, + Byte prop, + const CLzma2DecMtProps *props, + const UInt64 *outDataSize, int finishMode, + ISeqInStream *inStream) +{ + CLzma2DecMt *p = (CLzma2DecMt *)pp; + + if (prop > 40) + return SZ_ERROR_UNSUPPORTED; + + p->prop = prop; + p->props = *props; + + p->inStream = inStream; + + p->outSize = 0; + p->outSize_Defined = False; + if (outDataSize) + { + p->outSize_Defined = True; + p->outSize = *outDataSize; + } + p->finishMode = finishMode; + + p->outProcessed = 0; + p->inProcessed = 0; + + p->inPos = 0; + p->inLim = 0; + + return Lzma2Dec_Prepare_ST(p); +} + + +SRes Lzma2DecMt_Read(CLzma2DecMtHandle pp, + Byte *data, size_t *outSize, + UInt64 *inStreamProcessed) +{ + CLzma2DecMt *p = (CLzma2DecMt *)pp; + ELzmaFinishMode finishMode; + SRes readRes; + size_t size = *outSize; + + *outSize = 0; + *inStreamProcessed = 0; + + finishMode = LZMA_FINISH_ANY; + if (p->outSize_Defined) + { + const UInt64 rem = p->outSize - p->outProcessed; + if (size >= rem) + { + size = (size_t)rem; + if (p->finishMode) + finishMode = LZMA_FINISH_END; + } + } + + readRes = SZ_OK; + + for (;;) + { + SizeT inCur; + SizeT outCur; + ELzmaStatus status; + SRes res; + + if (p->inPos == p->inLim && readRes == SZ_OK) + { + p->inPos = 0; + p->inLim = p->props.inBufSize_ST; + readRes = ISeqInStream_Read(p->inStream, p->inBuf, &p->inLim); + } + + inCur = p->inLim - p->inPos; + outCur = size; + + res = Lzma2Dec_DecodeToBuf(&p->dec, data, &outCur, + p->inBuf + p->inPos, &inCur, finishMode, &status); + + p->inPos += inCur; + p->inProcessed += inCur; + *inStreamProcessed += inCur; + p->outProcessed += outCur; + *outSize += outCur; + size -= outCur; + data += outCur; + + if (res != 0) + return res; + + /* + if (status == LZMA_STATUS_FINISHED_WITH_MARK) + return readRes; + + if (size == 0 && status != LZMA_STATUS_NEEDS_MORE_INPUT) + { + if (p->finishMode && p->outSize_Defined && p->outProcessed >= p->outSize) + return SZ_ERROR_DATA; + return readRes; + } + */ + + if (inCur == 0 && outCur == 0) + return readRes; + } +} diff --git a/3rdparty/7z/src/Lzma2DecMt.h b/3rdparty/7z/src/Lzma2DecMt.h new file mode 100644 index 0000000000..96f89a3ca2 --- /dev/null +++ b/3rdparty/7z/src/Lzma2DecMt.h @@ -0,0 +1,79 @@ +/* Lzma2DecMt.h -- LZMA2 Decoder Multi-thread +2018-02-17 : Igor Pavlov : Public domain */ + +#ifndef __LZMA2_DEC_MT_H +#define __LZMA2_DEC_MT_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +typedef struct +{ + size_t inBufSize_ST; + size_t outStep_ST; + + #ifndef _7ZIP_ST + unsigned numThreads; + size_t inBufSize_MT; + size_t outBlockMax; + size_t inBlockMax; + #endif +} CLzma2DecMtProps; + +/* init to single-thread mode */ +void Lzma2DecMtProps_Init(CLzma2DecMtProps *p); + + +/* ---------- CLzma2DecMtHandle Interface ---------- */ + +/* Lzma2DecMt_ * functions can return the following exit codes: +SRes: + SZ_OK - OK + SZ_ERROR_MEM - Memory allocation error + SZ_ERROR_PARAM - Incorrect parameter in props + 
SZ_ERROR_WRITE - ISeqOutStream write callback error + // SZ_ERROR_OUTPUT_EOF - output buffer overflow - version with (Byte *) output + SZ_ERROR_PROGRESS - some break from progress callback + SZ_ERROR_THREAD - error in multithreading functions (only for Mt version) +*/ + +typedef void * CLzma2DecMtHandle; + +CLzma2DecMtHandle Lzma2DecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid); +void Lzma2DecMt_Destroy(CLzma2DecMtHandle p); + +SRes Lzma2DecMt_Decode(CLzma2DecMtHandle p, + Byte prop, + const CLzma2DecMtProps *props, + ISeqOutStream *outStream, + const UInt64 *outDataSize, // NULL means undefined + int finishMode, // 0 - partial unpacking is allowed, 1 - if lzma2 stream must be finished + // Byte *outBuf, size_t *outBufSize, + ISeqInStream *inStream, + // const Byte *inData, size_t inDataSize, + + // out variables: + UInt64 *inProcessed, + int *isMT, /* out: (*isMT == 0), if single thread decoding was used */ + + // UInt64 *outProcessed, + ICompressProgress *progress); + + +/* ---------- Read from CLzma2DecMtHandle Interface ---------- */ + +SRes Lzma2DecMt_Init(CLzma2DecMtHandle pp, + Byte prop, + const CLzma2DecMtProps *props, + const UInt64 *outDataSize, int finishMode, + ISeqInStream *inStream); + +SRes Lzma2DecMt_Read(CLzma2DecMtHandle pp, + Byte *data, size_t *outSize, + UInt64 *inStreamProcessed); + + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/Lzma2Enc.c b/3rdparty/7z/src/Lzma2Enc.c new file mode 100644 index 0000000000..d541477527 --- /dev/null +++ b/3rdparty/7z/src/Lzma2Enc.c @@ -0,0 +1,803 @@ +/* Lzma2Enc.c -- LZMA2 Encoder +2018-07-04 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include <string.h> + +/* #define _7ZIP_ST */ + +#include "Lzma2Enc.h" + +#ifndef _7ZIP_ST +#include "MtCoder.h" +#else +#define MTCODER__THREADS_MAX 1 +#endif + +#define LZMA2_CONTROL_LZMA (1 << 7) +#define LZMA2_CONTROL_COPY_NO_RESET 2 +#define LZMA2_CONTROL_COPY_RESET_DIC 1 +#define LZMA2_CONTROL_EOF 0 + +#define LZMA2_LCLP_MAX 4 + +#define LZMA2_DIC_SIZE_FROM_PROP(p) (((UInt32)2 | ((p) & 1)) << ((p) / 2 + 11)) + +#define LZMA2_PACK_SIZE_MAX (1 << 16) +#define LZMA2_COPY_CHUNK_SIZE LZMA2_PACK_SIZE_MAX +#define LZMA2_UNPACK_SIZE_MAX (1 << 21) +#define LZMA2_KEEP_WINDOW_SIZE LZMA2_UNPACK_SIZE_MAX + +#define LZMA2_CHUNK_SIZE_COMPRESSED_MAX ((1 << 16) + 16) + + +#define PRF(x) /* x */ + + +/* ---------- CLimitedSeqInStream ---------- */ + +typedef struct +{ + ISeqInStream vt; + ISeqInStream *realStream; + UInt64 limit; + UInt64 processed; + int finished; +} CLimitedSeqInStream; + +static void LimitedSeqInStream_Init(CLimitedSeqInStream *p) +{ + p->limit = (UInt64)(Int64)-1; + p->processed = 0; + p->finished = 0; +} + +static SRes LimitedSeqInStream_Read(const ISeqInStream *pp, void *data, size_t *size) +{ + CLimitedSeqInStream *p = CONTAINER_FROM_VTBL(pp, CLimitedSeqInStream, vt); + size_t size2 = *size; + SRes res = SZ_OK; + + if (p->limit != (UInt64)(Int64)-1) + { + UInt64 rem = p->limit - p->processed; + if (size2 > rem) + size2 = (size_t)rem; + } + if (size2 != 0) + { + res = ISeqInStream_Read(p->realStream, data, &size2); + p->finished = (size2 == 0 ? 
1 : 0); + p->processed += size2; + } + *size = size2; + return res; +} + + +/* ---------- CLzma2EncInt ---------- */ + +typedef struct +{ + CLzmaEncHandle enc; + Byte propsAreSet; + Byte propsByte; + Byte needInitState; + Byte needInitProp; + UInt64 srcPos; +} CLzma2EncInt; + + +static SRes Lzma2EncInt_InitStream(CLzma2EncInt *p, const CLzma2EncProps *props) +{ + if (!p->propsAreSet) + { + SizeT propsSize = LZMA_PROPS_SIZE; + Byte propsEncoded[LZMA_PROPS_SIZE]; + RINOK(LzmaEnc_SetProps(p->enc, &props->lzmaProps)); + RINOK(LzmaEnc_WriteProperties(p->enc, propsEncoded, &propsSize)); + p->propsByte = propsEncoded[0]; + p->propsAreSet = True; + } + return SZ_OK; +} + +static void Lzma2EncInt_InitBlock(CLzma2EncInt *p) +{ + p->srcPos = 0; + p->needInitState = True; + p->needInitProp = True; +} + + +SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp, ISeqInStream *inStream, UInt32 keepWindowSize, + ISzAllocPtr alloc, ISzAllocPtr allocBig); +SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen, + UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig); +SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit, + Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize); +const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp); +void LzmaEnc_Finish(CLzmaEncHandle pp); +void LzmaEnc_SaveState(CLzmaEncHandle pp); +void LzmaEnc_RestoreState(CLzmaEncHandle pp); + +/* +UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp); +*/ + +static SRes Lzma2EncInt_EncodeSubblock(CLzma2EncInt *p, Byte *outBuf, + size_t *packSizeRes, ISeqOutStream *outStream) +{ + size_t packSizeLimit = *packSizeRes; + size_t packSize = packSizeLimit; + UInt32 unpackSize = LZMA2_UNPACK_SIZE_MAX; + unsigned lzHeaderSize = 5 + (p->needInitProp ? 1 : 0); + BoolInt useCopyBlock; + SRes res; + + *packSizeRes = 0; + if (packSize < lzHeaderSize) + return SZ_ERROR_OUTPUT_EOF; + packSize -= lzHeaderSize; + + LzmaEnc_SaveState(p->enc); + res = LzmaEnc_CodeOneMemBlock(p->enc, p->needInitState, + outBuf + lzHeaderSize, &packSize, LZMA2_PACK_SIZE_MAX, &unpackSize); + + PRF(printf("\npackSize = %7d unpackSize = %7d ", packSize, unpackSize)); + + if (unpackSize == 0) + return res; + + if (res == SZ_OK) + useCopyBlock = (packSize + 2 >= unpackSize || packSize > (1 << 16)); + else + { + if (res != SZ_ERROR_OUTPUT_EOF) + return res; + res = SZ_OK; + useCopyBlock = True; + } + + if (useCopyBlock) + { + size_t destPos = 0; + PRF(printf("################# COPY ")); + + while (unpackSize > 0) + { + UInt32 u = (unpackSize < LZMA2_COPY_CHUNK_SIZE) ? unpackSize : LZMA2_COPY_CHUNK_SIZE; + if (packSizeLimit - destPos < u + 3) + return SZ_ERROR_OUTPUT_EOF; + outBuf[destPos++] = (Byte)(p->srcPos == 0 ? LZMA2_CONTROL_COPY_RESET_DIC : LZMA2_CONTROL_COPY_NO_RESET); + outBuf[destPos++] = (Byte)((u - 1) >> 8); + outBuf[destPos++] = (Byte)(u - 1); + memcpy(outBuf + destPos, LzmaEnc_GetCurBuf(p->enc) - unpackSize, u); + unpackSize -= u; + destPos += u; + p->srcPos += u; + + if (outStream) + { + *packSizeRes += destPos; + if (ISeqOutStream_Write(outStream, outBuf, destPos) != destPos) + return SZ_ERROR_WRITE; + destPos = 0; + } + else + *packSizeRes = destPos; + /* needInitState = True; */ + } + + LzmaEnc_RestoreState(p->enc); + return SZ_OK; + } + + { + size_t destPos = 0; + UInt32 u = unpackSize - 1; + UInt32 pm = (UInt32)(packSize - 1); + unsigned mode = (p->srcPos == 0) ? 3 : (p->needInitState ? (p->needInitProp ? 
2 : 1) : 0); + + PRF(printf(" ")); + + outBuf[destPos++] = (Byte)(LZMA2_CONTROL_LZMA | (mode << 5) | ((u >> 16) & 0x1F)); + outBuf[destPos++] = (Byte)(u >> 8); + outBuf[destPos++] = (Byte)u; + outBuf[destPos++] = (Byte)(pm >> 8); + outBuf[destPos++] = (Byte)pm; + + if (p->needInitProp) + outBuf[destPos++] = p->propsByte; + + p->needInitProp = False; + p->needInitState = False; + destPos += packSize; + p->srcPos += unpackSize; + + if (outStream) + if (ISeqOutStream_Write(outStream, outBuf, destPos) != destPos) + return SZ_ERROR_WRITE; + + *packSizeRes = destPos; + return SZ_OK; + } +} + + +/* ---------- Lzma2 Props ---------- */ + +void Lzma2EncProps_Init(CLzma2EncProps *p) +{ + LzmaEncProps_Init(&p->lzmaProps); + p->blockSize = LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO; + p->numBlockThreads_Reduced = -1; + p->numBlockThreads_Max = -1; + p->numTotalThreads = -1; +} + +void Lzma2EncProps_Normalize(CLzma2EncProps *p) +{ + UInt64 fileSize; + int t1, t1n, t2, t2r, t3; + { + CLzmaEncProps lzmaProps = p->lzmaProps; + LzmaEncProps_Normalize(&lzmaProps); + t1n = lzmaProps.numThreads; + } + + t1 = p->lzmaProps.numThreads; + t2 = p->numBlockThreads_Max; + t3 = p->numTotalThreads; + + if (t2 > MTCODER__THREADS_MAX) + t2 = MTCODER__THREADS_MAX; + + if (t3 <= 0) + { + if (t2 <= 0) + t2 = 1; + t3 = t1n * t2; + } + else if (t2 <= 0) + { + t2 = t3 / t1n; + if (t2 == 0) + { + t1 = 1; + t2 = t3; + } + if (t2 > MTCODER__THREADS_MAX) + t2 = MTCODER__THREADS_MAX; + } + else if (t1 <= 0) + { + t1 = t3 / t2; + if (t1 == 0) + t1 = 1; + } + else + t3 = t1n * t2; + + p->lzmaProps.numThreads = t1; + + t2r = t2; + + fileSize = p->lzmaProps.reduceSize; + + if ( p->blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID + && p->blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO + && (p->blockSize < fileSize || fileSize == (UInt64)(Int64)-1)) + p->lzmaProps.reduceSize = p->blockSize; + + LzmaEncProps_Normalize(&p->lzmaProps); + + p->lzmaProps.reduceSize = fileSize; + + t1 = p->lzmaProps.numThreads; + + if (p->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID) + { + t2r = t2 = 1; + t3 = t1; + } + else if (p->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO && t2 <= 1) + { + /* if there is no block multi-threading, we use SOLID block */ + p->blockSize = LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID; + } + else + { + if (p->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO) + { + const UInt32 kMinSize = (UInt32)1 << 20; + const UInt32 kMaxSize = (UInt32)1 << 28; + const UInt32 dictSize = p->lzmaProps.dictSize; + UInt64 blockSize = (UInt64)dictSize << 2; + if (blockSize < kMinSize) blockSize = kMinSize; + if (blockSize > kMaxSize) blockSize = kMaxSize; + if (blockSize < dictSize) blockSize = dictSize; + blockSize += (kMinSize - 1); + blockSize &= ~(UInt64)(kMinSize - 1); + p->blockSize = blockSize; + } + + if (t2 > 1 && fileSize != (UInt64)(Int64)-1) + { + UInt64 numBlocks = fileSize / p->blockSize; + if (numBlocks * p->blockSize != fileSize) + numBlocks++; + if (numBlocks < (unsigned)t2) + { + t2r = (unsigned)numBlocks; + if (t2r == 0) + t2r = 1; + t3 = t1 * t2r; + } + } + } + + p->numBlockThreads_Max = t2; + p->numBlockThreads_Reduced = t2r; + p->numTotalThreads = t3; +} + + +static SRes Progress(ICompressProgress *p, UInt64 inSize, UInt64 outSize) +{ + return (p && ICompressProgress_Progress(p, inSize, outSize) != SZ_OK) ? 
SZ_ERROR_PROGRESS : SZ_OK; +} + + +/* ---------- Lzma2 ---------- */ + +typedef struct +{ + Byte propEncoded; + CLzma2EncProps props; + UInt64 expectedDataSize; + + Byte *tempBufLzma; + + ISzAllocPtr alloc; + ISzAllocPtr allocBig; + + CLzma2EncInt coders[MTCODER__THREADS_MAX]; + + #ifndef _7ZIP_ST + + ISeqOutStream *outStream; + Byte *outBuf; + size_t outBuf_Rem; /* remainder in outBuf */ + + size_t outBufSize; /* size of allocated outBufs[i] */ + size_t outBufsDataSizes[MTCODER__BLOCKS_MAX]; + BoolInt mtCoder_WasConstructed; + CMtCoder mtCoder; + Byte *outBufs[MTCODER__BLOCKS_MAX]; + + #endif + +} CLzma2Enc; + + + +CLzma2EncHandle Lzma2Enc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig) +{ + CLzma2Enc *p = (CLzma2Enc *)ISzAlloc_Alloc(alloc, sizeof(CLzma2Enc)); + if (!p) + return NULL; + Lzma2EncProps_Init(&p->props); + Lzma2EncProps_Normalize(&p->props); + p->expectedDataSize = (UInt64)(Int64)-1; + p->tempBufLzma = NULL; + p->alloc = alloc; + p->allocBig = allocBig; + { + unsigned i; + for (i = 0; i < MTCODER__THREADS_MAX; i++) + p->coders[i].enc = NULL; + } + + #ifndef _7ZIP_ST + p->mtCoder_WasConstructed = False; + { + unsigned i; + for (i = 0; i < MTCODER__BLOCKS_MAX; i++) + p->outBufs[i] = NULL; + p->outBufSize = 0; + } + #endif + + return p; +} + + +#ifndef _7ZIP_ST + +static void Lzma2Enc_FreeOutBufs(CLzma2Enc *p) +{ + unsigned i; + for (i = 0; i < MTCODER__BLOCKS_MAX; i++) + if (p->outBufs[i]) + { + ISzAlloc_Free(p->alloc, p->outBufs[i]); + p->outBufs[i] = NULL; + } + p->outBufSize = 0; +} + +#endif + + +void Lzma2Enc_Destroy(CLzma2EncHandle pp) +{ + CLzma2Enc *p = (CLzma2Enc *)pp; + unsigned i; + for (i = 0; i < MTCODER__THREADS_MAX; i++) + { + CLzma2EncInt *t = &p->coders[i]; + if (t->enc) + { + LzmaEnc_Destroy(t->enc, p->alloc, p->allocBig); + t->enc = NULL; + } + } + + + #ifndef _7ZIP_ST + if (p->mtCoder_WasConstructed) + { + MtCoder_Destruct(&p->mtCoder); + p->mtCoder_WasConstructed = False; + } + Lzma2Enc_FreeOutBufs(p); + #endif + + ISzAlloc_Free(p->alloc, p->tempBufLzma); + p->tempBufLzma = NULL; + + ISzAlloc_Free(p->alloc, pp); +} + + +SRes Lzma2Enc_SetProps(CLzma2EncHandle pp, const CLzma2EncProps *props) +{ + CLzma2Enc *p = (CLzma2Enc *)pp; + CLzmaEncProps lzmaProps = props->lzmaProps; + LzmaEncProps_Normalize(&lzmaProps); + if (lzmaProps.lc + lzmaProps.lp > LZMA2_LCLP_MAX) + return SZ_ERROR_PARAM; + p->props = *props; + Lzma2EncProps_Normalize(&p->props); + return SZ_OK; +} + + +void Lzma2Enc_SetDataSize(CLzma2EncHandle pp, UInt64 expectedDataSize) +{ + CLzma2Enc *p = (CLzma2Enc *)pp; + p->expectedDataSize = expectedDataSize; +} + + +Byte Lzma2Enc_WriteProperties(CLzma2EncHandle pp) +{ + CLzma2Enc *p = (CLzma2Enc *)pp; + unsigned i; + UInt32 dicSize = LzmaEncProps_GetDictSize(&p->props.lzmaProps); + for (i = 0; i < 40; i++) + if (dicSize <= LZMA2_DIC_SIZE_FROM_PROP(i)) + break; + return (Byte)i; +} + + +static SRes Lzma2Enc_EncodeMt1( + CLzma2Enc *me, + CLzma2EncInt *p, + ISeqOutStream *outStream, + Byte *outBuf, size_t *outBufSize, + ISeqInStream *inStream, + const Byte *inData, size_t inDataSize, + int finished, + ICompressProgress *progress) +{ + UInt64 unpackTotal = 0; + UInt64 packTotal = 0; + size_t outLim = 0; + CLimitedSeqInStream limitedInStream; + + if (outBuf) + { + outLim = *outBufSize; + *outBufSize = 0; + } + + if (!p->enc) + { + p->propsAreSet = False; + p->enc = LzmaEnc_Create(me->alloc); + if (!p->enc) + return SZ_ERROR_MEM; + } + + limitedInStream.realStream = inStream; + if (inStream) + { + limitedInStream.vt.Read = LimitedSeqInStream_Read; + } + + 
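// Editor's note: in the block loop below, limitedInStream.limit is set to props.blockSize, so each LZMA2 block reads at most one block's worth of input from the real stream. + 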
if (!outBuf) + { + // outStream version works only in one thread. So we use CLzma2Enc::tempBufLzma + if (!me->tempBufLzma) + { + me->tempBufLzma = (Byte *)ISzAlloc_Alloc(me->alloc, LZMA2_CHUNK_SIZE_COMPRESSED_MAX); + if (!me->tempBufLzma) + return SZ_ERROR_MEM; + } + } + + RINOK(Lzma2EncInt_InitStream(p, &me->props)); + + for (;;) + { + SRes res = SZ_OK; + size_t inSizeCur = 0; + + Lzma2EncInt_InitBlock(p); + + LimitedSeqInStream_Init(&limitedInStream); + limitedInStream.limit = me->props.blockSize; + + if (inStream) + { + UInt64 expected = (UInt64)(Int64)-1; + // inStream version works only in one thread. So we use CLzma2Enc::expectedDataSize + if (me->expectedDataSize != (UInt64)(Int64)-1 + && me->expectedDataSize >= unpackTotal) + expected = me->expectedDataSize - unpackTotal; + if (me->props.blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID + && expected > me->props.blockSize) + expected = (size_t)me->props.blockSize; + + LzmaEnc_SetDataSize(p->enc, expected); + + RINOK(LzmaEnc_PrepareForLzma2(p->enc, + &limitedInStream.vt, + LZMA2_KEEP_WINDOW_SIZE, + me->alloc, + me->allocBig)); + } + else + { + inSizeCur = inDataSize - (size_t)unpackTotal; + if (me->props.blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID + && inSizeCur > me->props.blockSize) + inSizeCur = (size_t)me->props.blockSize; + + // LzmaEnc_SetDataSize(p->enc, inSizeCur); + + RINOK(LzmaEnc_MemPrepare(p->enc, + inData + (size_t)unpackTotal, inSizeCur, + LZMA2_KEEP_WINDOW_SIZE, + me->alloc, + me->allocBig)); + } + + for (;;) + { + size_t packSize = LZMA2_CHUNK_SIZE_COMPRESSED_MAX; + if (outBuf) + packSize = outLim - (size_t)packTotal; + + res = Lzma2EncInt_EncodeSubblock(p, + outBuf ? outBuf + (size_t)packTotal : me->tempBufLzma, &packSize, + outBuf ? NULL : outStream); + + if (res != SZ_OK) + break; + + packTotal += packSize; + if (outBuf) + *outBufSize = (size_t)packTotal; + + res = Progress(progress, unpackTotal + p->srcPos, packTotal); + if (res != SZ_OK) + break; + + /* + if (LzmaEnc_GetNumAvailableBytes(p->enc) == 0) + break; + */ + + if (packSize == 0) + break; + } + + LzmaEnc_Finish(p->enc); + + unpackTotal += p->srcPos; + + RINOK(res); + + if (p->srcPos != (inStream ? limitedInStream.processed : inSizeCur)) + return SZ_ERROR_FAIL; + + if (inStream ? 
limitedInStream.finished : (unpackTotal == inDataSize)) + { + if (finished) + { + if (outBuf) + { + size_t destPos = *outBufSize; + if (destPos >= outLim) + return SZ_ERROR_OUTPUT_EOF; + outBuf[destPos] = 0; + *outBufSize = destPos + 1; + } + else + { + Byte b = 0; + if (ISeqOutStream_Write(outStream, &b, 1) != 1) + return SZ_ERROR_WRITE; + } + } + return SZ_OK; + } + } +} + + + +#ifndef _7ZIP_ST + +static SRes Lzma2Enc_MtCallback_Code(void *pp, unsigned coderIndex, unsigned outBufIndex, + const Byte *src, size_t srcSize, int finished) +{ + CLzma2Enc *me = (CLzma2Enc *)pp; + size_t destSize = me->outBufSize; + SRes res; + CMtProgressThunk progressThunk; + + Byte *dest = me->outBufs[outBufIndex]; + + me->outBufsDataSizes[outBufIndex] = 0; + + if (!dest) + { + dest = (Byte *)ISzAlloc_Alloc(me->alloc, me->outBufSize); + if (!dest) + return SZ_ERROR_MEM; + me->outBufs[outBufIndex] = dest; + } + + MtProgressThunk_CreateVTable(&progressThunk); + progressThunk.mtProgress = &me->mtCoder.mtProgress; + progressThunk.inSize = 0; + progressThunk.outSize = 0; + + res = Lzma2Enc_EncodeMt1(me, + &me->coders[coderIndex], + NULL, dest, &destSize, + NULL, src, srcSize, + finished, + &progressThunk.vt); + + me->outBufsDataSizes[outBufIndex] = destSize; + + return res; +} + + +static SRes Lzma2Enc_MtCallback_Write(void *pp, unsigned outBufIndex) +{ + CLzma2Enc *me = (CLzma2Enc *)pp; + size_t size = me->outBufsDataSizes[outBufIndex]; + const Byte *data = me->outBufs[outBufIndex]; + + if (me->outStream) + return ISeqOutStream_Write(me->outStream, data, size) == size ? SZ_OK : SZ_ERROR_WRITE; + + if (size > me->outBuf_Rem) + return SZ_ERROR_OUTPUT_EOF; + memcpy(me->outBuf, data, size); + me->outBuf_Rem -= size; + me->outBuf += size; + return SZ_OK; +} + +#endif + + + +SRes Lzma2Enc_Encode2(CLzma2EncHandle pp, + ISeqOutStream *outStream, + Byte *outBuf, size_t *outBufSize, + ISeqInStream *inStream, + const Byte *inData, size_t inDataSize, + ICompressProgress *progress) +{ + CLzma2Enc *p = (CLzma2Enc *)pp; + + if (inStream && inData) + return SZ_ERROR_PARAM; + + if (outStream && outBuf) + return SZ_ERROR_PARAM; + + { + unsigned i; + for (i = 0; i < MTCODER__THREADS_MAX; i++) + p->coders[i].propsAreSet = False; + } + + #ifndef _7ZIP_ST + + if (p->props.numBlockThreads_Reduced > 1) + { + IMtCoderCallback2 vt; + + if (!p->mtCoder_WasConstructed) + { + p->mtCoder_WasConstructed = True; + MtCoder_Construct(&p->mtCoder); + } + + vt.Code = Lzma2Enc_MtCallback_Code; + vt.Write = Lzma2Enc_MtCallback_Write; + + p->outStream = outStream; + p->outBuf = NULL; + p->outBuf_Rem = 0; + if (!outStream) + { + p->outBuf = outBuf; + p->outBuf_Rem = *outBufSize; + *outBufSize = 0; + } + + p->mtCoder.allocBig = p->allocBig; + p->mtCoder.progress = progress; + p->mtCoder.inStream = inStream; + p->mtCoder.inData = inData; + p->mtCoder.inDataSize = inDataSize; + p->mtCoder.mtCallback = &vt; + p->mtCoder.mtCallbackObject = p; + + p->mtCoder.blockSize = (size_t)p->props.blockSize; + if (p->mtCoder.blockSize != p->props.blockSize) + return SZ_ERROR_PARAM; /* SZ_ERROR_MEM */ + + { + size_t destBlockSize = p->mtCoder.blockSize + (p->mtCoder.blockSize >> 10) + 16; + if (destBlockSize < p->mtCoder.blockSize) + return SZ_ERROR_PARAM; + if (p->outBufSize != destBlockSize) + Lzma2Enc_FreeOutBufs(p); + p->outBufSize = destBlockSize; + } + + p->mtCoder.numThreadsMax = p->props.numBlockThreads_Max; + p->mtCoder.expectedDataSize = p->expectedDataSize; + + { + SRes res = MtCoder_Code(&p->mtCoder); + if (!outStream) + *outBufSize = p->outBuf - outBuf; + 
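// Editor's note: Lzma2Enc_MtCallback_Write advances p->outBuf past each block it copies, so this subtraction yields the total packed size placed in the caller's buffer. + 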
return res; + } + } + + #endif + + + return Lzma2Enc_EncodeMt1(p, + &p->coders[0], + outStream, outBuf, outBufSize, + inStream, inData, inDataSize, + True, /* finished */ + progress); +} diff --git a/3rdparty/7z/src/Lzma2Enc.h b/3rdparty/7z/src/Lzma2Enc.h new file mode 100644 index 0000000000..65f2dd145d --- /dev/null +++ b/3rdparty/7z/src/Lzma2Enc.h @@ -0,0 +1,55 @@ +/* Lzma2Enc.h -- LZMA2 Encoder +2017-07-27 : Igor Pavlov : Public domain */ + +#ifndef __LZMA2_ENC_H +#define __LZMA2_ENC_H + +#include "LzmaEnc.h" + +EXTERN_C_BEGIN + +#define LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO 0 +#define LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID ((UInt64)(Int64)-1) + +typedef struct +{ + CLzmaEncProps lzmaProps; + UInt64 blockSize; + int numBlockThreads_Reduced; + int numBlockThreads_Max; + int numTotalThreads; +} CLzma2EncProps; + +void Lzma2EncProps_Init(CLzma2EncProps *p); +void Lzma2EncProps_Normalize(CLzma2EncProps *p); + +/* ---------- CLzma2EncHandle Interface ---------- */ + +/* Lzma2Enc_* functions can return the following exit codes: +SRes: + SZ_OK - OK + SZ_ERROR_MEM - Memory allocation error + SZ_ERROR_PARAM - Incorrect parameter in props + SZ_ERROR_WRITE - ISeqOutStream write callback error + SZ_ERROR_OUTPUT_EOF - output buffer overflow - version with (Byte *) output + SZ_ERROR_PROGRESS - some break from progress callback + SZ_ERROR_THREAD - error in multithreading functions (only for Mt version) +*/ + +typedef void * CLzma2EncHandle; + +CLzma2EncHandle Lzma2Enc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig); +void Lzma2Enc_Destroy(CLzma2EncHandle p); +SRes Lzma2Enc_SetProps(CLzma2EncHandle p, const CLzma2EncProps *props); +void Lzma2Enc_SetDataSize(CLzma2EncHandle p, UInt64 expectedDataSize); +Byte Lzma2Enc_WriteProperties(CLzma2EncHandle p); +SRes Lzma2Enc_Encode2(CLzma2EncHandle p, + ISeqOutStream *outStream, + Byte *outBuf, size_t *outBufSize, + ISeqInStream *inStream, + const Byte *inData, size_t inDataSize, + ICompressProgress *progress); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/Lzma86.h b/3rdparty/7z/src/Lzma86.h new file mode 100644 index 0000000000..83057e5983 --- /dev/null +++ b/3rdparty/7z/src/Lzma86.h @@ -0,0 +1,111 @@ +/* Lzma86.h -- LZMA + x86 (BCJ) Filter +2013-01-18 : Igor Pavlov : Public domain */ + +#ifndef __LZMA86_H +#define __LZMA86_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +#define LZMA86_SIZE_OFFSET (1 + 5) +#define LZMA86_HEADER_SIZE (LZMA86_SIZE_OFFSET + 8) + +/* +It's an example for LZMA + x86 Filter use. +You can use .lzma86 extension, if you write that stream to file. +.lzma86 header adds one additional byte to standard .lzma header. +.lzma86 header (14 bytes): + Offset Size Description + 0 1 = 0 - no filter, pure LZMA + = 1 - x86 filter + LZMA + 1 1 lc, lp and pb in encoded form + 2 4 dictSize (little endian) + 6 8 uncompressed size (little endian) + + +Lzma86_Encode +------------- +level - compression level: 0 <= level <= 9, the default value for "level" is 5. + +dictSize - The dictionary size in bytes. The maximum value is + 128 MB = (1 << 27) bytes for 32-bit version + 1 GB = (1 << 30) bytes for 64-bit version + The default value is 16 MB = (1 << 24) bytes, for level = 5. + It's recommended to use the dictionary that is larger than 4 KB and + that can be calculated as (1 << N) or (3 << N) sizes. + For better compression ratio dictSize must be >= inSize. + +filterMode: + SZ_FILTER_NO - no Filter + SZ_FILTER_YES - x86 Filter + SZ_FILTER_AUTO - it tries both alternatives to select best. 
+ Encoder will use 2 or 3 passes: + 2 passes when FILTER_NO provides better compression. + 3 passes when FILTER_YES provides better compression. + +Lzma86_Encode allocates Data with MyAlloc functions. +RAM Requirements for compressing: + RamSize = dictionarySize * 11.5 + 6MB + FilterBlockSize + filterMode FilterBlockSize + SZ_FILTER_NO 0 + SZ_FILTER_YES inSize + SZ_FILTER_AUTO inSize + + +Return code: + SZ_OK - OK + SZ_ERROR_MEM - Memory allocation error + SZ_ERROR_PARAM - Incorrect parameter + SZ_ERROR_OUTPUT_EOF - output buffer overflow + SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version) +*/ + +enum ESzFilterMode +{ + SZ_FILTER_NO, + SZ_FILTER_YES, + SZ_FILTER_AUTO +}; + +SRes Lzma86_Encode(Byte *dest, size_t *destLen, const Byte *src, size_t srcLen, + int level, UInt32 dictSize, int filterMode); + + +/* +Lzma86_GetUnpackSize: + In: + src - input data + srcLen - input data size + Out: + unpackSize - size of uncompressed stream + Return code: + SZ_OK - OK + SZ_ERROR_INPUT_EOF - Error in headers +*/ + +SRes Lzma86_GetUnpackSize(const Byte *src, SizeT srcLen, UInt64 *unpackSize); + +/* +Lzma86_Decode: + In: + dest - output data + destLen - output data size + src - input data + srcLen - input data size + Out: + destLen - processed output size + srcLen - processed input size + Return code: + SZ_OK - OK + SZ_ERROR_DATA - Data error + SZ_ERROR_MEM - Memory allocation error + SZ_ERROR_UNSUPPORTED - unsupported file + SZ_ERROR_INPUT_EOF - it needs more bytes in input buffer +*/ + +SRes Lzma86_Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/Lzma86Dec.c b/3rdparty/7z/src/Lzma86Dec.c new file mode 100644 index 0000000000..20ac5e7a97 --- /dev/null +++ b/3rdparty/7z/src/Lzma86Dec.c @@ -0,0 +1,54 @@ +/* Lzma86Dec.c -- LZMA + x86 (BCJ) Filter Decoder +2016-05-16 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "Lzma86.h" + +#include "Alloc.h" +#include "Bra.h" +#include "LzmaDec.h" + +SRes Lzma86_GetUnpackSize(const Byte *src, SizeT srcLen, UInt64 *unpackSize) +{ + unsigned i; + if (srcLen < LZMA86_HEADER_SIZE) + return SZ_ERROR_INPUT_EOF; + *unpackSize = 0; + for (i = 0; i < sizeof(UInt64); i++) + *unpackSize += ((UInt64)src[LZMA86_SIZE_OFFSET + i]) << (8 * i); + return SZ_OK; +} + +SRes Lzma86_Decode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen) +{ + SRes res; + int useFilter; + SizeT inSizePure; + ELzmaStatus status; + + if (*srcLen < LZMA86_HEADER_SIZE) + return SZ_ERROR_INPUT_EOF; + + useFilter = src[0]; + + if (useFilter > 1) + { + *destLen = 0; + return SZ_ERROR_UNSUPPORTED; + } + + inSizePure = *srcLen - LZMA86_HEADER_SIZE; + res = LzmaDecode(dest, destLen, src + LZMA86_HEADER_SIZE, &inSizePure, + src + 1, LZMA_PROPS_SIZE, LZMA_FINISH_ANY, &status, &g_Alloc); + *srcLen = inSizePure + LZMA86_HEADER_SIZE; + if (res != SZ_OK) + return res; + if (useFilter == 1) + { + UInt32 x86State; + x86_Convert_Init(x86State); + x86_Convert(dest, *destLen, 0, &x86State, 0); + } + return SZ_OK; +} diff --git a/3rdparty/7z/src/Lzma86Enc.c b/3rdparty/7z/src/Lzma86Enc.c new file mode 100644 index 0000000000..8d35e6dc5e --- /dev/null +++ b/3rdparty/7z/src/Lzma86Enc.c @@ -0,0 +1,106 @@ +/* Lzma86Enc.c -- LZMA + x86 (BCJ) Filter Encoder +2018-07-04 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include <string.h> + +#include "Lzma86.h" + +#include "Alloc.h" +#include "Bra.h" +#include "LzmaEnc.h" + +#define SZE_OUT_OVERFLOW SZE_DATA_ERROR + +int Lzma86_Encode(Byte *dest, size_t 
*destLen, const Byte *src, size_t srcLen, + int level, UInt32 dictSize, int filterMode) +{ + size_t outSize2 = *destLen; + Byte *filteredStream; + BoolInt useFilter; + int mainResult = SZ_ERROR_OUTPUT_EOF; + CLzmaEncProps props; + LzmaEncProps_Init(&props); + props.level = level; + props.dictSize = dictSize; + + *destLen = 0; + if (outSize2 < LZMA86_HEADER_SIZE) + return SZ_ERROR_OUTPUT_EOF; + + { + int i; + UInt64 t = srcLen; + for (i = 0; i < 8; i++, t >>= 8) + dest[LZMA86_SIZE_OFFSET + i] = (Byte)t; + } + + filteredStream = 0; + useFilter = (filterMode != SZ_FILTER_NO); + if (useFilter) + { + if (srcLen != 0) + { + filteredStream = (Byte *)MyAlloc(srcLen); + if (filteredStream == 0) + return SZ_ERROR_MEM; + memcpy(filteredStream, src, srcLen); + } + { + UInt32 x86State; + x86_Convert_Init(x86State); + x86_Convert(filteredStream, srcLen, 0, &x86State, 1); + } + } + + { + size_t minSize = 0; + BoolInt bestIsFiltered = False; + + /* passes for SZ_FILTER_AUTO: + 0 - BCJ + LZMA + 1 - LZMA + 2 - BCJ + LZMA again, if pass 0 (BCJ + LZMA) is better. + */ + int numPasses = (filterMode == SZ_FILTER_AUTO) ? 3 : 1; + + int i; + for (i = 0; i < numPasses; i++) + { + size_t outSizeProcessed = outSize2 - LZMA86_HEADER_SIZE; + size_t outPropsSize = 5; + SRes curRes; + BoolInt curModeIsFiltered = (numPasses > 1 && i == numPasses - 1); + if (curModeIsFiltered && !bestIsFiltered) + break; + if (useFilter && i == 0) + curModeIsFiltered = True; + + curRes = LzmaEncode(dest + LZMA86_HEADER_SIZE, &outSizeProcessed, + curModeIsFiltered ? filteredStream : src, srcLen, + &props, dest + 1, &outPropsSize, 0, + NULL, &g_Alloc, &g_Alloc); + + if (curRes != SZ_ERROR_OUTPUT_EOF) + { + if (curRes != SZ_OK) + { + mainResult = curRes; + break; + } + if (outSizeProcessed <= minSize || mainResult != SZ_OK) + { + minSize = outSizeProcessed; + bestIsFiltered = curModeIsFiltered; + mainResult = SZ_OK; + } + } + } + dest[0] = (Byte)(bestIsFiltered ? 
1 : 0); + *destLen = LZMA86_HEADER_SIZE + minSize; + } + if (useFilter) + MyFree(filteredStream); + return mainResult; +} diff --git a/3rdparty/7z/src/LzmaDec.c b/3rdparty/7z/src/LzmaDec.c new file mode 100644 index 0000000000..4d1576419f --- /dev/null +++ b/3rdparty/7z/src/LzmaDec.c @@ -0,0 +1,1185 @@ +/* LzmaDec.c -- LZMA Decoder +2018-07-04 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include <string.h> + +/* #include "CpuArch.h" */ +#include "LzmaDec.h" + +#define kNumTopBits 24 +#define kTopValue ((UInt32)1 << kNumTopBits) + +#define kNumBitModelTotalBits 11 +#define kBitModelTotal (1 << kNumBitModelTotalBits) +#define kNumMoveBits 5 + +#define RC_INIT_SIZE 5 + +#define NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | (*buf++); } + +#define IF_BIT_0(p) ttt = *(p); NORMALIZE; bound = (range >> kNumBitModelTotalBits) * (UInt32)ttt; if (code < bound) +#define UPDATE_0(p) range = bound; *(p) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits)); +#define UPDATE_1(p) range -= bound; code -= bound; *(p) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits)); +#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \ + { UPDATE_0(p); i = (i + i); A0; } else \ + { UPDATE_1(p); i = (i + i) + 1; A1; } + +#define TREE_GET_BIT(probs, i) { GET_BIT2(probs + i, i, ;, ;); } + +#define REV_BIT(p, i, A0, A1) IF_BIT_0(p + i) \ + { UPDATE_0(p + i); A0; } else \ + { UPDATE_1(p + i); A1; } +#define REV_BIT_VAR( p, i, m) REV_BIT(p, i, i += m; m += m, m += m; i += m; ) +#define REV_BIT_CONST(p, i, m) REV_BIT(p, i, i += m; , i += m * 2; ) +#define REV_BIT_LAST( p, i, m) REV_BIT(p, i, i -= m , ; ) + +#define TREE_DECODE(probs, limit, i) \ + { i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; } + +/* #define _LZMA_SIZE_OPT */ + +#ifdef _LZMA_SIZE_OPT +#define TREE_6_DECODE(probs, i) TREE_DECODE(probs, (1 << 6), i) +#else +#define TREE_6_DECODE(probs, i) \ + { i = 1; \ + TREE_GET_BIT(probs, i); \ + TREE_GET_BIT(probs, i); \ + TREE_GET_BIT(probs, i); \ + TREE_GET_BIT(probs, i); \ + TREE_GET_BIT(probs, i); \ + TREE_GET_BIT(probs, i); \ + i -= 0x40; } +#endif + +#define NORMAL_LITER_DEC TREE_GET_BIT(prob, symbol) +#define MATCHED_LITER_DEC \ + matchByte += matchByte; \ + bit = offs; \ + offs &= matchByte; \ + probLit = prob + (offs + bit + symbol); \ + GET_BIT2(probLit, symbol, offs ^= bit; , ;) + + + +#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); } + +#define IF_BIT_0_CHECK(p) ttt = *(p); NORMALIZE_CHECK; bound = (range >> kNumBitModelTotalBits) * (UInt32)ttt; if (code < bound) +#define UPDATE_0_CHECK range = bound; +#define UPDATE_1_CHECK range -= bound; code -= bound; +#define GET_BIT2_CHECK(p, i, A0, A1) IF_BIT_0_CHECK(p) \ + { UPDATE_0_CHECK; i = (i + i); A0; } else \ + { UPDATE_1_CHECK; i = (i + i) + 1; A1; } +#define GET_BIT_CHECK(p, i) GET_BIT2_CHECK(p, i, ; , ;) +#define TREE_DECODE_CHECK(probs, limit, i) \ + { i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; } + + +#define REV_BIT_CHECK(p, i, m) IF_BIT_0_CHECK(p + i) \ + { UPDATE_0_CHECK; i += m; m += m; } else \ + { UPDATE_1_CHECK; m += m; i += m; } + + +#define kNumPosBitsMax 4 +#define kNumPosStatesMax (1 << kNumPosBitsMax) + +#define kLenNumLowBits 3 +#define kLenNumLowSymbols (1 << kLenNumLowBits) +#define kLenNumHighBits 8 +#define kLenNumHighSymbols (1 << kLenNumHighBits) + +#define LenLow 0 +#define LenHigh (LenLow + 2 * (kNumPosStatesMax << kLenNumLowBits)) +#define kNumLenProbs (LenHigh + kLenNumHighSymbols) 
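+ +/* Editor's note: with kNumPosStatesMax = 16 and kLenNumLowBits = 3 these evaluate to LenLow = 0, LenHigh = 256 and kNumLenProbs = 512 probability slots per length coder. */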
+ +#define LenChoice LenLow +#define LenChoice2 (LenLow + (1 << kLenNumLowBits)) + +#define kNumStates 12 +#define kNumStates2 16 +#define kNumLitStates 7 + +#define kStartPosModelIndex 4 +#define kEndPosModelIndex 14 +#define kNumFullDistances (1 << (kEndPosModelIndex >> 1)) + +#define kNumPosSlotBits 6 +#define kNumLenToPosStates 4 + +#define kNumAlignBits 4 +#define kAlignTableSize (1 << kNumAlignBits) + +#define kMatchMinLen 2 +#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols * 2 + kLenNumHighSymbols) + +/* External ASM code needs same CLzmaProb array layout. So don't change it. */ + +/* (probs_1664) is faster and better for code size at some platforms */ +/* +#ifdef MY_CPU_X86_OR_AMD64 +*/ +#define kStartOffset 1664 +#define GET_PROBS p->probs_1664 +/* +#define GET_PROBS p->probs + kStartOffset +#else +#define kStartOffset 0 +#define GET_PROBS p->probs +#endif +*/ + +#define SpecPos (-kStartOffset) +#define IsRep0Long (SpecPos + kNumFullDistances) +#define RepLenCoder (IsRep0Long + (kNumStates2 << kNumPosBitsMax)) +#define LenCoder (RepLenCoder + kNumLenProbs) +#define IsMatch (LenCoder + kNumLenProbs) +#define Align (IsMatch + (kNumStates2 << kNumPosBitsMax)) +#define IsRep (Align + kAlignTableSize) +#define IsRepG0 (IsRep + kNumStates) +#define IsRepG1 (IsRepG0 + kNumStates) +#define IsRepG2 (IsRepG1 + kNumStates) +#define PosSlot (IsRepG2 + kNumStates) +#define Literal (PosSlot + (kNumLenToPosStates << kNumPosSlotBits)) +#define NUM_BASE_PROBS (Literal + kStartOffset) + +#if Align != 0 && kStartOffset != 0 + #error Stop_Compiling_Bad_LZMA_kAlign +#endif + +#if NUM_BASE_PROBS != 1984 + #error Stop_Compiling_Bad_LZMA_PROBS +#endif + + +#define LZMA_LIT_SIZE 0x300 + +#define LzmaProps_GetNumProbs(p) (NUM_BASE_PROBS + ((UInt32)LZMA_LIT_SIZE << ((p)->lc + (p)->lp))) + + +#define CALC_POS_STATE(processedPos, pbMask) (((processedPos) & (pbMask)) << 4) +#define COMBINED_PS_STATE (posState + state) +#define GET_LEN_STATE (posState) + +#define LZMA_DIC_MIN (1 << 12) + +/* +p->remainLen : shows status of LZMA decoder: + < kMatchSpecLenStart : normal remain + = kMatchSpecLenStart : finished + = kMatchSpecLenStart + 1 : need init range coder + = kMatchSpecLenStart + 2 : need init range coder and state +*/ + +/* ---------- LZMA_DECODE_REAL ---------- */ +/* +LzmaDec_DecodeReal_3() can be implemented in external ASM file. +3 - is the code compatibility version of that function for check at link time. +*/ + +#define LZMA_DECODE_REAL LzmaDec_DecodeReal_3 + +/* +LZMA_DECODE_REAL() +In: + RangeCoder is normalized + if (p->dicPos == limit) + { + LzmaDec_TryDummy() was called before to exclude LITERAL and MATCH-REP cases. + So first symbol can be only MATCH-NON-REP. And if that MATCH-NON-REP symbol + is not END_OF_PAYLOAD_MARKER, then function returns error code. + } + +Processing: + first LZMA symbol will be decoded in any case + All checks for limits are at the end of main loop, + It will decode new LZMA-symbols while (p->buf < bufLimit && dicPos < limit), + RangeCoder is still without last normalization when (p->buf < bufLimit) is being checked. 
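+ +Editor's note: that input margin is guaranteed by the caller: LzmaDec_DecodeToDic passes bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX (20 bytes), so a whole symbol can always be decoded without per-bit end-of-input checks.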
+ +Out: + RangeCoder is normalized + Result: + SZ_OK - OK + SZ_ERROR_DATA - Error + p->remainLen: + < kMatchSpecLenStart : normal remain + = kMatchSpecLenStart : finished +*/ + + +#ifdef _LZMA_DEC_OPT + +int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit); + +#else + +static +int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit) +{ + CLzmaProb *probs = GET_PROBS; + unsigned state = (unsigned)p->state; + UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3]; + unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1; + unsigned lc = p->prop.lc; + unsigned lpMask = ((unsigned)0x100 << p->prop.lp) - ((unsigned)0x100 >> lc); + + Byte *dic = p->dic; + SizeT dicBufSize = p->dicBufSize; + SizeT dicPos = p->dicPos; + + UInt32 processedPos = p->processedPos; + UInt32 checkDicSize = p->checkDicSize; + unsigned len = 0; + + const Byte *buf = p->buf; + UInt32 range = p->range; + UInt32 code = p->code; + + do + { + CLzmaProb *prob; + UInt32 bound; + unsigned ttt; + unsigned posState = CALC_POS_STATE(processedPos, pbMask); + + prob = probs + IsMatch + COMBINED_PS_STATE; + IF_BIT_0(prob) + { + unsigned symbol; + UPDATE_0(prob); + prob = probs + Literal; + if (processedPos != 0 || checkDicSize != 0) + prob += (UInt32)3 * ((((processedPos << 8) + dic[(dicPos == 0 ? dicBufSize : dicPos) - 1]) & lpMask) << lc); + processedPos++; + + if (state < kNumLitStates) + { + state -= (state < 4) ? state : 3; + symbol = 1; + #ifdef _LZMA_SIZE_OPT + do { NORMAL_LITER_DEC } while (symbol < 0x100); + #else + NORMAL_LITER_DEC + NORMAL_LITER_DEC + NORMAL_LITER_DEC + NORMAL_LITER_DEC + NORMAL_LITER_DEC + NORMAL_LITER_DEC + NORMAL_LITER_DEC + NORMAL_LITER_DEC + #endif + } + else + { + unsigned matchByte = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)]; + unsigned offs = 0x100; + state -= (state < 10) ? 3 : 6; + symbol = 1; + #ifdef _LZMA_SIZE_OPT + do + { + unsigned bit; + CLzmaProb *probLit; + MATCHED_LITER_DEC + } + while (symbol < 0x100); + #else + { + unsigned bit; + CLzmaProb *probLit; + MATCHED_LITER_DEC + MATCHED_LITER_DEC + MATCHED_LITER_DEC + MATCHED_LITER_DEC + MATCHED_LITER_DEC + MATCHED_LITER_DEC + MATCHED_LITER_DEC + MATCHED_LITER_DEC + } + #endif + } + + dic[dicPos++] = (Byte)symbol; + continue; + } + + { + UPDATE_1(prob); + prob = probs + IsRep + state; + IF_BIT_0(prob) + { + UPDATE_0(prob); + state += kNumStates; + prob = probs + LenCoder; + } + else + { + UPDATE_1(prob); + /* + // that case was checked before with kBadRepCode + if (checkDicSize == 0 && processedPos == 0) + return SZ_ERROR_DATA; + */ + prob = probs + IsRepG0 + state; + IF_BIT_0(prob) + { + UPDATE_0(prob); + prob = probs + IsRep0Long + COMBINED_PS_STATE; + IF_BIT_0(prob) + { + UPDATE_0(prob); + dic[dicPos] = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)]; + dicPos++; + processedPos++; + state = state < kNumLitStates ? 9 : 11; + continue; + } + UPDATE_1(prob); + } + else + { + UInt32 distance; + UPDATE_1(prob); + prob = probs + IsRepG1 + state; + IF_BIT_0(prob) + { + UPDATE_0(prob); + distance = rep1; + } + else + { + UPDATE_1(prob); + prob = probs + IsRepG2 + state; + IF_BIT_0(prob) + { + UPDATE_0(prob); + distance = rep2; + } + else + { + UPDATE_1(prob); + distance = rep3; + rep3 = rep2; + } + rep2 = rep1; + } + rep1 = rep0; + rep0 = distance; + } + state = state < kNumLitStates ? 
8 : 11; + prob = probs + RepLenCoder; + } + + #ifdef _LZMA_SIZE_OPT + { + unsigned lim, offset; + CLzmaProb *probLen = prob + LenChoice; + IF_BIT_0(probLen) + { + UPDATE_0(probLen); + probLen = prob + LenLow + GET_LEN_STATE; + offset = 0; + lim = (1 << kLenNumLowBits); + } + else + { + UPDATE_1(probLen); + probLen = prob + LenChoice2; + IF_BIT_0(probLen) + { + UPDATE_0(probLen); + probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits); + offset = kLenNumLowSymbols; + lim = (1 << kLenNumLowBits); + } + else + { + UPDATE_1(probLen); + probLen = prob + LenHigh; + offset = kLenNumLowSymbols * 2; + lim = (1 << kLenNumHighBits); + } + } + TREE_DECODE(probLen, lim, len); + len += offset; + } + #else + { + CLzmaProb *probLen = prob + LenChoice; + IF_BIT_0(probLen) + { + UPDATE_0(probLen); + probLen = prob + LenLow + GET_LEN_STATE; + len = 1; + TREE_GET_BIT(probLen, len); + TREE_GET_BIT(probLen, len); + TREE_GET_BIT(probLen, len); + len -= 8; + } + else + { + UPDATE_1(probLen); + probLen = prob + LenChoice2; + IF_BIT_0(probLen) + { + UPDATE_0(probLen); + probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits); + len = 1; + TREE_GET_BIT(probLen, len); + TREE_GET_BIT(probLen, len); + TREE_GET_BIT(probLen, len); + } + else + { + UPDATE_1(probLen); + probLen = prob + LenHigh; + TREE_DECODE(probLen, (1 << kLenNumHighBits), len); + len += kLenNumLowSymbols * 2; + } + } + } + #endif + + if (state >= kNumStates) + { + UInt32 distance; + prob = probs + PosSlot + + ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << kNumPosSlotBits); + TREE_6_DECODE(prob, distance); + if (distance >= kStartPosModelIndex) + { + unsigned posSlot = (unsigned)distance; + unsigned numDirectBits = (unsigned)(((distance >> 1) - 1)); + distance = (2 | (distance & 1)); + if (posSlot < kEndPosModelIndex) + { + distance <<= numDirectBits; + prob = probs + SpecPos; + { + UInt32 m = 1; + distance++; + do + { + REV_BIT_VAR(prob, distance, m); + } + while (--numDirectBits); + distance -= m; + } + } + else + { + numDirectBits -= kNumAlignBits; + do + { + NORMALIZE + range >>= 1; + + { + UInt32 t; + code -= range; + t = (0 - ((UInt32)code >> 31)); /* (UInt32)((Int32)code >> 31) */ + distance = (distance << 1) + (t + 1); + code += range & t; + } + /* + distance <<= 1; + if (code >= range) + { + code -= range; + distance |= 1; + } + */ + } + while (--numDirectBits); + prob = probs + Align; + distance <<= kNumAlignBits; + { + unsigned i = 1; + REV_BIT_CONST(prob, i, 1); + REV_BIT_CONST(prob, i, 2); + REV_BIT_CONST(prob, i, 4); + REV_BIT_LAST (prob, i, 8); + distance |= i; + } + if (distance == (UInt32)0xFFFFFFFF) + { + len = kMatchSpecLenStart; + state -= kNumStates; + break; + } + } + } + + rep3 = rep2; + rep2 = rep1; + rep1 = rep0; + rep0 = distance + 1; + state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3; + if (distance >= (checkDicSize == 0 ? processedPos: checkDicSize)) + { + p->dicPos = dicPos; + return SZ_ERROR_DATA; + } + } + + len += kMatchMinLen; + + { + SizeT rem; + unsigned curLen; + SizeT pos; + + if ((rem = limit - dicPos) == 0) + { + p->dicPos = dicPos; + return SZ_ERROR_DATA; + } + + curLen = ((rem < len) ? (unsigned)rem : len); + pos = dicPos - rep0 + (dicPos < rep0 ? 
dicBufSize : 0); + + processedPos += (UInt32)curLen; + + len -= curLen; + if (curLen <= dicBufSize - pos) + { + Byte *dest = dic + dicPos; + ptrdiff_t src = (ptrdiff_t)pos - (ptrdiff_t)dicPos; + const Byte *lim = dest + curLen; + dicPos += (SizeT)curLen; + do + *(dest) = (Byte)*(dest + src); + while (++dest != lim); + } + else + { + do + { + dic[dicPos++] = dic[pos]; + if (++pos == dicBufSize) + pos = 0; + } + while (--curLen != 0); + } + } + } + } + while (dicPos < limit && buf < bufLimit); + + NORMALIZE; + + p->buf = buf; + p->range = range; + p->code = code; + p->remainLen = (UInt32)len; + p->dicPos = dicPos; + p->processedPos = processedPos; + p->reps[0] = rep0; + p->reps[1] = rep1; + p->reps[2] = rep2; + p->reps[3] = rep3; + p->state = (UInt32)state; + + return SZ_OK; +} +#endif + +static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit) +{ + if (p->remainLen != 0 && p->remainLen < kMatchSpecLenStart) + { + Byte *dic = p->dic; + SizeT dicPos = p->dicPos; + SizeT dicBufSize = p->dicBufSize; + unsigned len = (unsigned)p->remainLen; + SizeT rep0 = p->reps[0]; /* we use SizeT to avoid the BUG of VC14 for AMD64 */ + SizeT rem = limit - dicPos; + if (rem < len) + len = (unsigned)(rem); + + if (p->checkDicSize == 0 && p->prop.dicSize - p->processedPos <= len) + p->checkDicSize = p->prop.dicSize; + + p->processedPos += (UInt32)len; + p->remainLen -= (UInt32)len; + while (len != 0) + { + len--; + dic[dicPos] = dic[dicPos - rep0 + (dicPos < rep0 ? dicBufSize : 0)]; + dicPos++; + } + p->dicPos = dicPos; + } +} + + +#define kRange0 0xFFFFFFFF +#define kBound0 ((kRange0 >> kNumBitModelTotalBits) << (kNumBitModelTotalBits - 1)) +#define kBadRepCode (kBound0 + (((kRange0 - kBound0) >> kNumBitModelTotalBits) << (kNumBitModelTotalBits - 1))) +#if kBadRepCode != (0xC0000000 - 0x400) + #error Stop_Compiling_Bad_LZMA_Check +#endif + +static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit) +{ + do + { + SizeT limit2 = limit; + if (p->checkDicSize == 0) + { + UInt32 rem = p->prop.dicSize - p->processedPos; + if (limit - p->dicPos > rem) + limit2 = p->dicPos + rem; + + if (p->processedPos == 0) + if (p->code >= kBadRepCode) + return SZ_ERROR_DATA; + } + + RINOK(LZMA_DECODE_REAL(p, limit2, bufLimit)); + + if (p->checkDicSize == 0 && p->processedPos >= p->prop.dicSize) + p->checkDicSize = p->prop.dicSize; + + LzmaDec_WriteRem(p, limit); + } + while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart); + + return 0; +} + +typedef enum +{ + DUMMY_ERROR, /* unexpected end of input stream */ + DUMMY_LIT, + DUMMY_MATCH, + DUMMY_REP +} ELzmaDummy; + +static ELzmaDummy LzmaDec_TryDummy(const CLzmaDec *p, const Byte *buf, SizeT inSize) +{ + UInt32 range = p->range; + UInt32 code = p->code; + const Byte *bufLimit = buf + inSize; + const CLzmaProb *probs = GET_PROBS; + unsigned state = (unsigned)p->state; + ELzmaDummy res; + + { + const CLzmaProb *prob; + UInt32 bound; + unsigned ttt; + unsigned posState = CALC_POS_STATE(p->processedPos, (1 << p->prop.pb) - 1); + + prob = probs + IsMatch + COMBINED_PS_STATE; + IF_BIT_0_CHECK(prob) + { + UPDATE_0_CHECK + + /* if (bufLimit - buf >= 7) return DUMMY_LIT; */ + + prob = probs + Literal; + if (p->checkDicSize != 0 || p->processedPos != 0) + prob += ((UInt32)LZMA_LIT_SIZE * + ((((p->processedPos) & ((1 << (p->prop.lp)) - 1)) << p->prop.lc) + + (p->dic[(p->dicPos == 0 ? 
p->dicBufSize : p->dicPos) - 1] >> (8 - p->prop.lc)))); + + if (state < kNumLitStates) + { + unsigned symbol = 1; + do { GET_BIT_CHECK(prob + symbol, symbol) } while (symbol < 0x100); + } + else + { + unsigned matchByte = p->dic[p->dicPos - p->reps[0] + + (p->dicPos < p->reps[0] ? p->dicBufSize : 0)]; + unsigned offs = 0x100; + unsigned symbol = 1; + do + { + unsigned bit; + const CLzmaProb *probLit; + matchByte += matchByte; + bit = offs; + offs &= matchByte; + probLit = prob + (offs + bit + symbol); + GET_BIT2_CHECK(probLit, symbol, offs ^= bit; , ; ) + } + while (symbol < 0x100); + } + res = DUMMY_LIT; + } + else + { + unsigned len; + UPDATE_1_CHECK; + + prob = probs + IsRep + state; + IF_BIT_0_CHECK(prob) + { + UPDATE_0_CHECK; + state = 0; + prob = probs + LenCoder; + res = DUMMY_MATCH; + } + else + { + UPDATE_1_CHECK; + res = DUMMY_REP; + prob = probs + IsRepG0 + state; + IF_BIT_0_CHECK(prob) + { + UPDATE_0_CHECK; + prob = probs + IsRep0Long + COMBINED_PS_STATE; + IF_BIT_0_CHECK(prob) + { + UPDATE_0_CHECK; + NORMALIZE_CHECK; + return DUMMY_REP; + } + else + { + UPDATE_1_CHECK; + } + } + else + { + UPDATE_1_CHECK; + prob = probs + IsRepG1 + state; + IF_BIT_0_CHECK(prob) + { + UPDATE_0_CHECK; + } + else + { + UPDATE_1_CHECK; + prob = probs + IsRepG2 + state; + IF_BIT_0_CHECK(prob) + { + UPDATE_0_CHECK; + } + else + { + UPDATE_1_CHECK; + } + } + } + state = kNumStates; + prob = probs + RepLenCoder; + } + { + unsigned limit, offset; + const CLzmaProb *probLen = prob + LenChoice; + IF_BIT_0_CHECK(probLen) + { + UPDATE_0_CHECK; + probLen = prob + LenLow + GET_LEN_STATE; + offset = 0; + limit = 1 << kLenNumLowBits; + } + else + { + UPDATE_1_CHECK; + probLen = prob + LenChoice2; + IF_BIT_0_CHECK(probLen) + { + UPDATE_0_CHECK; + probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits); + offset = kLenNumLowSymbols; + limit = 1 << kLenNumLowBits; + } + else + { + UPDATE_1_CHECK; + probLen = prob + LenHigh; + offset = kLenNumLowSymbols * 2; + limit = 1 << kLenNumHighBits; + } + } + TREE_DECODE_CHECK(probLen, limit, len); + len += offset; + } + + if (state < 4) + { + unsigned posSlot; + prob = probs + PosSlot + + ((len < kNumLenToPosStates - 1 ? 
len : kNumLenToPosStates - 1) << + kNumPosSlotBits); + TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot); + if (posSlot >= kStartPosModelIndex) + { + unsigned numDirectBits = ((posSlot >> 1) - 1); + + /* if (bufLimit - buf >= 8) return DUMMY_MATCH; */ + + if (posSlot < kEndPosModelIndex) + { + prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits); + } + else + { + numDirectBits -= kNumAlignBits; + do + { + NORMALIZE_CHECK + range >>= 1; + code -= range & (((code - range) >> 31) - 1); + /* if (code >= range) code -= range; */ + } + while (--numDirectBits); + prob = probs + Align; + numDirectBits = kNumAlignBits; + } + { + unsigned i = 1; + unsigned m = 1; + do + { + REV_BIT_CHECK(prob, i, m); + } + while (--numDirectBits); + } + } + } + } + } + NORMALIZE_CHECK; + return res; +} + + +void LzmaDec_InitDicAndState(CLzmaDec *p, BoolInt initDic, BoolInt initState) +{ + p->remainLen = kMatchSpecLenStart + 1; + p->tempBufSize = 0; + + if (initDic) + { + p->processedPos = 0; + p->checkDicSize = 0; + p->remainLen = kMatchSpecLenStart + 2; + } + if (initState) + p->remainLen = kMatchSpecLenStart + 2; +} + +void LzmaDec_Init(CLzmaDec *p) +{ + p->dicPos = 0; + LzmaDec_InitDicAndState(p, True, True); +} + + +SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen, + ELzmaFinishMode finishMode, ELzmaStatus *status) +{ + SizeT inSize = *srcLen; + (*srcLen) = 0; + + *status = LZMA_STATUS_NOT_SPECIFIED; + + if (p->remainLen > kMatchSpecLenStart) + { + for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--) + p->tempBuf[p->tempBufSize++] = *src++; + if (p->tempBufSize != 0 && p->tempBuf[0] != 0) + return SZ_ERROR_DATA; + if (p->tempBufSize < RC_INIT_SIZE) + { + *status = LZMA_STATUS_NEEDS_MORE_INPUT; + return SZ_OK; + } + p->code = + ((UInt32)p->tempBuf[1] << 24) + | ((UInt32)p->tempBuf[2] << 16) + | ((UInt32)p->tempBuf[3] << 8) + | ((UInt32)p->tempBuf[4]); + p->range = 0xFFFFFFFF; + p->tempBufSize = 0; + + if (p->remainLen > kMatchSpecLenStart + 1) + { + SizeT numProbs = LzmaProps_GetNumProbs(&p->prop); + SizeT i; + CLzmaProb *probs = p->probs; + for (i = 0; i < numProbs; i++) + probs[i] = kBitModelTotal >> 1; + p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1; + p->state = 0; + } + + p->remainLen = 0; + } + + LzmaDec_WriteRem(p, dicLimit); + + while (p->remainLen != kMatchSpecLenStart) + { + int checkEndMarkNow = 0; + + if (p->dicPos >= dicLimit) + { + if (p->remainLen == 0 && p->code == 0) + { + *status = LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK; + return SZ_OK; + } + if (finishMode == LZMA_FINISH_ANY) + { + *status = LZMA_STATUS_NOT_FINISHED; + return SZ_OK; + } + if (p->remainLen != 0) + { + *status = LZMA_STATUS_NOT_FINISHED; + return SZ_ERROR_DATA; + } + checkEndMarkNow = 1; + } + + if (p->tempBufSize == 0) + { + SizeT processed; + const Byte *bufLimit; + if (inSize < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow) + { + int dummyRes = LzmaDec_TryDummy(p, src, inSize); + if (dummyRes == DUMMY_ERROR) + { + memcpy(p->tempBuf, src, inSize); + p->tempBufSize = (unsigned)inSize; + (*srcLen) += inSize; + *status = LZMA_STATUS_NEEDS_MORE_INPUT; + return SZ_OK; + } + if (checkEndMarkNow && dummyRes != DUMMY_MATCH) + { + *status = LZMA_STATUS_NOT_FINISHED; + return SZ_ERROR_DATA; + } + bufLimit = src; + } + else + bufLimit = src + inSize - LZMA_REQUIRED_INPUT_MAX; + p->buf = src; + if (LzmaDec_DecodeReal2(p, dicLimit, bufLimit) != 0) + return SZ_ERROR_DATA; + processed = (SizeT)(p->buf - src); + (*srcLen) += processed; + src += processed; + inSize 
-= processed; + } + else + { + unsigned rem = p->tempBufSize, lookAhead = 0; + while (rem < LZMA_REQUIRED_INPUT_MAX && lookAhead < inSize) + p->tempBuf[rem++] = src[lookAhead++]; + p->tempBufSize = rem; + if (rem < LZMA_REQUIRED_INPUT_MAX || checkEndMarkNow) + { + int dummyRes = LzmaDec_TryDummy(p, p->tempBuf, (SizeT)rem); + if (dummyRes == DUMMY_ERROR) + { + (*srcLen) += (SizeT)lookAhead; + *status = LZMA_STATUS_NEEDS_MORE_INPUT; + return SZ_OK; + } + if (checkEndMarkNow && dummyRes != DUMMY_MATCH) + { + *status = LZMA_STATUS_NOT_FINISHED; + return SZ_ERROR_DATA; + } + } + p->buf = p->tempBuf; + if (LzmaDec_DecodeReal2(p, dicLimit, p->buf) != 0) + return SZ_ERROR_DATA; + + { + unsigned kkk = (unsigned)(p->buf - p->tempBuf); + if (rem < kkk) + return SZ_ERROR_FAIL; /* some internal error */ + rem -= kkk; + if (lookAhead < rem) + return SZ_ERROR_FAIL; /* some internal error */ + lookAhead -= rem; + } + (*srcLen) += (SizeT)lookAhead; + src += lookAhead; + inSize -= (SizeT)lookAhead; + p->tempBufSize = 0; + } + } + + if (p->code != 0) + return SZ_ERROR_DATA; + *status = LZMA_STATUS_FINISHED_WITH_MARK; + return SZ_OK; +} + + +SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status) +{ + SizeT outSize = *destLen; + SizeT inSize = *srcLen; + *srcLen = *destLen = 0; + for (;;) + { + SizeT inSizeCur = inSize, outSizeCur, dicPos; + ELzmaFinishMode curFinishMode; + SRes res; + if (p->dicPos == p->dicBufSize) + p->dicPos = 0; + dicPos = p->dicPos; + if (outSize > p->dicBufSize - dicPos) + { + outSizeCur = p->dicBufSize; + curFinishMode = LZMA_FINISH_ANY; + } + else + { + outSizeCur = dicPos + outSize; + curFinishMode = finishMode; + } + + res = LzmaDec_DecodeToDic(p, outSizeCur, src, &inSizeCur, curFinishMode, status); + src += inSizeCur; + inSize -= inSizeCur; + *srcLen += inSizeCur; + outSizeCur = p->dicPos - dicPos; + memcpy(dest, p->dic + dicPos, outSizeCur); + dest += outSizeCur; + outSize -= outSizeCur; + *destLen += outSizeCur; + if (res != 0) + return res; + if (outSizeCur == 0 || outSize == 0) + return SZ_OK; + } +} + +void LzmaDec_FreeProbs(CLzmaDec *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->probs); + p->probs = NULL; +} + +static void LzmaDec_FreeDict(CLzmaDec *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->dic); + p->dic = NULL; +} + +void LzmaDec_Free(CLzmaDec *p, ISzAllocPtr alloc) +{ + LzmaDec_FreeProbs(p, alloc); + LzmaDec_FreeDict(p, alloc); +} + +SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size) +{ + UInt32 dicSize; + Byte d; + + if (size < LZMA_PROPS_SIZE) + return SZ_ERROR_UNSUPPORTED; + else + dicSize = data[1] | ((UInt32)data[2] << 8) | ((UInt32)data[3] << 16) | ((UInt32)data[4] << 24); + + if (dicSize < LZMA_DIC_MIN) + dicSize = LZMA_DIC_MIN; + p->dicSize = dicSize; + + d = data[0]; + if (d >= (9 * 5 * 5)) + return SZ_ERROR_UNSUPPORTED; + + p->lc = (Byte)(d % 9); + d /= 9; + p->pb = (Byte)(d / 5); + p->lp = (Byte)(d % 5); + + return SZ_OK; +} + +static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAllocPtr alloc) +{ + UInt32 numProbs = LzmaProps_GetNumProbs(propNew); + if (!p->probs || numProbs != p->numProbs) + { + LzmaDec_FreeProbs(p, alloc); + p->probs = (CLzmaProb *)ISzAlloc_Alloc(alloc, numProbs * sizeof(CLzmaProb)); + if (!p->probs) + return SZ_ERROR_MEM; + p->probs_1664 = p->probs + 1664; + p->numProbs = numProbs; + } + return SZ_OK; +} + +SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, 
ISzAllocPtr alloc) +{ + CLzmaProps propNew; + RINOK(LzmaProps_Decode(&propNew, props, propsSize)); + RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc)); + p->prop = propNew; + return SZ_OK; +} + +SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc) +{ + CLzmaProps propNew; + SizeT dicBufSize; + RINOK(LzmaProps_Decode(&propNew, props, propsSize)); + RINOK(LzmaDec_AllocateProbs2(p, &propNew, alloc)); + + { + UInt32 dictSize = propNew.dicSize; + SizeT mask = ((UInt32)1 << 12) - 1; + if (dictSize >= ((UInt32)1 << 30)) mask = ((UInt32)1 << 22) - 1; + else if (dictSize >= ((UInt32)1 << 22)) mask = ((UInt32)1 << 20) - 1;; + dicBufSize = ((SizeT)dictSize + mask) & ~mask; + if (dicBufSize < dictSize) + dicBufSize = dictSize; + } + + if (!p->dic || dicBufSize != p->dicBufSize) + { + LzmaDec_FreeDict(p, alloc); + p->dic = (Byte *)ISzAlloc_Alloc(alloc, dicBufSize); + if (!p->dic) + { + LzmaDec_FreeProbs(p, alloc); + return SZ_ERROR_MEM; + } + } + p->dicBufSize = dicBufSize; + p->prop = propNew; + return SZ_OK; +} + +SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, + const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode, + ELzmaStatus *status, ISzAllocPtr alloc) +{ + CLzmaDec p; + SRes res; + SizeT outSize = *destLen, inSize = *srcLen; + *destLen = *srcLen = 0; + *status = LZMA_STATUS_NOT_SPECIFIED; + if (inSize < RC_INIT_SIZE) + return SZ_ERROR_INPUT_EOF; + LzmaDec_Construct(&p); + RINOK(LzmaDec_AllocateProbs(&p, propData, propSize, alloc)); + p.dic = dest; + p.dicBufSize = outSize; + LzmaDec_Init(&p); + *srcLen = inSize; + res = LzmaDec_DecodeToDic(&p, outSize, src, srcLen, finishMode, status); + *destLen = p.dicPos; + if (res == SZ_OK && *status == LZMA_STATUS_NEEDS_MORE_INPUT) + res = SZ_ERROR_INPUT_EOF; + LzmaDec_FreeProbs(&p, alloc); + return res; +} diff --git a/3rdparty/7z/src/LzmaDec.h b/3rdparty/7z/src/LzmaDec.h new file mode 100644 index 0000000000..28ce60c3ea --- /dev/null +++ b/3rdparty/7z/src/LzmaDec.h @@ -0,0 +1,234 @@ +/* LzmaDec.h -- LZMA Decoder +2018-04-21 : Igor Pavlov : Public domain */ + +#ifndef __LZMA_DEC_H +#define __LZMA_DEC_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +/* #define _LZMA_PROB32 */ +/* _LZMA_PROB32 can increase the speed on some CPUs, + but memory usage for CLzmaDec::probs will be doubled in that case */ + +typedef +#ifdef _LZMA_PROB32 + UInt32 +#else + UInt16 +#endif + CLzmaProb; + + +/* ---------- LZMA Properties ---------- */ + +#define LZMA_PROPS_SIZE 5 + +typedef struct _CLzmaProps +{ + Byte lc; + Byte lp; + Byte pb; + Byte _pad_; + UInt32 dicSize; +} CLzmaProps; + +/* LzmaProps_Decode - decodes properties +Returns: + SZ_OK + SZ_ERROR_UNSUPPORTED - Unsupported properties +*/ + +SRes LzmaProps_Decode(CLzmaProps *p, const Byte *data, unsigned size); + + +/* ---------- LZMA Decoder state ---------- */ + +/* LZMA_REQUIRED_INPUT_MAX = number of required input bytes for worst case. + Num bits = log2((2^11 / 31) ^ 22) + 26 < 134 + 26 = 160; */ + +#define LZMA_REQUIRED_INPUT_MAX 20 + +typedef struct +{ + /* Don't change this structure. ASM code can use it. 
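A worked example of the property-byte packing handled by LzmaProps_Decode above (annotation; check_props and the sample header bytes are illustrative, not upstream code): the first header byte stores (pb * 5 + lp) * 9 + lc, and bytes 1..4 store the dictionary size little-endian.

    #include <stdio.h>

    /* Hypothetical helper mirroring LzmaProps_Decode's arithmetic. */
    static void check_props(const unsigned char h[5])
    {
        unsigned d = h[0];                /* must be < 9 * 5 * 5 = 225 */
        unsigned lc = d % 9;  d /= 9;
        unsigned pb = d / 5;
        unsigned lp = d % 5;
        unsigned long dicSize = (unsigned long)h[1]
            | ((unsigned long)h[2] << 8)
            | ((unsigned long)h[3] << 16)
            | ((unsigned long)h[4] << 24);
        printf("lc=%u lp=%u pb=%u dicSize=%lu\n", lc, lp, pb, dicSize);
    }

    int main(void)
    {
        /* 0x5D + 00 00 10 00: the common lc=3 lp=0 pb=2, 1 MiB dictionary */
        const unsigned char h[5] = { 0x5D, 0x00, 0x00, 0x10, 0x00 };
        check_props(h);   /* prints lc=3 lp=0 pb=2 dicSize=1048576 */
        return 0;
    }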
*/ + CLzmaProps prop; + CLzmaProb *probs; + CLzmaProb *probs_1664; + Byte *dic; + SizeT dicBufSize; + SizeT dicPos; + const Byte *buf; + UInt32 range; + UInt32 code; + UInt32 processedPos; + UInt32 checkDicSize; + UInt32 reps[4]; + UInt32 state; + UInt32 remainLen; + + UInt32 numProbs; + unsigned tempBufSize; + Byte tempBuf[LZMA_REQUIRED_INPUT_MAX]; +} CLzmaDec; + +#define LzmaDec_Construct(p) { (p)->dic = NULL; (p)->probs = NULL; } + +void LzmaDec_Init(CLzmaDec *p); + +/* There are two types of LZMA streams: + - Stream with end mark. That end mark adds about 6 bytes to compressed size. + - Stream without end mark. You must know exact uncompressed size to decompress such stream. */ + +typedef enum +{ + LZMA_FINISH_ANY, /* finish at any point */ + LZMA_FINISH_END /* block must be finished at the end */ +} ELzmaFinishMode; + +/* ELzmaFinishMode has meaning only if the decoding reaches output limit !!! + + You must use LZMA_FINISH_END, when you know that current output buffer + covers last bytes of block. In other cases you must use LZMA_FINISH_ANY. + + If LZMA decoder sees end marker before reaching output limit, it returns SZ_OK, + and output value of destLen will be less than output buffer size limit. + You can check status result also. + + You can use multiple checks to test data integrity after full decompression: + 1) Check Result and "status" variable. + 2) Check that output(destLen) = uncompressedSize, if you know real uncompressedSize. + 3) Check that output(srcLen) = compressedSize, if you know real compressedSize. + You must use correct finish mode in that case. */ + +typedef enum +{ + LZMA_STATUS_NOT_SPECIFIED, /* use main error code instead */ + LZMA_STATUS_FINISHED_WITH_MARK, /* stream was finished with end mark. */ + LZMA_STATUS_NOT_FINISHED, /* stream was not finished */ + LZMA_STATUS_NEEDS_MORE_INPUT, /* you must provide more input bytes */ + LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK /* there is probability that stream was finished without end mark */ +} ELzmaStatus; + +/* ELzmaStatus is used only as output value for function call */ + + +/* ---------- Interfaces ---------- */ + +/* There are 3 levels of interfaces: + 1) Dictionary Interface + 2) Buffer Interface + 3) One Call Interface + You can select any of these interfaces, but don't mix functions from different + groups for same object. */ + + +/* There are two variants to allocate state for Dictionary Interface: + 1) LzmaDec_Allocate / LzmaDec_Free + 2) LzmaDec_AllocateProbs / LzmaDec_FreeProbs + You can use variant 2, if you set dictionary buffer manually. + For Buffer Interface you must always use variant 1. + +LzmaDec_Allocate* can return: + SZ_OK + SZ_ERROR_MEM - Memory allocation error + SZ_ERROR_UNSUPPORTED - Unsupported properties +*/ + +SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc); +void LzmaDec_FreeProbs(CLzmaDec *p, ISzAllocPtr alloc); + +SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc); +void LzmaDec_Free(CLzmaDec *p, ISzAllocPtr alloc); + +/* ---------- Dictionary Interface ---------- */ + +/* You can use it, if you want to eliminate the overhead for data copying from + dictionary to some other external buffer. + You must work with CLzmaDec variables directly in this interface. 
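The STEPS outline just below gives the call order for this interface; written out in C it comes to roughly the following sketch (annotation, not upstream code; hdr is the stream's LZMA_PROPS_SIZE header bytes, inBuf/inLen the remaining compressed bytes already in memory, write_out a caller-supplied sink, and g_Alloc the SDK's malloc-backed allocator from Alloc.h):

    CLzmaDec dec;
    SizeT inPos = 0;
    ELzmaStatus status;
    LzmaDec_Construct(&dec);
    if (LzmaDec_Allocate(&dec, hdr, LZMA_PROPS_SIZE, &g_Alloc) != SZ_OK)
        return SZ_ERROR_MEM;
    LzmaDec_Init(&dec);
    for (;;)
    {
        SizeT dicPos0 = dec.dicPos;
        SizeT srcLen = inLen - inPos;
        SRes res = LzmaDec_DecodeToDic(&dec, dec.dicBufSize,
                                       inBuf + inPos, &srcLen,
                                       LZMA_FINISH_ANY, &status);
        inPos += srcLen;
        write_out(dec.dic + dicPos0, dec.dicPos - dicPos0); /* use new bytes */
        if (res != SZ_OK || status == LZMA_STATUS_FINISHED_WITH_MARK)
            break;
        if (status == LZMA_STATUS_NEEDS_MORE_INPUT && inPos == inLen)
            break;                    /* truncated stream */
        if (dec.dicPos == dec.dicBufSize)
            dec.dicPos = 0;           /* dictionary full: wrap, as noted above */
    }
    LzmaDec_Free(&dec, &g_Alloc);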
+ + STEPS: + LzmaDec_Construct() + LzmaDec_Allocate() + for (each new stream) + { + LzmaDec_Init() + while (it needs more decompression) + { + LzmaDec_DecodeToDic() + use data from CLzmaDec::dic and update CLzmaDec::dicPos + } + } + LzmaDec_Free() +*/ + +/* LzmaDec_DecodeToDic + + The decoding to internal dictionary buffer (CLzmaDec::dic). + You must manually update CLzmaDec::dicPos, if it reaches CLzmaDec::dicBufSize !!! + +finishMode: + It has meaning only if the decoding reaches output limit (dicLimit). + LZMA_FINISH_ANY - Decode just dicLimit bytes. + LZMA_FINISH_END - Stream must be finished after dicLimit. + +Returns: + SZ_OK + status: + LZMA_STATUS_FINISHED_WITH_MARK + LZMA_STATUS_NOT_FINISHED + LZMA_STATUS_NEEDS_MORE_INPUT + LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK + SZ_ERROR_DATA - Data error +*/ + +SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, + const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); + + +/* ---------- Buffer Interface ---------- */ + +/* It's zlib-like interface. + See LzmaDec_DecodeToDic description for information about STEPS and return results, + but you must use LzmaDec_DecodeToBuf instead of LzmaDec_DecodeToDic and you don't need + to work with CLzmaDec variables manually. + +finishMode: + It has meaning only if the decoding reaches output limit (*destLen). + LZMA_FINISH_ANY - Decode just destLen bytes. + LZMA_FINISH_END - Stream must be finished after (*destLen). +*/ + +SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, + const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status); + + +/* ---------- One Call Interface ---------- */ + +/* LzmaDecode + +finishMode: + It has meaning only if the decoding reaches output limit (*destLen). + LZMA_FINISH_ANY - Decode just destLen bytes. + LZMA_FINISH_END - Stream must be finished after (*destLen). + +Returns: + SZ_OK + status: + LZMA_STATUS_FINISHED_WITH_MARK + LZMA_STATUS_NOT_FINISHED + LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK + SZ_ERROR_DATA - Data error + SZ_ERROR_MEM - Memory allocation error + SZ_ERROR_UNSUPPORTED - Unsupported properties + SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src). 
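For comparison, the One Call interface reduces to a single invocation of the declaration just below (annotation, not upstream code; packed/packedSize are assumed to hold a whole .lzma payload with its 5-byte props header in front, and knownUnpackedSize is assumed known, which is what makes LZMA_FINISH_END appropriate):

    ELzmaStatus status;
    SizeT destLen = knownUnpackedSize;             /* exact output size */
    SizeT srcLen = packedSize - LZMA_PROPS_SIZE;
    SRes res = LzmaDecode(dest, &destLen,
                          packed + LZMA_PROPS_SIZE, &srcLen,
                          packed, LZMA_PROPS_SIZE,
                          LZMA_FINISH_END, &status, &g_Alloc);
    /* success: res == SZ_OK, destLen == knownUnpackedSize, and status is
       LZMA_STATUS_FINISHED_WITH_MARK or
       LZMA_STATUS_MAYBE_FINISHED_WITHOUT_MARK */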
+*/
+
+SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,
+    const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,
+    ELzmaStatus *status, ISzAllocPtr alloc);
+
+EXTERN_C_END
+
+#endif
diff --git a/3rdparty/7z/src/LzmaEnc.c b/3rdparty/7z/src/LzmaEnc.c
new file mode 100644
index 0000000000..14086fc4f9
--- /dev/null
+++ b/3rdparty/7z/src/LzmaEnc.c
@@ -0,0 +1,2976 @@
+/* LzmaEnc.c -- LZMA Encoder
+2019-01-10: Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include <string.h>
+
+/* #define SHOW_STAT */
+/* #define SHOW_STAT2 */
+
+#if defined(SHOW_STAT) || defined(SHOW_STAT2)
+#include <stdio.h>
+#endif
+
+#include "LzmaEnc.h"
+
+#include "LzFind.h"
+#ifndef _7ZIP_ST
+#include "LzFindMt.h"
+#endif
+
+#ifdef SHOW_STAT
+static unsigned g_STAT_OFFSET = 0;
+#endif
+
+#define kLzmaMaxHistorySize ((UInt32)3 << 29)
+/* #define kLzmaMaxHistorySize ((UInt32)7 << 29) */
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+#define kProbInitValue (kBitModelTotal >> 1)
+
+#define kNumMoveReducingBits 4
+#define kNumBitPriceShiftBits 4
+#define kBitPrice (1 << kNumBitPriceShiftBits)
+
+#define REP_LEN_COUNT 64
+
+void LzmaEncProps_Init(CLzmaEncProps *p)
+{
+  p->level = 5;
+  p->dictSize = p->mc = 0;
+  p->reduceSize = (UInt64)(Int64)-1;
+  p->lc = p->lp = p->pb = p->algo = p->fb = p->btMode = p->numHashBytes = p->numThreads = -1;
+  p->writeEndMark = 0;
+}
+
+void LzmaEncProps_Normalize(CLzmaEncProps *p)
+{
+  int level = p->level;
+  if (level < 0) level = 5;
+  p->level = level;
+
+  if (p->dictSize == 0) p->dictSize = (level <= 5 ? (1 << (level * 2 + 14)) : (level <= 7 ? (1 << 25) : (1 << 26)));
+  if (p->dictSize > p->reduceSize)
+  {
+    unsigned i;
+    UInt32 reduceSize = (UInt32)p->reduceSize;
+    for (i = 11; i <= 30; i++)
+    {
+      if (reduceSize <= ((UInt32)2 << i)) { p->dictSize = ((UInt32)2 << i); break; }
+      if (reduceSize <= ((UInt32)3 << i)) { p->dictSize = ((UInt32)3 << i); break; }
+    }
+  }
+
+  if (p->lc < 0) p->lc = 3;
+  if (p->lp < 0) p->lp = 0;
+  if (p->pb < 0) p->pb = 2;
+
+  if (p->algo < 0) p->algo = (level < 5 ? 0 : 1);
+  if (p->fb < 0) p->fb = (level < 7 ? 32 : 64);
+  if (p->btMode < 0) p->btMode = (p->algo == 0 ? 0 : 1);
+  if (p->numHashBytes < 0) p->numHashBytes = 4;
+  if (p->mc == 0) p->mc = (16 + (p->fb >> 1)) >> (p->btMode ? 0 : 1);
+
+  if (p->numThreads < 0)
+    p->numThreads =
+      #ifndef _7ZIP_ST
+      ((p->btMode && p->algo) ?
2 : 1); + #else + 1; + #endif +} + +UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2) +{ + CLzmaEncProps props = *props2; + LzmaEncProps_Normalize(&props); + return props.dictSize; +} + +#if (_MSC_VER >= 1400) +/* BSR code is fast for some new CPUs */ +/* #define LZMA_LOG_BSR */ +#endif + +#ifdef LZMA_LOG_BSR + +#define kDicLogSizeMaxCompress 32 + +#define BSR2_RET(pos, res) { unsigned long zz; _BitScanReverse(&zz, (pos)); res = (zz + zz) + ((pos >> (zz - 1)) & 1); } + +static unsigned GetPosSlot1(UInt32 pos) +{ + unsigned res; + BSR2_RET(pos, res); + return res; +} +#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); } +#define GetPosSlot(pos, res) { if (pos < 2) res = pos; else BSR2_RET(pos, res); } + +#else + +#define kNumLogBits (9 + sizeof(size_t) / 2) +/* #define kNumLogBits (11 + sizeof(size_t) / 8 * 3) */ + +#define kDicLogSizeMaxCompress ((kNumLogBits - 1) * 2 + 7) + +static void LzmaEnc_FastPosInit(Byte *g_FastPos) +{ + unsigned slot; + g_FastPos[0] = 0; + g_FastPos[1] = 1; + g_FastPos += 2; + + for (slot = 2; slot < kNumLogBits * 2; slot++) + { + size_t k = ((size_t)1 << ((slot >> 1) - 1)); + size_t j; + for (j = 0; j < k; j++) + g_FastPos[j] = (Byte)slot; + g_FastPos += k; + } +} + +/* we can use ((limit - pos) >> 31) only if (pos < ((UInt32)1 << 31)) */ +/* +#define BSR2_RET(pos, res) { unsigned zz = 6 + ((kNumLogBits - 1) & \ + (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \ + res = p->g_FastPos[pos >> zz] + (zz * 2); } +*/ + +/* +#define BSR2_RET(pos, res) { unsigned zz = 6 + ((kNumLogBits - 1) & \ + (0 - (((((UInt32)1 << (kNumLogBits)) - 1) - (pos >> 6)) >> 31))); \ + res = p->g_FastPos[pos >> zz] + (zz * 2); } +*/ + +#define BSR2_RET(pos, res) { unsigned zz = (pos < (1 << (kNumLogBits + 6))) ? 6 : 6 + kNumLogBits - 1; \ + res = p->g_FastPos[pos >> zz] + (zz * 2); } + +/* +#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? 
\ + p->g_FastPos[pos >> 6] + 12 : \ + p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; } +*/ + +#define GetPosSlot1(pos) p->g_FastPos[pos] +#define GetPosSlot2(pos, res) { BSR2_RET(pos, res); } +#define GetPosSlot(pos, res) { if (pos < kNumFullDistances) res = p->g_FastPos[pos & (kNumFullDistances - 1)]; else BSR2_RET(pos, res); } + +#endif + + +#define LZMA_NUM_REPS 4 + +typedef UInt16 CState; +typedef UInt16 CExtra; + +typedef struct +{ + UInt32 price; + CState state; + CExtra extra; + // 0 : normal + // 1 : LIT : MATCH + // > 1 : MATCH (extra-1) : LIT : REP0 (len) + UInt32 len; + UInt32 dist; + UInt32 reps[LZMA_NUM_REPS]; +} COptimal; + + +// 18.06 +#define kNumOpts (1 << 11) +#define kPackReserve (kNumOpts * 8) +// #define kNumOpts (1 << 12) +// #define kPackReserve (1 + kNumOpts * 2) + +#define kNumLenToPosStates 4 +#define kNumPosSlotBits 6 +#define kDicLogSizeMin 0 +#define kDicLogSizeMax 32 +#define kDistTableSizeMax (kDicLogSizeMax * 2) + +#define kNumAlignBits 4 +#define kAlignTableSize (1 << kNumAlignBits) +#define kAlignMask (kAlignTableSize - 1) + +#define kStartPosModelIndex 4 +#define kEndPosModelIndex 14 +#define kNumFullDistances (1 << (kEndPosModelIndex >> 1)) + +typedef +#ifdef _LZMA_PROB32 + UInt32 +#else + UInt16 +#endif + CLzmaProb; + +#define LZMA_PB_MAX 4 +#define LZMA_LC_MAX 8 +#define LZMA_LP_MAX 4 + +#define LZMA_NUM_PB_STATES_MAX (1 << LZMA_PB_MAX) + +#define kLenNumLowBits 3 +#define kLenNumLowSymbols (1 << kLenNumLowBits) +#define kLenNumHighBits 8 +#define kLenNumHighSymbols (1 << kLenNumHighBits) +#define kLenNumSymbolsTotal (kLenNumLowSymbols * 2 + kLenNumHighSymbols) + +#define LZMA_MATCH_LEN_MIN 2 +#define LZMA_MATCH_LEN_MAX (LZMA_MATCH_LEN_MIN + kLenNumSymbolsTotal - 1) + +#define kNumStates 12 + + +typedef struct +{ + CLzmaProb low[LZMA_NUM_PB_STATES_MAX << (kLenNumLowBits + 1)]; + CLzmaProb high[kLenNumHighSymbols]; +} CLenEnc; + + +typedef struct +{ + unsigned tableSize; + UInt32 prices[LZMA_NUM_PB_STATES_MAX][kLenNumSymbolsTotal]; + // UInt32 prices1[LZMA_NUM_PB_STATES_MAX][kLenNumLowSymbols * 2]; + // UInt32 prices2[kLenNumSymbolsTotal]; +} CLenPriceEnc; + +#define GET_PRICE_LEN(p, posState, len) \ + ((p)->prices[posState][(size_t)(len) - LZMA_MATCH_LEN_MIN]) + +/* +#define GET_PRICE_LEN(p, posState, len) \ + ((p)->prices2[(size_t)(len) - 2] + ((p)->prices1[posState][((len) - 2) & (kLenNumLowSymbols * 2 - 1)] & (((len) - 2 - kLenNumLowSymbols * 2) >> 9))) +*/ + +typedef struct +{ + UInt32 range; + unsigned cache; + UInt64 low; + UInt64 cacheSize; + Byte *buf; + Byte *bufLim; + Byte *bufBase; + ISeqOutStream *outStream; + UInt64 processed; + SRes res; +} CRangeEnc; + + +typedef struct +{ + CLzmaProb *litProbs; + + unsigned state; + UInt32 reps[LZMA_NUM_REPS]; + + CLzmaProb posAlignEncoder[1 << kNumAlignBits]; + CLzmaProb isRep[kNumStates]; + CLzmaProb isRepG0[kNumStates]; + CLzmaProb isRepG1[kNumStates]; + CLzmaProb isRepG2[kNumStates]; + CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX]; + CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX]; + + CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits]; + CLzmaProb posEncoders[kNumFullDistances]; + + CLenEnc lenProbs; + CLenEnc repLenProbs; + +} CSaveState; + + +typedef UInt32 CProbPrice; + + +typedef struct +{ + void *matchFinderObj; + IMatchFinder matchFinder; + + unsigned optCur; + unsigned optEnd; + + unsigned longestMatchLen; + unsigned numPairs; + UInt32 numAvail; + + unsigned state; + unsigned numFastBytes; + unsigned additionalOffset; + UInt32 
reps[LZMA_NUM_REPS]; + unsigned lpMask, pbMask; + CLzmaProb *litProbs; + CRangeEnc rc; + + UInt32 backRes; + + unsigned lc, lp, pb; + unsigned lclp; + + BoolInt fastMode; + BoolInt writeEndMark; + BoolInt finished; + BoolInt multiThread; + BoolInt needInit; + // BoolInt _maxMode; + + UInt64 nowPos64; + + unsigned matchPriceCount; + // unsigned alignPriceCount; + int repLenEncCounter; + + unsigned distTableSize; + + UInt32 dictSize; + SRes result; + + #ifndef _7ZIP_ST + BoolInt mtMode; + // begin of CMatchFinderMt is used in LZ thread + CMatchFinderMt matchFinderMt; + // end of CMatchFinderMt is used in BT and HASH threads + #endif + + CMatchFinder matchFinderBase; + + #ifndef _7ZIP_ST + Byte pad[128]; + #endif + + // LZ thread + CProbPrice ProbPrices[kBitModelTotal >> kNumMoveReducingBits]; + + UInt32 matches[LZMA_MATCH_LEN_MAX * 2 + 2 + 1]; + + UInt32 alignPrices[kAlignTableSize]; + UInt32 posSlotPrices[kNumLenToPosStates][kDistTableSizeMax]; + UInt32 distancesPrices[kNumLenToPosStates][kNumFullDistances]; + + CLzmaProb posAlignEncoder[1 << kNumAlignBits]; + CLzmaProb isRep[kNumStates]; + CLzmaProb isRepG0[kNumStates]; + CLzmaProb isRepG1[kNumStates]; + CLzmaProb isRepG2[kNumStates]; + CLzmaProb isMatch[kNumStates][LZMA_NUM_PB_STATES_MAX]; + CLzmaProb isRep0Long[kNumStates][LZMA_NUM_PB_STATES_MAX]; + CLzmaProb posSlotEncoder[kNumLenToPosStates][1 << kNumPosSlotBits]; + CLzmaProb posEncoders[kNumFullDistances]; + + CLenEnc lenProbs; + CLenEnc repLenProbs; + + #ifndef LZMA_LOG_BSR + Byte g_FastPos[1 << kNumLogBits]; + #endif + + CLenPriceEnc lenEnc; + CLenPriceEnc repLenEnc; + + COptimal opt[kNumOpts]; + + CSaveState saveState; + + #ifndef _7ZIP_ST + Byte pad2[128]; + #endif +} CLzmaEnc; + + + +#define COPY_ARR(dest, src, arr) memcpy(dest->arr, src->arr, sizeof(src->arr)); + +void LzmaEnc_SaveState(CLzmaEncHandle pp) +{ + CLzmaEnc *p = (CLzmaEnc *)pp; + CSaveState *dest = &p->saveState; + + dest->state = p->state; + + dest->lenProbs = p->lenProbs; + dest->repLenProbs = p->repLenProbs; + + COPY_ARR(dest, p, reps); + + COPY_ARR(dest, p, posAlignEncoder); + COPY_ARR(dest, p, isRep); + COPY_ARR(dest, p, isRepG0); + COPY_ARR(dest, p, isRepG1); + COPY_ARR(dest, p, isRepG2); + COPY_ARR(dest, p, isMatch); + COPY_ARR(dest, p, isRep0Long); + COPY_ARR(dest, p, posSlotEncoder); + COPY_ARR(dest, p, posEncoders); + + memcpy(dest->litProbs, p->litProbs, ((UInt32)0x300 << p->lclp) * sizeof(CLzmaProb)); +} + + +void LzmaEnc_RestoreState(CLzmaEncHandle pp) +{ + CLzmaEnc *dest = (CLzmaEnc *)pp; + const CSaveState *p = &dest->saveState; + + dest->state = p->state; + + dest->lenProbs = p->lenProbs; + dest->repLenProbs = p->repLenProbs; + + COPY_ARR(dest, p, reps); + + COPY_ARR(dest, p, posAlignEncoder); + COPY_ARR(dest, p, isRep); + COPY_ARR(dest, p, isRepG0); + COPY_ARR(dest, p, isRepG1); + COPY_ARR(dest, p, isRepG2); + COPY_ARR(dest, p, isMatch); + COPY_ARR(dest, p, isRep0Long); + COPY_ARR(dest, p, posSlotEncoder); + COPY_ARR(dest, p, posEncoders); + + memcpy(dest->litProbs, p->litProbs, ((UInt32)0x300 << dest->lclp) * sizeof(CLzmaProb)); +} + + + +SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2) +{ + CLzmaEnc *p = (CLzmaEnc *)pp; + CLzmaEncProps props = *props2; + LzmaEncProps_Normalize(&props); + + if (props.lc > LZMA_LC_MAX + || props.lp > LZMA_LP_MAX + || props.pb > LZMA_PB_MAX + || props.dictSize > ((UInt64)1 << kDicLogSizeMaxCompress) + || props.dictSize > kLzmaMaxHistorySize) + return SZ_ERROR_PARAM; + + p->dictSize = props.dictSize; + { + unsigned fb = props.fb; + if (fb < 5) + fb 
= 5; + if (fb > LZMA_MATCH_LEN_MAX) + fb = LZMA_MATCH_LEN_MAX; + p->numFastBytes = fb; + } + p->lc = props.lc; + p->lp = props.lp; + p->pb = props.pb; + p->fastMode = (props.algo == 0); + // p->_maxMode = True; + p->matchFinderBase.btMode = (Byte)(props.btMode ? 1 : 0); + { + unsigned numHashBytes = 4; + if (props.btMode) + { + if (props.numHashBytes < 2) + numHashBytes = 2; + else if (props.numHashBytes < 4) + numHashBytes = props.numHashBytes; + } + p->matchFinderBase.numHashBytes = numHashBytes; + } + + p->matchFinderBase.cutValue = props.mc; + + p->writeEndMark = props.writeEndMark; + + #ifndef _7ZIP_ST + /* + if (newMultiThread != _multiThread) + { + ReleaseMatchFinder(); + _multiThread = newMultiThread; + } + */ + p->multiThread = (props.numThreads > 1); + #endif + + return SZ_OK; +} + + +void LzmaEnc_SetDataSize(CLzmaEncHandle pp, UInt64 expectedDataSiize) +{ + CLzmaEnc *p = (CLzmaEnc *)pp; + p->matchFinderBase.expectedDataSize = expectedDataSiize; +} + + +#define kState_Start 0 +#define kState_LitAfterMatch 4 +#define kState_LitAfterRep 5 +#define kState_MatchAfterLit 7 +#define kState_RepAfterLit 8 + +static const Byte kLiteralNextStates[kNumStates] = {0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 4, 5}; +static const Byte kMatchNextStates[kNumStates] = {7, 7, 7, 7, 7, 7, 7, 10, 10, 10, 10, 10}; +static const Byte kRepNextStates[kNumStates] = {8, 8, 8, 8, 8, 8, 8, 11, 11, 11, 11, 11}; +static const Byte kShortRepNextStates[kNumStates]= {9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11}; + +#define IsLitState(s) ((s) < 7) +#define GetLenToPosState2(len) (((len) < kNumLenToPosStates - 1) ? (len) : kNumLenToPosStates - 1) +#define GetLenToPosState(len) (((len) < kNumLenToPosStates + 1) ? (len) - 2 : kNumLenToPosStates - 1) + +#define kInfinityPrice (1 << 30) + +static void RangeEnc_Construct(CRangeEnc *p) +{ + p->outStream = NULL; + p->bufBase = NULL; +} + +#define RangeEnc_GetProcessed(p) ((p)->processed + ((p)->buf - (p)->bufBase) + (p)->cacheSize) +#define RangeEnc_GetProcessed_sizet(p) ((size_t)(p)->processed + ((p)->buf - (p)->bufBase) + (size_t)(p)->cacheSize) + +#define RC_BUF_SIZE (1 << 16) + +static int RangeEnc_Alloc(CRangeEnc *p, ISzAllocPtr alloc) +{ + if (!p->bufBase) + { + p->bufBase = (Byte *)ISzAlloc_Alloc(alloc, RC_BUF_SIZE); + if (!p->bufBase) + return 0; + p->bufLim = p->bufBase + RC_BUF_SIZE; + } + return 1; +} + +static void RangeEnc_Free(CRangeEnc *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->bufBase); + p->bufBase = 0; +} + +static void RangeEnc_Init(CRangeEnc *p) +{ + /* Stream.Init(); */ + p->range = 0xFFFFFFFF; + p->cache = 0; + p->low = 0; + p->cacheSize = 0; + + p->buf = p->bufBase; + + p->processed = 0; + p->res = SZ_OK; +} + +MY_NO_INLINE static void RangeEnc_FlushStream(CRangeEnc *p) +{ + size_t num; + if (p->res != SZ_OK) + return; + num = p->buf - p->bufBase; + if (num != ISeqOutStream_Write(p->outStream, p->bufBase, num)) + p->res = SZ_ERROR_WRITE; + p->processed += num; + p->buf = p->bufBase; +} + +MY_NO_INLINE static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p) +{ + UInt32 low = (UInt32)p->low; + unsigned high = (unsigned)(p->low >> 32); + p->low = (UInt32)(low << 8); + if (low < (UInt32)0xFF000000 || high != 0) + { + { + Byte *buf = p->buf; + *buf++ = (Byte)(p->cache + high); + p->cache = (unsigned)(low >> 24); + p->buf = buf; + if (buf == p->bufLim) + RangeEnc_FlushStream(p); + if (p->cacheSize == 0) + return; + } + high += 0xFF; + for (;;) + { + Byte *buf = p->buf; + *buf++ = (Byte)(high); + p->buf = buf; + if (buf == p->bufLim) + RangeEnc_FlushStream(p); + 
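      /* Annotation (not from upstream): cacheSize counts the deferred bytes,
         i.e. the "cache" byte plus a run of 0xFF bytes held back in case a
         carry arrives. high is 1 here iff a carry propagated out of bit 32
         of the 64-bit low, so after the high += 0xFF above, (Byte)high
         rewrites each deferred 0xFF as 0x00 on carry and re-emits 0xFF
         otherwise. Worked example: cache=0x7F, cacheSize=3, carry arrives
         -> the flush outputs 0x80 0x00 0x00. */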
if (--p->cacheSize == 0) + return; + } + } + p->cacheSize++; +} + +static void RangeEnc_FlushData(CRangeEnc *p) +{ + int i; + for (i = 0; i < 5; i++) + RangeEnc_ShiftLow(p); +} + +#define RC_NORM(p) if (range < kTopValue) { range <<= 8; RangeEnc_ShiftLow(p); } + +#define RC_BIT_PRE(p, prob) \ + ttt = *(prob); \ + newBound = (range >> kNumBitModelTotalBits) * ttt; + +// #define _LZMA_ENC_USE_BRANCH + +#ifdef _LZMA_ENC_USE_BRANCH + +#define RC_BIT(p, prob, bit) { \ + RC_BIT_PRE(p, prob) \ + if (bit == 0) { range = newBound; ttt += (kBitModelTotal - ttt) >> kNumMoveBits; } \ + else { (p)->low += newBound; range -= newBound; ttt -= ttt >> kNumMoveBits; } \ + *(prob) = (CLzmaProb)ttt; \ + RC_NORM(p) \ + } + +#else + +#define RC_BIT(p, prob, bit) { \ + UInt32 mask; \ + RC_BIT_PRE(p, prob) \ + mask = 0 - (UInt32)bit; \ + range &= mask; \ + mask &= newBound; \ + range -= mask; \ + (p)->low += mask; \ + mask = (UInt32)bit - 1; \ + range += newBound & mask; \ + mask &= (kBitModelTotal - ((1 << kNumMoveBits) - 1)); \ + mask += ((1 << kNumMoveBits) - 1); \ + ttt += (Int32)(mask - ttt) >> kNumMoveBits; \ + *(prob) = (CLzmaProb)ttt; \ + RC_NORM(p) \ + } + +#endif + + + + +#define RC_BIT_0_BASE(p, prob) \ + range = newBound; *(prob) = (CLzmaProb)(ttt + ((kBitModelTotal - ttt) >> kNumMoveBits)); + +#define RC_BIT_1_BASE(p, prob) \ + range -= newBound; (p)->low += newBound; *(prob) = (CLzmaProb)(ttt - (ttt >> kNumMoveBits)); \ + +#define RC_BIT_0(p, prob) \ + RC_BIT_0_BASE(p, prob) \ + RC_NORM(p) + +#define RC_BIT_1(p, prob) \ + RC_BIT_1_BASE(p, prob) \ + RC_NORM(p) + +static void RangeEnc_EncodeBit_0(CRangeEnc *p, CLzmaProb *prob) +{ + UInt32 range, ttt, newBound; + range = p->range; + RC_BIT_PRE(p, prob) + RC_BIT_0(p, prob) + p->range = range; +} + +static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 sym) +{ + UInt32 range = p->range; + sym |= 0x100; + do + { + UInt32 ttt, newBound; + // RangeEnc_EncodeBit(p, probs + (sym >> 8), (sym >> 7) & 1); + CLzmaProb *prob = probs + (sym >> 8); + UInt32 bit = (sym >> 7) & 1; + sym <<= 1; + RC_BIT(p, prob, bit); + } + while (sym < 0x10000); + p->range = range; +} + +static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 sym, UInt32 matchByte) +{ + UInt32 range = p->range; + UInt32 offs = 0x100; + sym |= 0x100; + do + { + UInt32 ttt, newBound; + CLzmaProb *prob; + UInt32 bit; + matchByte <<= 1; + // RangeEnc_EncodeBit(p, probs + (offs + (matchByte & offs) + (sym >> 8)), (sym >> 7) & 1); + prob = probs + (offs + (matchByte & offs) + (sym >> 8)); + bit = (sym >> 7) & 1; + sym <<= 1; + offs &= ~(matchByte ^ sym); + RC_BIT(p, prob, bit); + } + while (sym < 0x10000); + p->range = range; +} + + + +static void LzmaEnc_InitPriceTables(CProbPrice *ProbPrices) +{ + UInt32 i; + for (i = 0; i < (kBitModelTotal >> kNumMoveReducingBits); i++) + { + const unsigned kCyclesBits = kNumBitPriceShiftBits; + UInt32 w = (i << kNumMoveReducingBits) + (1 << (kNumMoveReducingBits - 1)); + unsigned bitCount = 0; + unsigned j; + for (j = 0; j < kCyclesBits; j++) + { + w = w * w; + bitCount <<= 1; + while (w >= ((UInt32)1 << 16)) + { + w >>= 1; + bitCount++; + } + } + ProbPrices[i] = (CProbPrice)((kNumBitModelTotalBits << kCyclesBits) - 15 - bitCount); + // printf("\n%3d: %5d", i, ProbPrices[i]); + } +} + + +#define GET_PRICE(prob, bit) \ + p->ProbPrices[((prob) ^ (unsigned)(((-(int)(bit))) & (kBitModelTotal - 1))) >> kNumMoveReducingBits]; + +#define GET_PRICEa(prob, bit) \ + ProbPrices[((prob) ^ (unsigned)((-((int)(bit))) & (kBitModelTotal - 1))) >> 
kNumMoveReducingBits]; + +#define GET_PRICE_0(prob) p->ProbPrices[(prob) >> kNumMoveReducingBits] +#define GET_PRICE_1(prob) p->ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits] + +#define GET_PRICEa_0(prob) ProbPrices[(prob) >> kNumMoveReducingBits] +#define GET_PRICEa_1(prob) ProbPrices[((prob) ^ (kBitModelTotal - 1)) >> kNumMoveReducingBits] + + +static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 sym, const CProbPrice *ProbPrices) +{ + UInt32 price = 0; + sym |= 0x100; + do + { + unsigned bit = sym & 1; + sym >>= 1; + price += GET_PRICEa(probs[sym], bit); + } + while (sym >= 2); + return price; +} + + +static UInt32 LitEnc_Matched_GetPrice(const CLzmaProb *probs, UInt32 sym, UInt32 matchByte, const CProbPrice *ProbPrices) +{ + UInt32 price = 0; + UInt32 offs = 0x100; + sym |= 0x100; + do + { + matchByte <<= 1; + price += GET_PRICEa(probs[offs + (matchByte & offs) + (sym >> 8)], (sym >> 7) & 1); + sym <<= 1; + offs &= ~(matchByte ^ sym); + } + while (sym < 0x10000); + return price; +} + + +static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, unsigned numBits, unsigned sym) +{ + UInt32 range = rc->range; + unsigned m = 1; + do + { + UInt32 ttt, newBound; + unsigned bit = sym & 1; + // RangeEnc_EncodeBit(rc, probs + m, bit); + sym >>= 1; + RC_BIT(rc, probs + m, bit); + m = (m << 1) | bit; + } + while (--numBits); + rc->range = range; +} + + + +static void LenEnc_Init(CLenEnc *p) +{ + unsigned i; + for (i = 0; i < (LZMA_NUM_PB_STATES_MAX << (kLenNumLowBits + 1)); i++) + p->low[i] = kProbInitValue; + for (i = 0; i < kLenNumHighSymbols; i++) + p->high[i] = kProbInitValue; +} + +static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, unsigned sym, unsigned posState) +{ + UInt32 range, ttt, newBound; + CLzmaProb *probs = p->low; + range = rc->range; + RC_BIT_PRE(rc, probs); + if (sym >= kLenNumLowSymbols) + { + RC_BIT_1(rc, probs); + probs += kLenNumLowSymbols; + RC_BIT_PRE(rc, probs); + if (sym >= kLenNumLowSymbols * 2) + { + RC_BIT_1(rc, probs); + rc->range = range; + // RcTree_Encode(rc, p->high, kLenNumHighBits, sym - kLenNumLowSymbols * 2); + LitEnc_Encode(rc, p->high, sym - kLenNumLowSymbols * 2); + return; + } + sym -= kLenNumLowSymbols; + } + + // RcTree_Encode(rc, probs + (posState << kLenNumLowBits), kLenNumLowBits, sym); + { + unsigned m; + unsigned bit; + RC_BIT_0(rc, probs); + probs += (posState << (1 + kLenNumLowBits)); + bit = (sym >> 2) ; RC_BIT(rc, probs + 1, bit); m = (1 << 1) + bit; + bit = (sym >> 1) & 1; RC_BIT(rc, probs + m, bit); m = (m << 1) + bit; + bit = sym & 1; RC_BIT(rc, probs + m, bit); + rc->range = range; + } +} + +static void SetPrices_3(const CLzmaProb *probs, UInt32 startPrice, UInt32 *prices, const CProbPrice *ProbPrices) +{ + unsigned i; + for (i = 0; i < 8; i += 2) + { + UInt32 price = startPrice; + UInt32 prob; + price += GET_PRICEa(probs[1 ], (i >> 2)); + price += GET_PRICEa(probs[2 + (i >> 2)], (i >> 1) & 1); + prob = probs[4 + (i >> 1)]; + prices[i ] = price + GET_PRICEa_0(prob); + prices[i + 1] = price + GET_PRICEa_1(prob); + } +} + + +MY_NO_INLINE static void MY_FAST_CALL LenPriceEnc_UpdateTables( + CLenPriceEnc *p, + unsigned numPosStates, + const CLenEnc *enc, + const CProbPrice *ProbPrices) +{ + UInt32 b; + + { + unsigned prob = enc->low[0]; + UInt32 a, c; + unsigned posState; + b = GET_PRICEa_1(prob); + a = GET_PRICEa_0(prob); + c = b + GET_PRICEa_0(enc->low[kLenNumLowSymbols]); + for (posState = 0; posState < numPosStates; posState++) + { + UInt32 *prices = p->prices[posState]; + const CLzmaProb *probs = 
enc->low + (posState << (1 + kLenNumLowBits)); + SetPrices_3(probs, a, prices, ProbPrices); + SetPrices_3(probs + kLenNumLowSymbols, c, prices + kLenNumLowSymbols, ProbPrices); + } + } + + /* + { + unsigned i; + UInt32 b; + a = GET_PRICEa_0(enc->low[0]); + for (i = 0; i < kLenNumLowSymbols; i++) + p->prices2[i] = a; + a = GET_PRICEa_1(enc->low[0]); + b = a + GET_PRICEa_0(enc->low[kLenNumLowSymbols]); + for (i = kLenNumLowSymbols; i < kLenNumLowSymbols * 2; i++) + p->prices2[i] = b; + a += GET_PRICEa_1(enc->low[kLenNumLowSymbols]); + } + */ + + // p->counter = numSymbols; + // p->counter = 64; + + { + unsigned i = p->tableSize; + + if (i > kLenNumLowSymbols * 2) + { + const CLzmaProb *probs = enc->high; + UInt32 *prices = p->prices[0] + kLenNumLowSymbols * 2; + i -= kLenNumLowSymbols * 2 - 1; + i >>= 1; + b += GET_PRICEa_1(enc->low[kLenNumLowSymbols]); + do + { + /* + p->prices2[i] = a + + // RcTree_GetPrice(enc->high, kLenNumHighBits, i - kLenNumLowSymbols * 2, ProbPrices); + LitEnc_GetPrice(probs, i - kLenNumLowSymbols * 2, ProbPrices); + */ + // UInt32 price = a + RcTree_GetPrice(probs, kLenNumHighBits - 1, sym, ProbPrices); + unsigned sym = --i + (1 << (kLenNumHighBits - 1)); + UInt32 price = b; + do + { + unsigned bit = sym & 1; + sym >>= 1; + price += GET_PRICEa(probs[sym], bit); + } + while (sym >= 2); + + { + unsigned prob = probs[(size_t)i + (1 << (kLenNumHighBits - 1))]; + prices[(size_t)i * 2 ] = price + GET_PRICEa_0(prob); + prices[(size_t)i * 2 + 1] = price + GET_PRICEa_1(prob); + } + } + while (i); + + { + unsigned posState; + size_t num = (p->tableSize - kLenNumLowSymbols * 2) * sizeof(p->prices[0][0]); + for (posState = 1; posState < numPosStates; posState++) + memcpy(p->prices[posState] + kLenNumLowSymbols * 2, p->prices[0] + kLenNumLowSymbols * 2, num); + } + } + } +} + +/* + #ifdef SHOW_STAT + g_STAT_OFFSET += num; + printf("\n MovePos %u", num); + #endif +*/ + +#define MOVE_POS(p, num) { \ + p->additionalOffset += (num); \ + p->matchFinder.Skip(p->matchFinderObj, (UInt32)(num)); } + + +static unsigned ReadMatchDistances(CLzmaEnc *p, unsigned *numPairsRes) +{ + unsigned numPairs; + + p->additionalOffset++; + p->numAvail = p->matchFinder.GetNumAvailableBytes(p->matchFinderObj); + numPairs = p->matchFinder.GetMatches(p->matchFinderObj, p->matches); + *numPairsRes = numPairs; + + #ifdef SHOW_STAT + printf("\n i = %u numPairs = %u ", g_STAT_OFFSET, numPairs / 2); + g_STAT_OFFSET++; + { + unsigned i; + for (i = 0; i < numPairs; i += 2) + printf("%2u %6u | ", p->matches[i], p->matches[i + 1]); + } + #endif + + if (numPairs == 0) + return 0; + { + unsigned len = p->matches[(size_t)numPairs - 2]; + if (len != p->numFastBytes) + return len; + { + UInt32 numAvail = p->numAvail; + if (numAvail > LZMA_MATCH_LEN_MAX) + numAvail = LZMA_MATCH_LEN_MAX; + { + const Byte *p1 = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; + const Byte *p2 = p1 + len; + ptrdiff_t dif = (ptrdiff_t)-1 - p->matches[(size_t)numPairs - 1]; + const Byte *lim = p1 + numAvail; + for (; p2 != lim && *p2 == p2[dif]; p2++) + {} + return (unsigned)(p2 - p1); + } + } + } +} + +#define MARK_LIT ((UInt32)(Int32)-1) + +#define MakeAs_Lit(p) { (p)->dist = MARK_LIT; (p)->extra = 0; } +#define MakeAs_ShortRep(p) { (p)->dist = 0; (p)->extra = 0; } +#define IsShortRep(p) ((p)->dist == 0) + + +#define GetPrice_ShortRep(p, state, posState) \ + ( GET_PRICE_0(p->isRepG0[state]) + GET_PRICE_0(p->isRep0Long[state][posState])) + +#define GetPrice_Rep_0(p, state, posState) ( \ + GET_PRICE_1(p->isMatch[state][posState]) 
\ + + GET_PRICE_1(p->isRep0Long[state][posState])) \ + + GET_PRICE_1(p->isRep[state]) \ + + GET_PRICE_0(p->isRepG0[state]) + +MY_FORCE_INLINE +static UInt32 GetPrice_PureRep(const CLzmaEnc *p, unsigned repIndex, size_t state, size_t posState) +{ + UInt32 price; + UInt32 prob = p->isRepG0[state]; + if (repIndex == 0) + { + price = GET_PRICE_0(prob); + price += GET_PRICE_1(p->isRep0Long[state][posState]); + } + else + { + price = GET_PRICE_1(prob); + prob = p->isRepG1[state]; + if (repIndex == 1) + price += GET_PRICE_0(prob); + else + { + price += GET_PRICE_1(prob); + price += GET_PRICE(p->isRepG2[state], repIndex - 2); + } + } + return price; +} + + +static unsigned Backward(CLzmaEnc *p, unsigned cur) +{ + unsigned wr = cur + 1; + p->optEnd = wr; + + for (;;) + { + UInt32 dist = p->opt[cur].dist; + unsigned len = (unsigned)p->opt[cur].len; + unsigned extra = (unsigned)p->opt[cur].extra; + cur -= len; + + if (extra) + { + wr--; + p->opt[wr].len = (UInt32)len; + cur -= extra; + len = extra; + if (extra == 1) + { + p->opt[wr].dist = dist; + dist = MARK_LIT; + } + else + { + p->opt[wr].dist = 0; + len--; + wr--; + p->opt[wr].dist = MARK_LIT; + p->opt[wr].len = 1; + } + } + + if (cur == 0) + { + p->backRes = dist; + p->optCur = wr; + return len; + } + + wr--; + p->opt[wr].dist = dist; + p->opt[wr].len = (UInt32)len; + } +} + + + +#define LIT_PROBS(pos, prevByte) \ + (p->litProbs + (UInt32)3 * (((((pos) << 8) + (prevByte)) & p->lpMask) << p->lc)) + + +static unsigned GetOptimum(CLzmaEnc *p, UInt32 position) +{ + unsigned last, cur; + UInt32 reps[LZMA_NUM_REPS]; + unsigned repLens[LZMA_NUM_REPS]; + UInt32 *matches; + + { + UInt32 numAvail; + unsigned numPairs, mainLen, repMaxIndex, i, posState; + UInt32 matchPrice, repMatchPrice; + const Byte *data; + Byte curByte, matchByte; + + p->optCur = p->optEnd = 0; + + if (p->additionalOffset == 0) + mainLen = ReadMatchDistances(p, &numPairs); + else + { + mainLen = p->longestMatchLen; + numPairs = p->numPairs; + } + + numAvail = p->numAvail; + if (numAvail < 2) + { + p->backRes = MARK_LIT; + return 1; + } + if (numAvail > LZMA_MATCH_LEN_MAX) + numAvail = LZMA_MATCH_LEN_MAX; + + data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; + repMaxIndex = 0; + + for (i = 0; i < LZMA_NUM_REPS; i++) + { + unsigned len; + const Byte *data2; + reps[i] = p->reps[i]; + data2 = data - reps[i]; + if (data[0] != data2[0] || data[1] != data2[1]) + { + repLens[i] = 0; + continue; + } + for (len = 2; len < numAvail && data[len] == data2[len]; len++) + {} + repLens[i] = len; + if (len > repLens[repMaxIndex]) + repMaxIndex = i; + } + + if (repLens[repMaxIndex] >= p->numFastBytes) + { + unsigned len; + p->backRes = (UInt32)repMaxIndex; + len = repLens[repMaxIndex]; + MOVE_POS(p, len - 1) + return len; + } + + matches = p->matches; + + if (mainLen >= p->numFastBytes) + { + p->backRes = matches[(size_t)numPairs - 1] + LZMA_NUM_REPS; + MOVE_POS(p, mainLen - 1) + return mainLen; + } + + curByte = *data; + matchByte = *(data - reps[0]); + + last = repLens[repMaxIndex]; + if (last <= mainLen) + last = mainLen; + + if (last < 2 && curByte != matchByte) + { + p->backRes = MARK_LIT; + return 1; + } + + p->opt[0].state = (CState)p->state; + + posState = (position & p->pbMask); + + { + const CLzmaProb *probs = LIT_PROBS(position, *(data - 1)); + p->opt[1].price = GET_PRICE_0(p->isMatch[p->state][posState]) + + (!IsLitState(p->state) ? 
+ LitEnc_Matched_GetPrice(probs, curByte, matchByte, p->ProbPrices) : + LitEnc_GetPrice(probs, curByte, p->ProbPrices)); + } + + MakeAs_Lit(&p->opt[1]); + + matchPrice = GET_PRICE_1(p->isMatch[p->state][posState]); + repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[p->state]); + + // 18.06 + if (matchByte == curByte && repLens[0] == 0) + { + UInt32 shortRepPrice = repMatchPrice + GetPrice_ShortRep(p, p->state, posState); + if (shortRepPrice < p->opt[1].price) + { + p->opt[1].price = shortRepPrice; + MakeAs_ShortRep(&p->opt[1]); + } + if (last < 2) + { + p->backRes = p->opt[1].dist; + return 1; + } + } + + p->opt[1].len = 1; + + p->opt[0].reps[0] = reps[0]; + p->opt[0].reps[1] = reps[1]; + p->opt[0].reps[2] = reps[2]; + p->opt[0].reps[3] = reps[3]; + + // ---------- REP ---------- + + for (i = 0; i < LZMA_NUM_REPS; i++) + { + unsigned repLen = repLens[i]; + UInt32 price; + if (repLen < 2) + continue; + price = repMatchPrice + GetPrice_PureRep(p, i, p->state, posState); + do + { + UInt32 price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState, repLen); + COptimal *opt = &p->opt[repLen]; + if (price2 < opt->price) + { + opt->price = price2; + opt->len = (UInt32)repLen; + opt->dist = (UInt32)i; + opt->extra = 0; + } + } + while (--repLen >= 2); + } + + + // ---------- MATCH ---------- + { + unsigned len = repLens[0] + 1; + if (len <= mainLen) + { + unsigned offs = 0; + UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[p->state]); + + if (len < 2) + len = 2; + else + while (len > matches[offs]) + offs += 2; + + for (; ; len++) + { + COptimal *opt; + UInt32 dist = matches[(size_t)offs + 1]; + UInt32 price = normalMatchPrice + GET_PRICE_LEN(&p->lenEnc, posState, len); + unsigned lenToPosState = GetLenToPosState(len); + + if (dist < kNumFullDistances) + price += p->distancesPrices[lenToPosState][dist & (kNumFullDistances - 1)]; + else + { + unsigned slot; + GetPosSlot2(dist, slot); + price += p->alignPrices[dist & kAlignMask]; + price += p->posSlotPrices[lenToPosState][slot]; + } + + opt = &p->opt[len]; + + if (price < opt->price) + { + opt->price = price; + opt->len = (UInt32)len; + opt->dist = dist + LZMA_NUM_REPS; + opt->extra = 0; + } + + if (len == matches[offs]) + { + offs += 2; + if (offs == numPairs) + break; + } + } + } + } + + + cur = 0; + + #ifdef SHOW_STAT2 + /* if (position >= 0) */ + { + unsigned i; + printf("\n pos = %4X", position); + for (i = cur; i <= last; i++) + printf("\nprice[%4X] = %u", position - cur + i, p->opt[i].price); + } + #endif + } + + + + // ---------- Optimal Parsing ---------- + + for (;;) + { + unsigned numAvail; + UInt32 numAvailFull; + unsigned newLen, numPairs, prev, state, posState, startLen; + UInt32 litPrice, matchPrice, repMatchPrice; + BoolInt nextIsLit; + Byte curByte, matchByte; + const Byte *data; + COptimal *curOpt, *nextOpt; + + if (++cur == last) + break; + + // 18.06 + if (cur >= kNumOpts - 64) + { + unsigned j, best; + UInt32 price = p->opt[cur].price; + best = cur; + for (j = cur + 1; j <= last; j++) + { + UInt32 price2 = p->opt[j].price; + if (price >= price2) + { + price = price2; + best = j; + } + } + { + unsigned delta = best - cur; + if (delta != 0) + { + MOVE_POS(p, delta); + } + } + cur = best; + break; + } + + newLen = ReadMatchDistances(p, &numPairs); + + if (newLen >= p->numFastBytes) + { + p->numPairs = numPairs; + p->longestMatchLen = newLen; + break; + } + + curOpt = &p->opt[cur]; + + position++; + + // we need that check here, if skip_items in p->opt are possible + /* + if (curOpt->price >= kInfinityPrice) + continue; + 
*/ + + prev = cur - curOpt->len; + + if (curOpt->len == 1) + { + state = (unsigned)p->opt[prev].state; + if (IsShortRep(curOpt)) + state = kShortRepNextStates[state]; + else + state = kLiteralNextStates[state]; + } + else + { + const COptimal *prevOpt; + UInt32 b0; + UInt32 dist = curOpt->dist; + + if (curOpt->extra) + { + prev -= (unsigned)curOpt->extra; + state = kState_RepAfterLit; + if (curOpt->extra == 1) + state = (dist < LZMA_NUM_REPS ? kState_RepAfterLit : kState_MatchAfterLit); + } + else + { + state = (unsigned)p->opt[prev].state; + if (dist < LZMA_NUM_REPS) + state = kRepNextStates[state]; + else + state = kMatchNextStates[state]; + } + + prevOpt = &p->opt[prev]; + b0 = prevOpt->reps[0]; + + if (dist < LZMA_NUM_REPS) + { + if (dist == 0) + { + reps[0] = b0; + reps[1] = prevOpt->reps[1]; + reps[2] = prevOpt->reps[2]; + reps[3] = prevOpt->reps[3]; + } + else + { + reps[1] = b0; + b0 = prevOpt->reps[1]; + if (dist == 1) + { + reps[0] = b0; + reps[2] = prevOpt->reps[2]; + reps[3] = prevOpt->reps[3]; + } + else + { + reps[2] = b0; + reps[0] = prevOpt->reps[dist]; + reps[3] = prevOpt->reps[dist ^ 1]; + } + } + } + else + { + reps[0] = (dist - LZMA_NUM_REPS + 1); + reps[1] = b0; + reps[2] = prevOpt->reps[1]; + reps[3] = prevOpt->reps[2]; + } + } + + curOpt->state = (CState)state; + curOpt->reps[0] = reps[0]; + curOpt->reps[1] = reps[1]; + curOpt->reps[2] = reps[2]; + curOpt->reps[3] = reps[3]; + + data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; + curByte = *data; + matchByte = *(data - reps[0]); + + posState = (position & p->pbMask); + + /* + The order of Price checks: + < LIT + <= SHORT_REP + < LIT : REP_0 + < REP [ : LIT : REP_0 ] + < MATCH [ : LIT : REP_0 ] + */ + + { + UInt32 curPrice = curOpt->price; + unsigned prob = p->isMatch[state][posState]; + matchPrice = curPrice + GET_PRICE_1(prob); + litPrice = curPrice + GET_PRICE_0(prob); + } + + nextOpt = &p->opt[(size_t)cur + 1]; + nextIsLit = False; + + // here we can allow skip_items in p->opt, if we don't check (nextOpt->price < kInfinityPrice) + // 18.new.06 + if ((nextOpt->price < kInfinityPrice + // && !IsLitState(state) + && matchByte == curByte) + || litPrice > nextOpt->price + ) + litPrice = 0; + else + { + const CLzmaProb *probs = LIT_PROBS(position, *(data - 1)); + litPrice += (!IsLitState(state) ? 
+ LitEnc_Matched_GetPrice(probs, curByte, matchByte, p->ProbPrices) : + LitEnc_GetPrice(probs, curByte, p->ProbPrices)); + + if (litPrice < nextOpt->price) + { + nextOpt->price = litPrice; + nextOpt->len = 1; + MakeAs_Lit(nextOpt); + nextIsLit = True; + } + } + + repMatchPrice = matchPrice + GET_PRICE_1(p->isRep[state]); + + numAvailFull = p->numAvail; + { + unsigned temp = kNumOpts - 1 - cur; + if (numAvailFull > temp) + numAvailFull = (UInt32)temp; + } + + // 18.06 + // ---------- SHORT_REP ---------- + if (IsLitState(state)) // 18.new + if (matchByte == curByte) + if (repMatchPrice < nextOpt->price) // 18.new + // if (numAvailFull < 2 || data[1] != *(data - reps[0] + 1)) + if ( + // nextOpt->price >= kInfinityPrice || + nextOpt->len < 2 // we can check nextOpt->len, if skip items are not allowed in p->opt + || (nextOpt->dist != 0 + // && nextOpt->extra <= 1 // 17.old + ) + ) + { + UInt32 shortRepPrice = repMatchPrice + GetPrice_ShortRep(p, state, posState); + // if (shortRepPrice <= nextOpt->price) // 17.old + if (shortRepPrice < nextOpt->price) // 18.new + { + nextOpt->price = shortRepPrice; + nextOpt->len = 1; + MakeAs_ShortRep(nextOpt); + nextIsLit = False; + } + } + + if (numAvailFull < 2) + continue; + numAvail = (numAvailFull <= p->numFastBytes ? numAvailFull : p->numFastBytes); + + // numAvail <= p->numFastBytes + + // ---------- LIT : REP_0 ---------- + + if (!nextIsLit + && litPrice != 0 // 18.new + && matchByte != curByte + && numAvailFull > 2) + { + const Byte *data2 = data - reps[0]; + if (data[1] == data2[1] && data[2] == data2[2]) + { + unsigned len; + unsigned limit = p->numFastBytes + 1; + if (limit > numAvailFull) + limit = numAvailFull; + for (len = 3; len < limit && data[len] == data2[len]; len++) + {} + + { + unsigned state2 = kLiteralNextStates[state]; + unsigned posState2 = (position + 1) & p->pbMask; + UInt32 price = litPrice + GetPrice_Rep_0(p, state2, posState2); + { + unsigned offset = cur + len; + + if (last < offset) + last = offset; + + // do + { + UInt32 price2; + COptimal *opt; + len--; + // price2 = price + GetPrice_Len_Rep_0(p, len, state2, posState2); + price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState2, len); + + opt = &p->opt[offset]; + // offset--; + if (price2 < opt->price) + { + opt->price = price2; + opt->len = (UInt32)len; + opt->dist = 0; + opt->extra = 1; + } + } + // while (len >= 3); + } + } + } + } + + startLen = 2; /* speed optimization */ + + { + // ---------- REP ---------- + unsigned repIndex = 0; // 17.old + // unsigned repIndex = IsLitState(state) ? 
0 : 1; // 18.notused + for (; repIndex < LZMA_NUM_REPS; repIndex++) + { + unsigned len; + UInt32 price; + const Byte *data2 = data - reps[repIndex]; + if (data[0] != data2[0] || data[1] != data2[1]) + continue; + + for (len = 2; len < numAvail && data[len] == data2[len]; len++) + {} + + // if (len < startLen) continue; // 18.new: speed optimization + + { + unsigned offset = cur + len; + if (last < offset) + last = offset; + } + { + unsigned len2 = len; + price = repMatchPrice + GetPrice_PureRep(p, repIndex, state, posState); + do + { + UInt32 price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState, len2); + COptimal *opt = &p->opt[cur + len2]; + if (price2 < opt->price) + { + opt->price = price2; + opt->len = (UInt32)len2; + opt->dist = (UInt32)repIndex; + opt->extra = 0; + } + } + while (--len2 >= 2); + } + + if (repIndex == 0) startLen = len + 1; // 17.old + // startLen = len + 1; // 18.new + + /* if (_maxMode) */ + { + // ---------- REP : LIT : REP_0 ---------- + // numFastBytes + 1 + numFastBytes + + unsigned len2 = len + 1; + unsigned limit = len2 + p->numFastBytes; + if (limit > numAvailFull) + limit = numAvailFull; + + len2 += 2; + if (len2 <= limit) + if (data[len2 - 2] == data2[len2 - 2]) + if (data[len2 - 1] == data2[len2 - 1]) + { + unsigned state2 = kRepNextStates[state]; + unsigned posState2 = (position + len) & p->pbMask; + price += GET_PRICE_LEN(&p->repLenEnc, posState, len) + + GET_PRICE_0(p->isMatch[state2][posState2]) + + LitEnc_Matched_GetPrice(LIT_PROBS(position + len, data[(size_t)len - 1]), + data[len], data2[len], p->ProbPrices); + + // state2 = kLiteralNextStates[state2]; + state2 = kState_LitAfterRep; + posState2 = (posState2 + 1) & p->pbMask; + + + price += GetPrice_Rep_0(p, state2, posState2); + + for (; len2 < limit && data[len2] == data2[len2]; len2++) + {} + + len2 -= len; + // if (len2 >= 3) + { + { + unsigned offset = cur + len + len2; + + if (last < offset) + last = offset; + // do + { + UInt32 price2; + COptimal *opt; + len2--; + // price2 = price + GetPrice_Len_Rep_0(p, len2, state2, posState2); + price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState2, len2); + + opt = &p->opt[offset]; + // offset--; + if (price2 < opt->price) + { + opt->price = price2; + opt->len = (UInt32)len2; + opt->extra = (CExtra)(len + 1); + opt->dist = (UInt32)repIndex; + } + } + // while (len2 >= 3); + } + } + } + } + } + } + + + // ---------- MATCH ---------- + /* for (unsigned len = 2; len <= newLen; len++) */ + if (newLen > numAvail) + { + newLen = numAvail; + for (numPairs = 0; newLen > matches[numPairs]; numPairs += 2); + matches[numPairs] = (UInt32)newLen; + numPairs += 2; + } + + // startLen = 2; /* speed optimization */ + + if (newLen >= startLen) + { + UInt32 normalMatchPrice = matchPrice + GET_PRICE_0(p->isRep[state]); + UInt32 dist; + unsigned offs, posSlot, len; + + { + unsigned offset = cur + newLen; + if (last < offset) + last = offset; + } + + offs = 0; + while (startLen > matches[offs]) + offs += 2; + dist = matches[(size_t)offs + 1]; + + // if (dist >= kNumFullDistances) + GetPosSlot2(dist, posSlot); + + for (len = /*2*/ startLen; ; len++) + { + UInt32 price = normalMatchPrice + GET_PRICE_LEN(&p->lenEnc, posState, len); + { + COptimal *opt; + unsigned lenNorm = len - 2; + lenNorm = GetLenToPosState2(lenNorm); + if (dist < kNumFullDistances) + price += p->distancesPrices[lenNorm][dist & (kNumFullDistances - 1)]; + else + price += p->posSlotPrices[lenNorm][posSlot] + p->alignPrices[dist & kAlignMask]; + + opt = &p->opt[cur + len]; + if (price < opt->price) + { + 
opt->price = price; + opt->len = (UInt32)len; + opt->dist = dist + LZMA_NUM_REPS; + opt->extra = 0; + } + } + + if (len == matches[offs]) + { + // if (p->_maxMode) { + // MATCH : LIT : REP_0 + + const Byte *data2 = data - dist - 1; + unsigned len2 = len + 1; + unsigned limit = len2 + p->numFastBytes; + if (limit > numAvailFull) + limit = numAvailFull; + + len2 += 2; + if (len2 <= limit) + if (data[len2 - 2] == data2[len2 - 2]) + if (data[len2 - 1] == data2[len2 - 1]) + { + for (; len2 < limit && data[len2] == data2[len2]; len2++) + {} + + len2 -= len; + + // if (len2 >= 3) + { + unsigned state2 = kMatchNextStates[state]; + unsigned posState2 = (position + len) & p->pbMask; + unsigned offset; + price += GET_PRICE_0(p->isMatch[state2][posState2]); + price += LitEnc_Matched_GetPrice(LIT_PROBS(position + len, data[(size_t)len - 1]), + data[len], data2[len], p->ProbPrices); + + // state2 = kLiteralNextStates[state2]; + state2 = kState_LitAfterMatch; + + posState2 = (posState2 + 1) & p->pbMask; + price += GetPrice_Rep_0(p, state2, posState2); + + offset = cur + len + len2; + + if (last < offset) + last = offset; + // do + { + UInt32 price2; + COptimal *opt; + len2--; + // price2 = price + GetPrice_Len_Rep_0(p, len2, state2, posState2); + price2 = price + GET_PRICE_LEN(&p->repLenEnc, posState2, len2); + opt = &p->opt[offset]; + // offset--; + if (price2 < opt->price) + { + opt->price = price2; + opt->len = (UInt32)len2; + opt->extra = (CExtra)(len + 1); + opt->dist = dist + LZMA_NUM_REPS; + } + } + // while (len2 >= 3); + } + + } + + offs += 2; + if (offs == numPairs) + break; + dist = matches[(size_t)offs + 1]; + // if (dist >= kNumFullDistances) + GetPosSlot2(dist, posSlot); + } + } + } + } + + do + p->opt[last].price = kInfinityPrice; + while (--last); + + return Backward(p, cur); +} + + + +#define ChangePair(smallDist, bigDist) (((bigDist) >> 7) > (smallDist)) + + + +static unsigned GetOptimumFast(CLzmaEnc *p) +{ + UInt32 numAvail, mainDist; + unsigned mainLen, numPairs, repIndex, repLen, i; + const Byte *data; + + if (p->additionalOffset == 0) + mainLen = ReadMatchDistances(p, &numPairs); + else + { + mainLen = p->longestMatchLen; + numPairs = p->numPairs; + } + + numAvail = p->numAvail; + p->backRes = MARK_LIT; + if (numAvail < 2) + return 1; + // if (mainLen < 2 && p->state == 0) return 1; // 18.06.notused + if (numAvail > LZMA_MATCH_LEN_MAX) + numAvail = LZMA_MATCH_LEN_MAX; + data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; + repLen = repIndex = 0; + + for (i = 0; i < LZMA_NUM_REPS; i++) + { + unsigned len; + const Byte *data2 = data - p->reps[i]; + if (data[0] != data2[0] || data[1] != data2[1]) + continue; + for (len = 2; len < numAvail && data[len] == data2[len]; len++) + {} + if (len >= p->numFastBytes) + { + p->backRes = (UInt32)i; + MOVE_POS(p, len - 1) + return len; + } + if (len > repLen) + { + repIndex = i; + repLen = len; + } + } + + if (mainLen >= p->numFastBytes) + { + p->backRes = p->matches[(size_t)numPairs - 1] + LZMA_NUM_REPS; + MOVE_POS(p, mainLen - 1) + return mainLen; + } + + mainDist = 0; /* for GCC */ + + if (mainLen >= 2) + { + mainDist = p->matches[(size_t)numPairs - 1]; + while (numPairs > 2) + { + UInt32 dist2; + if (mainLen != p->matches[(size_t)numPairs - 4] + 1) + break; + dist2 = p->matches[(size_t)numPairs - 3]; + if (!ChangePair(dist2, mainDist)) + break; + numPairs -= 2; + mainLen--; + mainDist = dist2; + } + if (mainLen == 2 && mainDist >= 0x80) + mainLen = 1; + } + + if (repLen >= 2) + if ( repLen + 1 >= mainLen + || (repLen + 2 >= 
mainLen && mainDist >= (1 << 9)) + || (repLen + 3 >= mainLen && mainDist >= (1 << 15))) + { + p->backRes = (UInt32)repIndex; + MOVE_POS(p, repLen - 1) + return repLen; + } + + if (mainLen < 2 || numAvail <= 2) + return 1; + + { + unsigned len1 = ReadMatchDistances(p, &p->numPairs); + p->longestMatchLen = len1; + + if (len1 >= 2) + { + UInt32 newDist = p->matches[(size_t)p->numPairs - 1]; + if ( (len1 >= mainLen && newDist < mainDist) + || (len1 == mainLen + 1 && !ChangePair(mainDist, newDist)) + || (len1 > mainLen + 1) + || (len1 + 1 >= mainLen && mainLen >= 3 && ChangePair(newDist, mainDist))) + return 1; + } + } + + data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - 1; + + for (i = 0; i < LZMA_NUM_REPS; i++) + { + unsigned len, limit; + const Byte *data2 = data - p->reps[i]; + if (data[0] != data2[0] || data[1] != data2[1]) + continue; + limit = mainLen - 1; + for (len = 2;; len++) + { + if (len >= limit) + return 1; + if (data[len] != data2[len]) + break; + } + } + + p->backRes = mainDist + LZMA_NUM_REPS; + if (mainLen != 2) + { + MOVE_POS(p, mainLen - 2) + } + return mainLen; +} + + + + +static void WriteEndMarker(CLzmaEnc *p, unsigned posState) +{ + UInt32 range; + range = p->rc.range; + { + UInt32 ttt, newBound; + CLzmaProb *prob = &p->isMatch[p->state][posState]; + RC_BIT_PRE(&p->rc, prob) + RC_BIT_1(&p->rc, prob) + prob = &p->isRep[p->state]; + RC_BIT_PRE(&p->rc, prob) + RC_BIT_0(&p->rc, prob) + } + p->state = kMatchNextStates[p->state]; + + p->rc.range = range; + LenEnc_Encode(&p->lenProbs, &p->rc, 0, posState); + range = p->rc.range; + + { + // RcTree_Encode_PosSlot(&p->rc, p->posSlotEncoder[0], (1 << kNumPosSlotBits) - 1); + CLzmaProb *probs = p->posSlotEncoder[0]; + unsigned m = 1; + do + { + UInt32 ttt, newBound; + RC_BIT_PRE(p, probs + m) + RC_BIT_1(&p->rc, probs + m); + m = (m << 1) + 1; + } + while (m < (1 << kNumPosSlotBits)); + } + { + // RangeEnc_EncodeDirectBits(&p->rc, ((UInt32)1 << (30 - kNumAlignBits)) - 1, 30 - kNumAlignBits); UInt32 range = p->range; + unsigned numBits = 30 - kNumAlignBits; + do + { + range >>= 1; + p->rc.low += range; + RC_NORM(&p->rc) + } + while (--numBits); + } + + { + // RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, kAlignMask); + CLzmaProb *probs = p->posAlignEncoder; + unsigned m = 1; + do + { + UInt32 ttt, newBound; + RC_BIT_PRE(p, probs + m) + RC_BIT_1(&p->rc, probs + m); + m = (m << 1) + 1; + } + while (m < kAlignTableSize); + } + p->rc.range = range; +} + + +static SRes CheckErrors(CLzmaEnc *p) +{ + if (p->result != SZ_OK) + return p->result; + if (p->rc.res != SZ_OK) + p->result = SZ_ERROR_WRITE; + if (p->matchFinderBase.result != SZ_OK) + p->result = SZ_ERROR_READ; + if (p->result != SZ_OK) + p->finished = True; + return p->result; +} + + +MY_NO_INLINE static SRes Flush(CLzmaEnc *p, UInt32 nowPos) +{ + /* ReleaseMFStream(); */ + p->finished = True; + if (p->writeEndMark) + WriteEndMarker(p, nowPos & p->pbMask); + RangeEnc_FlushData(&p->rc); + RangeEnc_FlushStream(&p->rc); + return CheckErrors(p); +} + + +MY_NO_INLINE static void FillAlignPrices(CLzmaEnc *p) +{ + unsigned i; + const CProbPrice *ProbPrices = p->ProbPrices; + const CLzmaProb *probs = p->posAlignEncoder; + // p->alignPriceCount = 0; + for (i = 0; i < kAlignTableSize / 2; i++) + { + UInt32 price = 0; + unsigned sym = i; + unsigned m = 1; + unsigned bit; + UInt32 prob; + bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[m], bit); m = (m << 1) + bit; + bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[m], bit); m = (m << 1) + bit; + bit = 
sym & 1; sym >>= 1; price += GET_PRICEa(probs[m], bit); m = (m << 1) + bit; + prob = probs[m]; + p->alignPrices[i ] = price + GET_PRICEa_0(prob); + p->alignPrices[i + 8] = price + GET_PRICEa_1(prob); + // p->alignPrices[i] = RcTree_ReverseGetPrice(p->posAlignEncoder, kNumAlignBits, i, p->ProbPrices); + } +} + + +MY_NO_INLINE static void FillDistancesPrices(CLzmaEnc *p) +{ + // int y; for (y = 0; y < 100; y++) { + + UInt32 tempPrices[kNumFullDistances]; + unsigned i, lps; + + const CProbPrice *ProbPrices = p->ProbPrices; + p->matchPriceCount = 0; + + for (i = kStartPosModelIndex / 2; i < kNumFullDistances / 2; i++) + { + unsigned posSlot = GetPosSlot1(i); + unsigned footerBits = (posSlot >> 1) - 1; + unsigned base = ((2 | (posSlot & 1)) << footerBits); + const CLzmaProb *probs = p->posEncoders + (size_t)base * 2; + // tempPrices[i] = RcTree_ReverseGetPrice(p->posEncoders + base, footerBits, i - base, p->ProbPrices); + UInt32 price = 0; + unsigned m = 1; + unsigned sym = i; + unsigned offset = (unsigned)1 << footerBits; + base += i; + + if (footerBits) + do + { + unsigned bit = sym & 1; + sym >>= 1; + price += GET_PRICEa(probs[m], bit); + m = (m << 1) + bit; + } + while (--footerBits); + + { + unsigned prob = probs[m]; + tempPrices[base ] = price + GET_PRICEa_0(prob); + tempPrices[base + offset] = price + GET_PRICEa_1(prob); + } + } + + for (lps = 0; lps < kNumLenToPosStates; lps++) + { + unsigned slot; + unsigned distTableSize2 = (p->distTableSize + 1) >> 1; + UInt32 *posSlotPrices = p->posSlotPrices[lps]; + const CLzmaProb *probs = p->posSlotEncoder[lps]; + + for (slot = 0; slot < distTableSize2; slot++) + { + // posSlotPrices[slot] = RcTree_GetPrice(encoder, kNumPosSlotBits, slot, p->ProbPrices); + UInt32 price; + unsigned bit; + unsigned sym = slot + (1 << (kNumPosSlotBits - 1)); + unsigned prob; + bit = sym & 1; sym >>= 1; price = GET_PRICEa(probs[sym], bit); + bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit); + bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit); + bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit); + bit = sym & 1; sym >>= 1; price += GET_PRICEa(probs[sym], bit); + prob = probs[(size_t)slot + (1 << (kNumPosSlotBits - 1))]; + posSlotPrices[(size_t)slot * 2 ] = price + GET_PRICEa_0(prob); + posSlotPrices[(size_t)slot * 2 + 1] = price + GET_PRICEa_1(prob); + } + + { + UInt32 delta = ((UInt32)((kEndPosModelIndex / 2 - 1) - kNumAlignBits) << kNumBitPriceShiftBits); + for (slot = kEndPosModelIndex / 2; slot < distTableSize2; slot++) + { + posSlotPrices[(size_t)slot * 2 ] += delta; + posSlotPrices[(size_t)slot * 2 + 1] += delta; + delta += ((UInt32)1 << kNumBitPriceShiftBits); + } + } + + { + UInt32 *dp = p->distancesPrices[lps]; + + dp[0] = posSlotPrices[0]; + dp[1] = posSlotPrices[1]; + dp[2] = posSlotPrices[2]; + dp[3] = posSlotPrices[3]; + + for (i = 4; i < kNumFullDistances; i += 2) + { + UInt32 slotPrice = posSlotPrices[GetPosSlot1(i)]; + dp[i ] = slotPrice + tempPrices[i]; + dp[i + 1] = slotPrice + tempPrices[i + 1]; + } + } + } + // } +} + + + +void LzmaEnc_Construct(CLzmaEnc *p) +{ + RangeEnc_Construct(&p->rc); + MatchFinder_Construct(&p->matchFinderBase); + + #ifndef _7ZIP_ST + MatchFinderMt_Construct(&p->matchFinderMt); + p->matchFinderMt.MatchFinder = &p->matchFinderBase; + #endif + + { + CLzmaEncProps props; + LzmaEncProps_Init(&props); + LzmaEnc_SetProps(p, &props); + } + + #ifndef LZMA_LOG_BSR + LzmaEnc_FastPosInit(p->g_FastPos); + #endif + + LzmaEnc_InitPriceTables(p->ProbPrices); + p->litProbs = NULL; + 
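  /* Annotation (not from upstream): litProbs (just above) and the saveState
     copy (next line) deliberately start NULL; their size depends on lc+lp,
     so LzmaEnc_Alloc, later in this file, (re)allocates them lazily once the
     final properties are known, and LzmaEnc_FreeLits resets them the same way. */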
p->saveState.litProbs = NULL; + +} + +CLzmaEncHandle LzmaEnc_Create(ISzAllocPtr alloc) +{ + void *p; + p = ISzAlloc_Alloc(alloc, sizeof(CLzmaEnc)); + if (p) + LzmaEnc_Construct((CLzmaEnc *)p); + return p; +} + +void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->litProbs); + ISzAlloc_Free(alloc, p->saveState.litProbs); + p->litProbs = NULL; + p->saveState.litProbs = NULL; +} + +void LzmaEnc_Destruct(CLzmaEnc *p, ISzAllocPtr alloc, ISzAllocPtr allocBig) +{ + #ifndef _7ZIP_ST + MatchFinderMt_Destruct(&p->matchFinderMt, allocBig); + #endif + + MatchFinder_Free(&p->matchFinderBase, allocBig); + LzmaEnc_FreeLits(p, alloc); + RangeEnc_Free(&p->rc, alloc); +} + +void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAllocPtr alloc, ISzAllocPtr allocBig) +{ + LzmaEnc_Destruct((CLzmaEnc *)p, alloc, allocBig); + ISzAlloc_Free(alloc, p); +} + + +static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpackSize) +{ + UInt32 nowPos32, startPos32; + if (p->needInit) + { + p->matchFinder.Init(p->matchFinderObj); + p->needInit = 0; + } + + if (p->finished) + return p->result; + RINOK(CheckErrors(p)); + + nowPos32 = (UInt32)p->nowPos64; + startPos32 = nowPos32; + + if (p->nowPos64 == 0) + { + unsigned numPairs; + Byte curByte; + if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0) + return Flush(p, nowPos32); + ReadMatchDistances(p, &numPairs); + RangeEnc_EncodeBit_0(&p->rc, &p->isMatch[kState_Start][0]); + // p->state = kLiteralNextStates[p->state]; + curByte = *(p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset); + LitEnc_Encode(&p->rc, p->litProbs, curByte); + p->additionalOffset--; + nowPos32++; + } + + if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) != 0) + + for (;;) + { + UInt32 dist; + unsigned len, posState; + UInt32 range, ttt, newBound; + CLzmaProb *probs; + + if (p->fastMode) + len = GetOptimumFast(p); + else + { + unsigned oci = p->optCur; + if (p->optEnd == oci) + len = GetOptimum(p, nowPos32); + else + { + const COptimal *opt = &p->opt[oci]; + len = opt->len; + p->backRes = opt->dist; + p->optCur = oci + 1; + } + } + + posState = (unsigned)nowPos32 & p->pbMask; + range = p->rc.range; + probs = &p->isMatch[p->state][posState]; + + RC_BIT_PRE(&p->rc, probs) + + dist = p->backRes; + + #ifdef SHOW_STAT2 + printf("\n pos = %6X, len = %3u pos = %6u", nowPos32, len, dist); + #endif + + if (dist == MARK_LIT) + { + Byte curByte; + const Byte *data; + unsigned state; + + RC_BIT_0(&p->rc, probs); + p->rc.range = range; + data = p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset; + probs = LIT_PROBS(nowPos32, *(data - 1)); + curByte = *data; + state = p->state; + p->state = kLiteralNextStates[state]; + if (IsLitState(state)) + LitEnc_Encode(&p->rc, probs, curByte); + else + LitEnc_EncodeMatched(&p->rc, probs, curByte, *(data - p->reps[0])); + } + else + { + RC_BIT_1(&p->rc, probs); + probs = &p->isRep[p->state]; + RC_BIT_PRE(&p->rc, probs) + + if (dist < LZMA_NUM_REPS) + { + RC_BIT_1(&p->rc, probs); + probs = &p->isRepG0[p->state]; + RC_BIT_PRE(&p->rc, probs) + if (dist == 0) + { + RC_BIT_0(&p->rc, probs); + probs = &p->isRep0Long[p->state][posState]; + RC_BIT_PRE(&p->rc, probs) + if (len != 1) + { + RC_BIT_1_BASE(&p->rc, probs); + } + else + { + RC_BIT_0_BASE(&p->rc, probs); + p->state = kShortRepNextStates[p->state]; + } + } + else + { + RC_BIT_1(&p->rc, probs); + probs = &p->isRepG1[p->state]; + RC_BIT_PRE(&p->rc, probs) + if (dist == 1) + { + RC_BIT_0_BASE(&p->rc, probs); + dist = 
p->reps[1]; + } + else + { + RC_BIT_1(&p->rc, probs); + probs = &p->isRepG2[p->state]; + RC_BIT_PRE(&p->rc, probs) + if (dist == 2) + { + RC_BIT_0_BASE(&p->rc, probs); + dist = p->reps[2]; + } + else + { + RC_BIT_1_BASE(&p->rc, probs); + dist = p->reps[3]; + p->reps[3] = p->reps[2]; + } + p->reps[2] = p->reps[1]; + } + p->reps[1] = p->reps[0]; + p->reps[0] = dist; + } + + RC_NORM(&p->rc) + + p->rc.range = range; + + if (len != 1) + { + LenEnc_Encode(&p->repLenProbs, &p->rc, len - LZMA_MATCH_LEN_MIN, posState); + --p->repLenEncCounter; + p->state = kRepNextStates[p->state]; + } + } + else + { + unsigned posSlot; + RC_BIT_0(&p->rc, probs); + p->rc.range = range; + p->state = kMatchNextStates[p->state]; + + LenEnc_Encode(&p->lenProbs, &p->rc, len - LZMA_MATCH_LEN_MIN, posState); + // --p->lenEnc.counter; + + dist -= LZMA_NUM_REPS; + p->reps[3] = p->reps[2]; + p->reps[2] = p->reps[1]; + p->reps[1] = p->reps[0]; + p->reps[0] = dist + 1; + + p->matchPriceCount++; + GetPosSlot(dist, posSlot); + // RcTree_Encode_PosSlot(&p->rc, p->posSlotEncoder[GetLenToPosState(len)], posSlot); + { + UInt32 sym = (UInt32)posSlot + (1 << kNumPosSlotBits); + range = p->rc.range; + probs = p->posSlotEncoder[GetLenToPosState(len)]; + do + { + CLzmaProb *prob = probs + (sym >> kNumPosSlotBits); + UInt32 bit = (sym >> (kNumPosSlotBits - 1)) & 1; + sym <<= 1; + RC_BIT(&p->rc, prob, bit); + } + while (sym < (1 << kNumPosSlotBits * 2)); + p->rc.range = range; + } + + if (dist >= kStartPosModelIndex) + { + unsigned footerBits = ((posSlot >> 1) - 1); + + if (dist < kNumFullDistances) + { + unsigned base = ((2 | (posSlot & 1)) << footerBits); + RcTree_ReverseEncode(&p->rc, p->posEncoders + base, footerBits, (unsigned)(dist /* - base */)); + } + else + { + UInt32 pos2 = (dist | 0xF) << (32 - footerBits); + range = p->rc.range; + // RangeEnc_EncodeDirectBits(&p->rc, posReduced >> kNumAlignBits, footerBits - kNumAlignBits); + /* + do + { + range >>= 1; + p->rc.low += range & (0 - ((dist >> --footerBits) & 1)); + RC_NORM(&p->rc) + } + while (footerBits > kNumAlignBits); + */ + do + { + range >>= 1; + p->rc.low += range & (0 - (pos2 >> 31)); + pos2 += pos2; + RC_NORM(&p->rc) + } + while (pos2 != 0xF0000000); + + + // RcTree_ReverseEncode(&p->rc, p->posAlignEncoder, kNumAlignBits, posReduced & kAlignMask); + + { + unsigned m = 1; + unsigned bit; + bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit); m = (m << 1) + bit; + bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit); m = (m << 1) + bit; + bit = dist & 1; dist >>= 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit); m = (m << 1) + bit; + bit = dist & 1; RC_BIT(&p->rc, p->posAlignEncoder + m, bit); + p->rc.range = range; + // p->alignPriceCount++; + } + } + } + } + } + + nowPos32 += (UInt32)len; + p->additionalOffset -= len; + + if (p->additionalOffset == 0) + { + UInt32 processed; + + if (!p->fastMode) + { + /* + if (p->alignPriceCount >= 16) // kAlignTableSize + FillAlignPrices(p); + if (p->matchPriceCount >= 128) + FillDistancesPrices(p); + if (p->lenEnc.counter <= 0) + LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, &p->lenProbs, p->ProbPrices); + */ + if (p->matchPriceCount >= 64) + { + FillAlignPrices(p); + // { int y; for (y = 0; y < 100; y++) { + FillDistancesPrices(p); + // }} + LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, &p->lenProbs, p->ProbPrices); + } + if (p->repLenEncCounter <= 0) + { + p->repLenEncCounter = REP_LEN_COUNT; + LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, &p->repLenProbs, p->ProbPrices); + } + } + + 
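+ /* what follows are the block-termination checks: stop when the match finder has no more input, when the caller-supplied pack/unpack limits are nearly reached (the LzmaEnc_CodeOneMemBlock path, presumably for LZMA2-style chunking), or after about 128 KB ((1 << 17) bytes) so the outer loop can invoke the progress callback */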
if (p->matchFinder.GetNumAvailableBytes(p->matchFinderObj) == 0) + break; + processed = nowPos32 - startPos32; + + if (maxPackSize) + { + if (processed + kNumOpts + 300 >= maxUnpackSize + || RangeEnc_GetProcessed_sizet(&p->rc) + kPackReserve >= maxPackSize) + break; + } + else if (processed >= (1 << 17)) + { + p->nowPos64 += nowPos32 - startPos32; + return CheckErrors(p); + } + } + } + + p->nowPos64 += nowPos32 - startPos32; + return Flush(p, nowPos32); +} + + + +#define kBigHashDicLimit ((UInt32)1 << 24) + +static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig) +{ + UInt32 beforeSize = kNumOpts; + if (!RangeEnc_Alloc(&p->rc, alloc)) + return SZ_ERROR_MEM; + + #ifndef _7ZIP_ST + p->mtMode = (p->multiThread && !p->fastMode && (p->matchFinderBase.btMode != 0)); + #endif + + { + unsigned lclp = p->lc + p->lp; + if (!p->litProbs || !p->saveState.litProbs || p->lclp != lclp) + { + LzmaEnc_FreeLits(p, alloc); + p->litProbs = (CLzmaProb *)ISzAlloc_Alloc(alloc, ((UInt32)0x300 << lclp) * sizeof(CLzmaProb)); + p->saveState.litProbs = (CLzmaProb *)ISzAlloc_Alloc(alloc, ((UInt32)0x300 << lclp) * sizeof(CLzmaProb)); + if (!p->litProbs || !p->saveState.litProbs) + { + LzmaEnc_FreeLits(p, alloc); + return SZ_ERROR_MEM; + } + p->lclp = lclp; + } + } + + p->matchFinderBase.bigHash = (Byte)(p->dictSize > kBigHashDicLimit ? 1 : 0); + + if (beforeSize + p->dictSize < keepWindowSize) + beforeSize = keepWindowSize - p->dictSize; + + #ifndef _7ZIP_ST + if (p->mtMode) + { + RINOK(MatchFinderMt_Create(&p->matchFinderMt, p->dictSize, beforeSize, p->numFastBytes, + LZMA_MATCH_LEN_MAX + + 1 /* 18.04 */ + , allocBig)); + p->matchFinderObj = &p->matchFinderMt; + p->matchFinderBase.bigHash = (Byte)( + (p->dictSize > kBigHashDicLimit && p->matchFinderBase.hashMask >= 0xFFFFFF) ? 
1 : 0); + MatchFinderMt_CreateVTable(&p->matchFinderMt, &p->matchFinder); + } + else + #endif + { + if (!MatchFinder_Create(&p->matchFinderBase, p->dictSize, beforeSize, p->numFastBytes, LZMA_MATCH_LEN_MAX, allocBig)) + return SZ_ERROR_MEM; + p->matchFinderObj = &p->matchFinderBase; + MatchFinder_CreateVTable(&p->matchFinderBase, &p->matchFinder); + } + + return SZ_OK; +} + +void LzmaEnc_Init(CLzmaEnc *p) +{ + unsigned i; + p->state = 0; + p->reps[0] = + p->reps[1] = + p->reps[2] = + p->reps[3] = 1; + + RangeEnc_Init(&p->rc); + + for (i = 0; i < (1 << kNumAlignBits); i++) + p->posAlignEncoder[i] = kProbInitValue; + + for (i = 0; i < kNumStates; i++) + { + unsigned j; + for (j = 0; j < LZMA_NUM_PB_STATES_MAX; j++) + { + p->isMatch[i][j] = kProbInitValue; + p->isRep0Long[i][j] = kProbInitValue; + } + p->isRep[i] = kProbInitValue; + p->isRepG0[i] = kProbInitValue; + p->isRepG1[i] = kProbInitValue; + p->isRepG2[i] = kProbInitValue; + } + + { + for (i = 0; i < kNumLenToPosStates; i++) + { + CLzmaProb *probs = p->posSlotEncoder[i]; + unsigned j; + for (j = 0; j < (1 << kNumPosSlotBits); j++) + probs[j] = kProbInitValue; + } + } + { + for (i = 0; i < kNumFullDistances; i++) + p->posEncoders[i] = kProbInitValue; + } + + { + UInt32 num = (UInt32)0x300 << (p->lp + p->lc); + UInt32 k; + CLzmaProb *probs = p->litProbs; + for (k = 0; k < num; k++) + probs[k] = kProbInitValue; + } + + + LenEnc_Init(&p->lenProbs); + LenEnc_Init(&p->repLenProbs); + + p->optEnd = 0; + p->optCur = 0; + + { + for (i = 0; i < kNumOpts; i++) + p->opt[i].price = kInfinityPrice; + } + + p->additionalOffset = 0; + + p->pbMask = (1 << p->pb) - 1; + p->lpMask = ((UInt32)0x100 << p->lp) - ((unsigned)0x100 >> p->lc); +} + + +void LzmaEnc_InitPrices(CLzmaEnc *p) +{ + if (!p->fastMode) + { + FillDistancesPrices(p); + FillAlignPrices(p); + } + + p->lenEnc.tableSize = + p->repLenEnc.tableSize = + p->numFastBytes + 1 - LZMA_MATCH_LEN_MIN; + + p->repLenEncCounter = REP_LEN_COUNT; + + LenPriceEnc_UpdateTables(&p->lenEnc, 1 << p->pb, &p->lenProbs, p->ProbPrices); + LenPriceEnc_UpdateTables(&p->repLenEnc, 1 << p->pb, &p->repLenProbs, p->ProbPrices); +} + +static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig) +{ + unsigned i; + for (i = kEndPosModelIndex / 2; i < kDicLogSizeMax; i++) + if (p->dictSize <= ((UInt32)1 << i)) + break; + p->distTableSize = i * 2; + + p->finished = False; + p->result = SZ_OK; + RINOK(LzmaEnc_Alloc(p, keepWindowSize, alloc, allocBig)); + LzmaEnc_Init(p); + LzmaEnc_InitPrices(p); + p->nowPos64 = 0; + return SZ_OK; +} + +static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, + ISzAllocPtr alloc, ISzAllocPtr allocBig) +{ + CLzmaEnc *p = (CLzmaEnc *)pp; + p->matchFinderBase.stream = inStream; + p->needInit = 1; + p->rc.outStream = outStream; + return LzmaEnc_AllocAndInit(p, 0, alloc, allocBig); +} + +SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp, + ISeqInStream *inStream, UInt32 keepWindowSize, + ISzAllocPtr alloc, ISzAllocPtr allocBig) +{ + CLzmaEnc *p = (CLzmaEnc *)pp; + p->matchFinderBase.stream = inStream; + p->needInit = 1; + return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig); +} + +static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen) +{ + p->matchFinderBase.directInput = 1; + p->matchFinderBase.bufferBase = (Byte *)src; + p->matchFinderBase.directInputRem = srcLen; +} + +SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen, + UInt32 keepWindowSize, ISzAllocPtr 
alloc, ISzAllocPtr allocBig) +{ + CLzmaEnc *p = (CLzmaEnc *)pp; + LzmaEnc_SetInputBuf(p, src, srcLen); + p->needInit = 1; + + LzmaEnc_SetDataSize(pp, srcLen); + return LzmaEnc_AllocAndInit(p, keepWindowSize, alloc, allocBig); +} + +void LzmaEnc_Finish(CLzmaEncHandle pp) +{ + #ifndef _7ZIP_ST + CLzmaEnc *p = (CLzmaEnc *)pp; + if (p->mtMode) + MatchFinderMt_ReleaseStream(&p->matchFinderMt); + #else + UNUSED_VAR(pp); + #endif +} + + +typedef struct +{ + ISeqOutStream vt; + Byte *data; + SizeT rem; + BoolInt overflow; +} CLzmaEnc_SeqOutStreamBuf; + +static size_t SeqOutStreamBuf_Write(const ISeqOutStream *pp, const void *data, size_t size) +{ + CLzmaEnc_SeqOutStreamBuf *p = CONTAINER_FROM_VTBL(pp, CLzmaEnc_SeqOutStreamBuf, vt); + if (p->rem < size) + { + size = p->rem; + p->overflow = True; + } + memcpy(p->data, data, size); + p->rem -= size; + p->data += size; + return size; +} + + +UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp) +{ + const CLzmaEnc *p = (CLzmaEnc *)pp; + return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj); +} + + +const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp) +{ + const CLzmaEnc *p = (CLzmaEnc *)pp; + return p->matchFinder.GetPointerToCurrentPos(p->matchFinderObj) - p->additionalOffset; +} + + +SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit, + Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize) +{ + CLzmaEnc *p = (CLzmaEnc *)pp; + UInt64 nowPos64; + SRes res; + CLzmaEnc_SeqOutStreamBuf outStream; + + outStream.vt.Write = SeqOutStreamBuf_Write; + outStream.data = dest; + outStream.rem = *destLen; + outStream.overflow = False; + + p->writeEndMark = False; + p->finished = False; + p->result = SZ_OK; + + if (reInit) + LzmaEnc_Init(p); + LzmaEnc_InitPrices(p); + + nowPos64 = p->nowPos64; + RangeEnc_Init(&p->rc); + p->rc.outStream = &outStream.vt; + + if (desiredPackSize == 0) + return SZ_ERROR_OUTPUT_EOF; + + res = LzmaEnc_CodeOneBlock(p, desiredPackSize, *unpackSize); + + *unpackSize = (UInt32)(p->nowPos64 - nowPos64); + *destLen -= outStream.rem; + if (outStream.overflow) + return SZ_ERROR_OUTPUT_EOF; + + return res; +} + + +static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress) +{ + SRes res = SZ_OK; + + #ifndef _7ZIP_ST + Byte allocaDummy[0x300]; + allocaDummy[0] = 0; + allocaDummy[1] = allocaDummy[0]; + #endif + + for (;;) + { + res = LzmaEnc_CodeOneBlock(p, 0, 0); + if (res != SZ_OK || p->finished) + break; + if (progress) + { + res = ICompressProgress_Progress(progress, p->nowPos64, RangeEnc_GetProcessed(&p->rc)); + if (res != SZ_OK) + { + res = SZ_ERROR_PROGRESS; + break; + } + } + } + + LzmaEnc_Finish(p); + + /* + if (res == SZ_OK && !Inline_MatchFinder_IsFinishedOK(&p->matchFinderBase)) + res = SZ_ERROR_FAIL; + } + */ + + return res; +} + + +SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress, + ISzAllocPtr alloc, ISzAllocPtr allocBig) +{ + RINOK(LzmaEnc_Prepare(pp, outStream, inStream, alloc, allocBig)); + return LzmaEnc_Encode2((CLzmaEnc *)pp, progress); +} + + +SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size) +{ + CLzmaEnc *p = (CLzmaEnc *)pp; + unsigned i; + UInt32 dictSize = p->dictSize; + if (*size < LZMA_PROPS_SIZE) + return SZ_ERROR_PARAM; + *size = LZMA_PROPS_SIZE; + props[0] = (Byte)((p->pb * 5 + p->lp) * 9 + p->lc); + + if (dictSize >= ((UInt32)1 << 22)) + { + UInt32 kDictMask = ((UInt32)1 << 20) - 1; + if (dictSize < (UInt32)0xFFFFFFFF - kDictMask) + dictSize = (dictSize + kDictMask) & ~kDictMask; + 
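+ /* i.e. dictionaries of 4 MB and up are rounded up to a 1 MB multiple; the else-branch below rounds smaller ones up to the next (2 << i) or (3 << i) */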
} + else for (i = 11; i <= 30; i++) + { + if (dictSize <= ((UInt32)2 << i)) { dictSize = (2 << i); break; } + if (dictSize <= ((UInt32)3 << i)) { dictSize = (3 << i); break; } + } + + for (i = 0; i < 4; i++) + props[1 + i] = (Byte)(dictSize >> (8 * i)); + return SZ_OK; +} + + +unsigned LzmaEnc_IsWriteEndMark(CLzmaEncHandle pp) +{ + return ((CLzmaEnc *)pp)->writeEndMark; +} + + +SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, + int writeEndMark, ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig) +{ + SRes res; + CLzmaEnc *p = (CLzmaEnc *)pp; + + CLzmaEnc_SeqOutStreamBuf outStream; + + outStream.vt.Write = SeqOutStreamBuf_Write; + outStream.data = dest; + outStream.rem = *destLen; + outStream.overflow = False; + + p->writeEndMark = writeEndMark; + p->rc.outStream = &outStream.vt; + + res = LzmaEnc_MemPrepare(pp, src, srcLen, 0, alloc, allocBig); + + if (res == SZ_OK) + { + res = LzmaEnc_Encode2(p, progress); + if (res == SZ_OK && p->nowPos64 != srcLen) + res = SZ_ERROR_FAIL; + } + + *destLen -= outStream.rem; + if (outStream.overflow) + return SZ_ERROR_OUTPUT_EOF; + return res; +} + + +SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, + const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark, + ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig) +{ + CLzmaEnc *p = (CLzmaEnc *)LzmaEnc_Create(alloc); + SRes res; + if (!p) + return SZ_ERROR_MEM; + + res = LzmaEnc_SetProps(p, props); + if (res == SZ_OK) + { + res = LzmaEnc_WriteProperties(p, propsEncoded, propsSize); + if (res == SZ_OK) + res = LzmaEnc_MemEncode(p, dest, destLen, src, srcLen, + writeEndMark, progress, alloc, allocBig); + } + + LzmaEnc_Destroy(p, alloc, allocBig); + return res; +} diff --git a/3rdparty/7z/src/LzmaEnc.h b/3rdparty/7z/src/LzmaEnc.h new file mode 100644 index 0000000000..c9938f04bc --- /dev/null +++ b/3rdparty/7z/src/LzmaEnc.h @@ -0,0 +1,76 @@ +/* LzmaEnc.h -- LZMA Encoder +2017-07-27 : Igor Pavlov : Public domain */ + +#ifndef __LZMA_ENC_H +#define __LZMA_ENC_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +#define LZMA_PROPS_SIZE 5 + +typedef struct _CLzmaEncProps +{ + int level; /* 0 <= level <= 9 */ + UInt32 dictSize; /* (1 << 12) <= dictSize <= (1 << 27) for 32-bit version + (1 << 12) <= dictSize <= (3 << 29) for 64-bit version + default = (1 << 24) */ + int lc; /* 0 <= lc <= 8, default = 3 */ + int lp; /* 0 <= lp <= 4, default = 0 */ + int pb; /* 0 <= pb <= 4, default = 2 */ + int algo; /* 0 - fast, 1 - normal, default = 1 */ + int fb; /* 5 <= fb <= 273, default = 32 */ + int btMode; /* 0 - hashChain Mode, 1 - binTree mode - normal, default = 1 */ + int numHashBytes; /* 2, 3 or 4, default = 4 */ + UInt32 mc; /* 1 <= mc <= (1 << 30), default = 32 */ + unsigned writeEndMark; /* 0 - do not write EOPM, 1 - write EOPM, default = 0 */ + int numThreads; /* 1 or 2, default = 2 */ + + UInt64 reduceSize; /* estimated size of data that will be compressed. default = (UInt64)(Int64)-1. 
+ Encoder uses this value to reduce dictionary size */ +} CLzmaEncProps; + +void LzmaEncProps_Init(CLzmaEncProps *p); +void LzmaEncProps_Normalize(CLzmaEncProps *p); +UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2); + + +/* ---------- CLzmaEncHandle Interface ---------- */ + +/* LzmaEnc* functions can return the following exit codes: +SRes: + SZ_OK - OK + SZ_ERROR_MEM - Memory allocation error + SZ_ERROR_PARAM - Incorrect paramater in props + SZ_ERROR_WRITE - ISeqOutStream write callback error + SZ_ERROR_OUTPUT_EOF - output buffer overflow - version with (Byte *) output + SZ_ERROR_PROGRESS - some break from progress callback + SZ_ERROR_THREAD - error in multithreading functions (only for Mt version) +*/ + +typedef void * CLzmaEncHandle; + +CLzmaEncHandle LzmaEnc_Create(ISzAllocPtr alloc); +void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAllocPtr alloc, ISzAllocPtr allocBig); + +SRes LzmaEnc_SetProps(CLzmaEncHandle p, const CLzmaEncProps *props); +void LzmaEnc_SetDataSize(CLzmaEncHandle p, UInt64 expectedDataSiize); +SRes LzmaEnc_WriteProperties(CLzmaEncHandle p, Byte *properties, SizeT *size); +unsigned LzmaEnc_IsWriteEndMark(CLzmaEncHandle p); + +SRes LzmaEnc_Encode(CLzmaEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream, + ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig); +SRes LzmaEnc_MemEncode(CLzmaEncHandle p, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, + int writeEndMark, ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig); + + +/* ---------- One Call Interface ---------- */ + +SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen, + const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark, + ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/LzmaLib.c b/3rdparty/7z/src/LzmaLib.c new file mode 100644 index 0000000000..c10cf1a0f2 --- /dev/null +++ b/3rdparty/7z/src/LzmaLib.c @@ -0,0 +1,40 @@ +/* LzmaLib.c -- LZMA library wrapper +2015-06-13 : Igor Pavlov : Public domain */ + +#include "Alloc.h" +#include "LzmaDec.h" +#include "LzmaEnc.h" +#include "LzmaLib.h" + +MY_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t srcLen, + unsigned char *outProps, size_t *outPropsSize, + int level, /* 0 <= level <= 9, default = 5 */ + unsigned dictSize, /* use (1 << N) or (3 << N). 
4 KB < dictSize <= 128 MB */ + int lc, /* 0 <= lc <= 8, default = 3 */ + int lp, /* 0 <= lp <= 4, default = 0 */ + int pb, /* 0 <= pb <= 4, default = 2 */ + int fb, /* 5 <= fb <= 273, default = 32 */ + int numThreads /* 1 or 2, default = 2 */ +) +{ + CLzmaEncProps props; + LzmaEncProps_Init(&props); + props.level = level; + props.dictSize = dictSize; + props.lc = lc; + props.lp = lp; + props.pb = pb; + props.fb = fb; + props.numThreads = numThreads; + + return LzmaEncode(dest, destLen, src, srcLen, &props, outProps, outPropsSize, 0, + NULL, &g_Alloc, &g_Alloc); +} + + +MY_STDAPI LzmaUncompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t *srcLen, + const unsigned char *props, size_t propsSize) +{ + ELzmaStatus status; + return LzmaDecode(dest, destLen, src, srcLen, props, (unsigned)propsSize, LZMA_FINISH_ANY, &status, &g_Alloc); +} diff --git a/3rdparty/7z/src/LzmaLib.h b/3rdparty/7z/src/LzmaLib.h new file mode 100644 index 0000000000..5c35e53654 --- /dev/null +++ b/3rdparty/7z/src/LzmaLib.h @@ -0,0 +1,131 @@ +/* LzmaLib.h -- LZMA library interface +2013-01-18 : Igor Pavlov : Public domain */ + +#ifndef __LZMA_LIB_H +#define __LZMA_LIB_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +#define MY_STDAPI int MY_STD_CALL + +#define LZMA_PROPS_SIZE 5 + +/* +RAM requirements for LZMA: + for compression: (dictSize * 11.5 + 6 MB) + state_size + for decompression: dictSize + state_size + state_size = (4 + (1.5 << (lc + lp))) KB + by default (lc=3, lp=0), state_size = 16 KB. + +LZMA properties (5 bytes) format + Offset Size Description + 0 1 lc, lp and pb in encoded form. + 1 4 dictSize (little endian). +*/ + +/* +LzmaCompress +------------ + +outPropsSize - + In: the pointer to the size of outProps buffer; *outPropsSize = LZMA_PROPS_SIZE = 5. + Out: the pointer to the size of written properties in outProps buffer; *outPropsSize = LZMA_PROPS_SIZE = 5. + + LZMA Encoder will use defult values for any parameter, if it is + -1 for any from: level, loc, lp, pb, fb, numThreads + 0 for dictSize + +level - compression level: 0 <= level <= 9; + + level dictSize algo fb + 0: 16 KB 0 32 + 1: 64 KB 0 32 + 2: 256 KB 0 32 + 3: 1 MB 0 32 + 4: 4 MB 0 32 + 5: 16 MB 1 32 + 6: 32 MB 1 32 + 7+: 64 MB 1 64 + + The default value for "level" is 5. + + algo = 0 means fast method + algo = 1 means normal method + +dictSize - The dictionary size in bytes. The maximum value is + 128 MB = (1 << 27) bytes for 32-bit version + 1 GB = (1 << 30) bytes for 64-bit version + The default value is 16 MB = (1 << 24) bytes. + It's recommended to use the dictionary that is larger than 4 KB and + that can be calculated as (1 << N) or (3 << N) sizes. + +lc - The number of literal context bits (high bits of previous literal). + It can be in the range from 0 to 8. The default value is 3. + Sometimes lc=4 gives the gain for big files. + +lp - The number of literal pos bits (low bits of current position for literals). + It can be in the range from 0 to 4. The default value is 0. + The lp switch is intended for periodical data when the period is equal to 2^lp. + For example, for 32-bit (4 bytes) periodical data you can use lp=2. Often it's + better to set lc=0, if you change lp switch. + +pb - The number of pos bits (low bits of current position). + It can be in the range from 0 to 4. The default value is 2. + The pb switch is intended for periodical data when the period is equal 2^pb. + +fb - Word size (the number of fast bytes). + It can be in the range from 5 to 273. The default value is 32. 
+ Usually, a big number gives a little bit better compression ratio and + slower compression process. + +numThreads - The number of thereads. 1 or 2. The default value is 2. + Fast mode (algo = 0) can use only 1 thread. + +Out: + destLen - processed output size +Returns: + SZ_OK - OK + SZ_ERROR_MEM - Memory allocation error + SZ_ERROR_PARAM - Incorrect paramater + SZ_ERROR_OUTPUT_EOF - output buffer overflow + SZ_ERROR_THREAD - errors in multithreading functions (only for Mt version) +*/ + +MY_STDAPI LzmaCompress(unsigned char *dest, size_t *destLen, const unsigned char *src, size_t srcLen, + unsigned char *outProps, size_t *outPropsSize, /* *outPropsSize must be = 5 */ + int level, /* 0 <= level <= 9, default = 5 */ + unsigned dictSize, /* default = (1 << 24) */ + int lc, /* 0 <= lc <= 8, default = 3 */ + int lp, /* 0 <= lp <= 4, default = 0 */ + int pb, /* 0 <= pb <= 4, default = 2 */ + int fb, /* 5 <= fb <= 273, default = 32 */ + int numThreads /* 1 or 2, default = 2 */ + ); + +/* +LzmaUncompress +-------------- +In: + dest - output data + destLen - output data size + src - input data + srcLen - input data size +Out: + destLen - processed output size + srcLen - processed input size +Returns: + SZ_OK - OK + SZ_ERROR_DATA - Data error + SZ_ERROR_MEM - Memory allocation arror + SZ_ERROR_UNSUPPORTED - Unsupported properties + SZ_ERROR_INPUT_EOF - it needs more bytes in input buffer (src) +*/ + +MY_STDAPI LzmaUncompress(unsigned char *dest, size_t *destLen, const unsigned char *src, SizeT *srcLen, + const unsigned char *props, size_t propsSize); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/MtCoder.c b/3rdparty/7z/src/MtCoder.c new file mode 100644 index 0000000000..5667f2d5b5 --- /dev/null +++ b/3rdparty/7z/src/MtCoder.c @@ -0,0 +1,601 @@ +/* MtCoder.c -- Multi-thread Coder +2018-07-04 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "MtCoder.h" + +#ifndef _7ZIP_ST + +SRes MtProgressThunk_Progress(const ICompressProgress *pp, UInt64 inSize, UInt64 outSize) +{ + CMtProgressThunk *thunk = CONTAINER_FROM_VTBL(pp, CMtProgressThunk, vt); + UInt64 inSize2 = 0; + UInt64 outSize2 = 0; + if (inSize != (UInt64)(Int64)-1) + { + inSize2 = inSize - thunk->inSize; + thunk->inSize = inSize; + } + if (outSize != (UInt64)(Int64)-1) + { + outSize2 = outSize - thunk->outSize; + thunk->outSize = outSize; + } + return MtProgress_ProgressAdd(thunk->mtProgress, inSize2, outSize2); +} + + +void MtProgressThunk_CreateVTable(CMtProgressThunk *p) +{ + p->vt.Progress = MtProgressThunk_Progress; +} + + + +#define RINOK_THREAD(x) { if ((x) != 0) return SZ_ERROR_THREAD; } + + +static WRes ArEvent_OptCreate_And_Reset(CEvent *p) +{ + if (Event_IsCreated(p)) + return Event_Reset(p); + return AutoResetEvent_CreateNotSignaled(p); +} + + +static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE ThreadFunc(void *pp); + + +static SRes MtCoderThread_CreateAndStart(CMtCoderThread *t) +{ + WRes wres = ArEvent_OptCreate_And_Reset(&t->startEvent); + if (wres == 0) + { + t->stop = False; + if (!Thread_WasCreated(&t->thread)) + wres = Thread_Create(&t->thread, ThreadFunc, t); + if (wres == 0) + wres = Event_Set(&t->startEvent); + } + if (wres == 0) + return SZ_OK; + return MY_SRes_HRESULT_FROM_WRes(wres); +} + + +static void MtCoderThread_Destruct(CMtCoderThread *t) +{ + if (Thread_WasCreated(&t->thread)) + { + t->stop = 1; + Event_Set(&t->startEvent); + Thread_Wait(&t->thread); + Thread_Close(&t->thread); + } + + Event_Close(&t->startEvent); + + if (t->inBuf) + { + ISzAlloc_Free(t->mtCoder->allocBig, t->inBuf); + 
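+ /* clear the pointer so a repeated Destruct call stays harmless */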
t->inBuf = NULL; + } +} + + + +static SRes FullRead(ISeqInStream *stream, Byte *data, size_t *processedSize) +{ + size_t size = *processedSize; + *processedSize = 0; + while (size != 0) + { + size_t cur = size; + SRes res = ISeqInStream_Read(stream, data, &cur); + *processedSize += cur; + data += cur; + size -= cur; + RINOK(res); + if (cur == 0) + return SZ_OK; + } + return SZ_OK; +} + + +/* + ThreadFunc2() returns: + SZ_OK - in all normal cases (even for stream error or memory allocation error) + SZ_ERROR_THREAD - in case of failure in system synch function +*/ + +static SRes ThreadFunc2(CMtCoderThread *t) +{ + CMtCoder *mtc = t->mtCoder; + + for (;;) + { + unsigned bi; + SRes res; + SRes res2; + BoolInt finished; + unsigned bufIndex; + size_t size; + const Byte *inData; + UInt64 readProcessed = 0; + + RINOK_THREAD(Event_Wait(&mtc->readEvent)) + + /* after Event_Wait(&mtc->readEvent) we must call Event_Set(&mtc->readEvent) in any case to unlock another threads */ + + if (mtc->stopReading) + { + return Event_Set(&mtc->readEvent) == 0 ? SZ_OK : SZ_ERROR_THREAD; + } + + res = MtProgress_GetError(&mtc->mtProgress); + + size = 0; + inData = NULL; + finished = True; + + if (res == SZ_OK) + { + size = mtc->blockSize; + if (mtc->inStream) + { + if (!t->inBuf) + { + t->inBuf = (Byte *)ISzAlloc_Alloc(mtc->allocBig, mtc->blockSize); + if (!t->inBuf) + res = SZ_ERROR_MEM; + } + if (res == SZ_OK) + { + res = FullRead(mtc->inStream, t->inBuf, &size); + readProcessed = mtc->readProcessed + size; + mtc->readProcessed = readProcessed; + } + if (res != SZ_OK) + { + mtc->readRes = res; + /* after reading error - we can stop encoding of previous blocks */ + MtProgress_SetError(&mtc->mtProgress, res); + } + else + finished = (size != mtc->blockSize); + } + else + { + size_t rem; + readProcessed = mtc->readProcessed; + rem = mtc->inDataSize - (size_t)readProcessed; + if (size > rem) + size = rem; + inData = mtc->inData + (size_t)readProcessed; + readProcessed += size; + mtc->readProcessed = readProcessed; + finished = (mtc->inDataSize == (size_t)readProcessed); + } + } + + /* we must get some block from blocksSemaphore before Event_Set(&mtc->readEvent) */ + + res2 = SZ_OK; + + if (Semaphore_Wait(&mtc->blocksSemaphore) != 0) + { + res2 = SZ_ERROR_THREAD; + if (res == SZ_OK) + { + res = res2; + // MtProgress_SetError(&mtc->mtProgress, res); + } + } + + bi = mtc->blockIndex; + + if (++mtc->blockIndex >= mtc->numBlocksMax) + mtc->blockIndex = 0; + + bufIndex = (unsigned)(int)-1; + + if (res == SZ_OK) + res = MtProgress_GetError(&mtc->mtProgress); + + if (res != SZ_OK) + finished = True; + + if (!finished) + { + if (mtc->numStartedThreads < mtc->numStartedThreadsLimit + && mtc->expectedDataSize != readProcessed) + { + res = MtCoderThread_CreateAndStart(&mtc->threads[mtc->numStartedThreads]); + if (res == SZ_OK) + mtc->numStartedThreads++; + else + { + MtProgress_SetError(&mtc->mtProgress, res); + finished = True; + } + } + } + + if (finished) + mtc->stopReading = True; + + RINOK_THREAD(Event_Set(&mtc->readEvent)) + + if (res2 != SZ_OK) + return res2; + + if (res == SZ_OK) + { + CriticalSection_Enter(&mtc->cs); + bufIndex = mtc->freeBlockHead; + mtc->freeBlockHead = mtc->freeBlockList[bufIndex]; + CriticalSection_Leave(&mtc->cs); + + res = mtc->mtCallback->Code(mtc->mtCallbackObject, t->index, bufIndex, + mtc->inStream ? 
t->inBuf : inData, size, finished); + + // MtProgress_Reinit(&mtc->mtProgress, t->index); + + if (res != SZ_OK) + MtProgress_SetError(&mtc->mtProgress, res); + } + + { + CMtCoderBlock *block = &mtc->blocks[bi]; + block->res = res; + block->bufIndex = bufIndex; + block->finished = finished; + } + + #ifdef MTCODER__USE_WRITE_THREAD + RINOK_THREAD(Event_Set(&mtc->writeEvents[bi])) + #else + { + unsigned wi; + { + CriticalSection_Enter(&mtc->cs); + wi = mtc->writeIndex; + if (wi == bi) + mtc->writeIndex = (unsigned)(int)-1; + else + mtc->ReadyBlocks[bi] = True; + CriticalSection_Leave(&mtc->cs); + } + + if (wi != bi) + { + if (res != SZ_OK || finished) + return 0; + continue; + } + + if (mtc->writeRes != SZ_OK) + res = mtc->writeRes; + + for (;;) + { + if (res == SZ_OK && bufIndex != (unsigned)(int)-1) + { + res = mtc->mtCallback->Write(mtc->mtCallbackObject, bufIndex); + if (res != SZ_OK) + { + mtc->writeRes = res; + MtProgress_SetError(&mtc->mtProgress, res); + } + } + + if (++wi >= mtc->numBlocksMax) + wi = 0; + { + BoolInt isReady; + + CriticalSection_Enter(&mtc->cs); + + if (bufIndex != (unsigned)(int)-1) + { + mtc->freeBlockList[bufIndex] = mtc->freeBlockHead; + mtc->freeBlockHead = bufIndex; + } + + isReady = mtc->ReadyBlocks[wi]; + + if (isReady) + mtc->ReadyBlocks[wi] = False; + else + mtc->writeIndex = wi; + + CriticalSection_Leave(&mtc->cs); + + RINOK_THREAD(Semaphore_Release1(&mtc->blocksSemaphore)) + + if (!isReady) + break; + } + + { + CMtCoderBlock *block = &mtc->blocks[wi]; + if (res == SZ_OK && block->res != SZ_OK) + res = block->res; + bufIndex = block->bufIndex; + finished = block->finished; + } + } + } + #endif + + if (finished || res != SZ_OK) + return 0; + } +} + + +static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE ThreadFunc(void *pp) +{ + CMtCoderThread *t = (CMtCoderThread *)pp; + for (;;) + { + if (Event_Wait(&t->startEvent) != 0) + return SZ_ERROR_THREAD; + if (t->stop) + return 0; + { + SRes res = ThreadFunc2(t); + CMtCoder *mtc = t->mtCoder; + if (res != SZ_OK) + { + MtProgress_SetError(&mtc->mtProgress, res); + } + + #ifndef MTCODER__USE_WRITE_THREAD + { + unsigned numFinished = (unsigned)InterlockedIncrement(&mtc->numFinishedThreads); + if (numFinished == mtc->numStartedThreads) + if (Event_Set(&mtc->finishedEvent) != 0) + return SZ_ERROR_THREAD; + } + #endif + } + } +} + + + +void MtCoder_Construct(CMtCoder *p) +{ + unsigned i; + + p->blockSize = 0; + p->numThreadsMax = 0; + p->expectedDataSize = (UInt64)(Int64)-1; + + p->inStream = NULL; + p->inData = NULL; + p->inDataSize = 0; + + p->progress = NULL; + p->allocBig = NULL; + + p->mtCallback = NULL; + p->mtCallbackObject = NULL; + + p->allocatedBufsSize = 0; + + Event_Construct(&p->readEvent); + Semaphore_Construct(&p->blocksSemaphore); + + for (i = 0; i < MTCODER__THREADS_MAX; i++) + { + CMtCoderThread *t = &p->threads[i]; + t->mtCoder = p; + t->index = i; + t->inBuf = NULL; + t->stop = False; + Event_Construct(&t->startEvent); + Thread_Construct(&t->thread); + } + + #ifdef MTCODER__USE_WRITE_THREAD + for (i = 0; i < MTCODER__BLOCKS_MAX; i++) + Event_Construct(&p->writeEvents[i]); + #else + Event_Construct(&p->finishedEvent); + #endif + + CriticalSection_Init(&p->cs); + CriticalSection_Init(&p->mtProgress.cs); +} + + + + +static void MtCoder_Free(CMtCoder *p) +{ + unsigned i; + + /* + p->stopReading = True; + if (Event_IsCreated(&p->readEvent)) + Event_Set(&p->readEvent); + */ + + for (i = 0; i < MTCODER__THREADS_MAX; i++) + MtCoderThread_Destruct(&p->threads[i]); + + Event_Close(&p->readEvent); + 
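+ /* the per-thread start events were already closed by MtCoderThread_Destruct above; only the shared synchronization objects remain */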
Semaphore_Close(&p->blocksSemaphore); + + #ifdef MTCODER__USE_WRITE_THREAD + for (i = 0; i < MTCODER__BLOCKS_MAX; i++) + Event_Close(&p->writeEvents[i]); + #else + Event_Close(&p->finishedEvent); + #endif +} + + +void MtCoder_Destruct(CMtCoder *p) +{ + MtCoder_Free(p); + + CriticalSection_Delete(&p->cs); + CriticalSection_Delete(&p->mtProgress.cs); +} + + +SRes MtCoder_Code(CMtCoder *p) +{ + unsigned numThreads = p->numThreadsMax; + unsigned numBlocksMax; + unsigned i; + SRes res = SZ_OK; + + if (numThreads > MTCODER__THREADS_MAX) + numThreads = MTCODER__THREADS_MAX; + numBlocksMax = MTCODER__GET_NUM_BLOCKS_FROM_THREADS(numThreads); + + if (p->blockSize < ((UInt32)1 << 26)) numBlocksMax++; + if (p->blockSize < ((UInt32)1 << 24)) numBlocksMax++; + if (p->blockSize < ((UInt32)1 << 22)) numBlocksMax++; + + if (numBlocksMax > MTCODER__BLOCKS_MAX) + numBlocksMax = MTCODER__BLOCKS_MAX; + + if (p->blockSize != p->allocatedBufsSize) + { + for (i = 0; i < MTCODER__THREADS_MAX; i++) + { + CMtCoderThread *t = &p->threads[i]; + if (t->inBuf) + { + ISzAlloc_Free(p->allocBig, t->inBuf); + t->inBuf = NULL; + } + } + p->allocatedBufsSize = p->blockSize; + } + + p->readRes = SZ_OK; + + MtProgress_Init(&p->mtProgress, p->progress); + + #ifdef MTCODER__USE_WRITE_THREAD + for (i = 0; i < numBlocksMax; i++) + { + RINOK_THREAD(ArEvent_OptCreate_And_Reset(&p->writeEvents[i])); + } + #else + RINOK_THREAD(ArEvent_OptCreate_And_Reset(&p->finishedEvent)); + #endif + + { + RINOK_THREAD(ArEvent_OptCreate_And_Reset(&p->readEvent)); + + if (Semaphore_IsCreated(&p->blocksSemaphore)) + { + RINOK_THREAD(Semaphore_Close(&p->blocksSemaphore)); + } + RINOK_THREAD(Semaphore_Create(&p->blocksSemaphore, numBlocksMax, numBlocksMax)); + } + + for (i = 0; i < MTCODER__BLOCKS_MAX - 1; i++) + p->freeBlockList[i] = i + 1; + p->freeBlockList[MTCODER__BLOCKS_MAX - 1] = (unsigned)(int)-1; + p->freeBlockHead = 0; + + p->readProcessed = 0; + p->blockIndex = 0; + p->numBlocksMax = numBlocksMax; + p->stopReading = False; + + #ifndef MTCODER__USE_WRITE_THREAD + p->writeIndex = 0; + p->writeRes = SZ_OK; + for (i = 0; i < MTCODER__BLOCKS_MAX; i++) + p->ReadyBlocks[i] = False; + p->numFinishedThreads = 0; + #endif + + p->numStartedThreadsLimit = numThreads; + p->numStartedThreads = 0; + + // for (i = 0; i < numThreads; i++) + { + CMtCoderThread *nextThread = &p->threads[p->numStartedThreads++]; + RINOK(MtCoderThread_CreateAndStart(nextThread)); + } + + RINOK_THREAD(Event_Set(&p->readEvent)) + + #ifdef MTCODER__USE_WRITE_THREAD + { + unsigned bi = 0; + + for (;; bi++) + { + if (bi >= numBlocksMax) + bi = 0; + + RINOK_THREAD(Event_Wait(&p->writeEvents[bi])) + + { + const CMtCoderBlock *block = &p->blocks[bi]; + unsigned bufIndex = block->bufIndex; + BoolInt finished = block->finished; + if (res == SZ_OK && block->res != SZ_OK) + res = block->res; + + if (bufIndex != (unsigned)(int)-1) + { + if (res == SZ_OK) + { + res = p->mtCallback->Write(p->mtCallbackObject, bufIndex); + if (res != SZ_OK) + MtProgress_SetError(&p->mtProgress, res); + } + + CriticalSection_Enter(&p->cs); + { + p->freeBlockList[bufIndex] = p->freeBlockHead; + p->freeBlockHead = bufIndex; + } + CriticalSection_Leave(&p->cs); + } + + RINOK_THREAD(Semaphore_Release1(&p->blocksSemaphore)) + + if (finished) + break; + } + } + } + #else + { + WRes wres = Event_Wait(&p->finishedEvent); + res = MY_SRes_HRESULT_FROM_WRes(wres); + } + #endif + + if (res == SZ_OK) + res = p->readRes; + + if (res == SZ_OK) + res = p->mtProgress.res; + + #ifndef MTCODER__USE_WRITE_THREAD + if (res == SZ_OK) + 
res = p->writeRes; + #endif + + if (res != SZ_OK) + MtCoder_Free(p); + return res; +} + +#endif diff --git a/3rdparty/7z/src/MtCoder.h b/3rdparty/7z/src/MtCoder.h new file mode 100644 index 0000000000..603329d367 --- /dev/null +++ b/3rdparty/7z/src/MtCoder.h @@ -0,0 +1,141 @@ +/* MtCoder.h -- Multi-thread Coder +2018-07-04 : Igor Pavlov : Public domain */ + +#ifndef __MT_CODER_H +#define __MT_CODER_H + +#include "MtDec.h" + +EXTERN_C_BEGIN + +/* + if ( defined MTCODER__USE_WRITE_THREAD) : main thread writes all data blocks to output stream + if (not defined MTCODER__USE_WRITE_THREAD) : any coder thread can write data blocks to output stream +*/ +/* #define MTCODER__USE_WRITE_THREAD */ + +#ifndef _7ZIP_ST + #define MTCODER__GET_NUM_BLOCKS_FROM_THREADS(numThreads) ((numThreads) + (numThreads) / 8 + 1) + #define MTCODER__THREADS_MAX 64 + #define MTCODER__BLOCKS_MAX (MTCODER__GET_NUM_BLOCKS_FROM_THREADS(MTCODER__THREADS_MAX) + 3) +#else + #define MTCODER__THREADS_MAX 1 + #define MTCODER__BLOCKS_MAX 1 +#endif + + +#ifndef _7ZIP_ST + + +typedef struct +{ + ICompressProgress vt; + CMtProgress *mtProgress; + UInt64 inSize; + UInt64 outSize; +} CMtProgressThunk; + +void MtProgressThunk_CreateVTable(CMtProgressThunk *p); + +#define MtProgressThunk_Init(p) { (p)->inSize = 0; (p)->outSize = 0; } + + +struct _CMtCoder; + + +typedef struct +{ + struct _CMtCoder *mtCoder; + unsigned index; + int stop; + Byte *inBuf; + + CAutoResetEvent startEvent; + CThread thread; +} CMtCoderThread; + + +typedef struct +{ + SRes (*Code)(void *p, unsigned coderIndex, unsigned outBufIndex, + const Byte *src, size_t srcSize, int finished); + SRes (*Write)(void *p, unsigned outBufIndex); +} IMtCoderCallback2; + + +typedef struct +{ + SRes res; + unsigned bufIndex; + BoolInt finished; +} CMtCoderBlock; + + +typedef struct _CMtCoder +{ + /* input variables */ + + size_t blockSize; /* size of input block */ + unsigned numThreadsMax; + UInt64 expectedDataSize; + + ISeqInStream *inStream; + const Byte *inData; + size_t inDataSize; + + ICompressProgress *progress; + ISzAllocPtr allocBig; + + IMtCoderCallback2 *mtCallback; + void *mtCallbackObject; + + + /* internal variables */ + + size_t allocatedBufsSize; + + CAutoResetEvent readEvent; + CSemaphore blocksSemaphore; + + BoolInt stopReading; + SRes readRes; + + #ifdef MTCODER__USE_WRITE_THREAD + CAutoResetEvent writeEvents[MTCODER__BLOCKS_MAX]; + #else + CAutoResetEvent finishedEvent; + SRes writeRes; + unsigned writeIndex; + Byte ReadyBlocks[MTCODER__BLOCKS_MAX]; + LONG numFinishedThreads; + #endif + + unsigned numStartedThreadsLimit; + unsigned numStartedThreads; + + unsigned numBlocksMax; + unsigned blockIndex; + UInt64 readProcessed; + + CCriticalSection cs; + + unsigned freeBlockHead; + unsigned freeBlockList[MTCODER__BLOCKS_MAX]; + + CMtProgress mtProgress; + CMtCoderBlock blocks[MTCODER__BLOCKS_MAX]; + CMtCoderThread threads[MTCODER__THREADS_MAX]; +} CMtCoder; + + +void MtCoder_Construct(CMtCoder *p); +void MtCoder_Destruct(CMtCoder *p); +SRes MtCoder_Code(CMtCoder *p); + + +#endif + + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/MtDec.c b/3rdparty/7z/src/MtDec.c new file mode 100644 index 0000000000..25a8b046d9 --- /dev/null +++ b/3rdparty/7z/src/MtDec.c @@ -0,0 +1,1138 @@ +/* MtDec.c -- Multi-thread Decoder +2019-02-02 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +// #define SHOW_DEBUG_INFO + +// #include + +#ifdef SHOW_DEBUG_INFO +#include +#endif + +#ifdef SHOW_DEBUG_INFO +#define PRF(x) x +#else +#define PRF(x) +#endif + +#define PRF_STR_INT(s, 
d) PRF(printf("\n" s " %d\n", (unsigned)d)) + +#include "MtDec.h" + +#ifndef _7ZIP_ST + +void MtProgress_Init(CMtProgress *p, ICompressProgress *progress) +{ + p->progress = progress; + p->res = SZ_OK; + p->totalInSize = 0; + p->totalOutSize = 0; +} + + +SRes MtProgress_Progress_ST(CMtProgress *p) +{ + if (p->res == SZ_OK && p->progress) + if (ICompressProgress_Progress(p->progress, p->totalInSize, p->totalOutSize) != SZ_OK) + p->res = SZ_ERROR_PROGRESS; + return p->res; +} + + +SRes MtProgress_ProgressAdd(CMtProgress *p, UInt64 inSize, UInt64 outSize) +{ + SRes res; + CriticalSection_Enter(&p->cs); + + p->totalInSize += inSize; + p->totalOutSize += outSize; + if (p->res == SZ_OK && p->progress) + if (ICompressProgress_Progress(p->progress, p->totalInSize, p->totalOutSize) != SZ_OK) + p->res = SZ_ERROR_PROGRESS; + res = p->res; + + CriticalSection_Leave(&p->cs); + return res; +} + + +SRes MtProgress_GetError(CMtProgress *p) +{ + SRes res; + CriticalSection_Enter(&p->cs); + res = p->res; + CriticalSection_Leave(&p->cs); + return res; +} + + +void MtProgress_SetError(CMtProgress *p, SRes res) +{ + CriticalSection_Enter(&p->cs); + if (p->res == SZ_OK) + p->res = res; + CriticalSection_Leave(&p->cs); +} + + +#define RINOK_THREAD(x) RINOK(x) + + +static WRes ArEvent_OptCreate_And_Reset(CEvent *p) +{ + if (Event_IsCreated(p)) + return Event_Reset(p); + return AutoResetEvent_CreateNotSignaled(p); +} + + +struct __CMtDecBufLink +{ + struct __CMtDecBufLink *next; + void *pad[3]; +}; + +typedef struct __CMtDecBufLink CMtDecBufLink; + +#define MTDEC__LINK_DATA_OFFSET sizeof(CMtDecBufLink) +#define MTDEC__DATA_PTR_FROM_LINK(link) ((Byte *)(link) + MTDEC__LINK_DATA_OFFSET) + + + +static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE ThreadFunc(void *pp); + + +static WRes MtDecThread_CreateEvents(CMtDecThread *t) +{ + WRes wres = ArEvent_OptCreate_And_Reset(&t->canWrite); + if (wres == 0) + { + wres = ArEvent_OptCreate_And_Reset(&t->canRead); + if (wres == 0) + return SZ_OK; + } + return wres; +} + + +static SRes MtDecThread_CreateAndStart(CMtDecThread *t) +{ + WRes wres = MtDecThread_CreateEvents(t); + // wres = 17; // for test + if (wres == 0) + { + if (Thread_WasCreated(&t->thread)) + return SZ_OK; + wres = Thread_Create(&t->thread, ThreadFunc, t); + if (wres == 0) + return SZ_OK; + } + return MY_SRes_HRESULT_FROM_WRes(wres); +} + + +void MtDecThread_FreeInBufs(CMtDecThread *t) +{ + if (t->inBuf) + { + void *link = t->inBuf; + t->inBuf = NULL; + do + { + void *next = ((CMtDecBufLink *)link)->next; + ISzAlloc_Free(t->mtDec->alloc, link); + link = next; + } + while (link); + } +} + + +static void MtDecThread_CloseThread(CMtDecThread *t) +{ + if (Thread_WasCreated(&t->thread)) + { + Event_Set(&t->canWrite); /* we can disable it. 
There are no threads waiting canWrite in normal cases */ + Event_Set(&t->canRead); + Thread_Wait(&t->thread); + Thread_Close(&t->thread); + } + + Event_Close(&t->canRead); + Event_Close(&t->canWrite); +} + +static void MtDec_CloseThreads(CMtDec *p) +{ + unsigned i; + for (i = 0; i < MTDEC__THREADS_MAX; i++) + MtDecThread_CloseThread(&p->threads[i]); +} + +static void MtDecThread_Destruct(CMtDecThread *t) +{ + MtDecThread_CloseThread(t); + MtDecThread_FreeInBufs(t); +} + + + +static SRes FullRead(ISeqInStream *stream, Byte *data, size_t *processedSize) +{ + size_t size = *processedSize; + *processedSize = 0; + while (size != 0) + { + size_t cur = size; + SRes res = ISeqInStream_Read(stream, data, &cur); + *processedSize += cur; + data += cur; + size -= cur; + RINOK(res); + if (cur == 0) + return SZ_OK; + } + return SZ_OK; +} + + +static SRes MtDec_GetError_Spec(CMtDec *p, UInt64 interruptIndex, BoolInt *wasInterrupted) +{ + SRes res; + CriticalSection_Enter(&p->mtProgress.cs); + *wasInterrupted = (p->needInterrupt && interruptIndex > p->interruptIndex); + res = p->mtProgress.res; + CriticalSection_Leave(&p->mtProgress.cs); + return res; +} + +static SRes MtDec_Progress_GetError_Spec(CMtDec *p, UInt64 inSize, UInt64 outSize, UInt64 interruptIndex, BoolInt *wasInterrupted) +{ + SRes res; + CriticalSection_Enter(&p->mtProgress.cs); + + p->mtProgress.totalInSize += inSize; + p->mtProgress.totalOutSize += outSize; + if (p->mtProgress.res == SZ_OK && p->mtProgress.progress) + if (ICompressProgress_Progress(p->mtProgress.progress, p->mtProgress.totalInSize, p->mtProgress.totalOutSize) != SZ_OK) + p->mtProgress.res = SZ_ERROR_PROGRESS; + + *wasInterrupted = (p->needInterrupt && interruptIndex > p->interruptIndex); + res = p->mtProgress.res; + + CriticalSection_Leave(&p->mtProgress.cs); + + return res; +} + +static void MtDec_Interrupt(CMtDec *p, UInt64 interruptIndex) +{ + CriticalSection_Enter(&p->mtProgress.cs); + if (!p->needInterrupt || interruptIndex < p->interruptIndex) + { + p->interruptIndex = interruptIndex; + p->needInterrupt = True; + } + CriticalSection_Leave(&p->mtProgress.cs); +} + +Byte *MtDec_GetCrossBuff(CMtDec *p) +{ + Byte *cr = p->crossBlock; + if (!cr) + { + cr = (Byte *)ISzAlloc_Alloc(p->alloc, MTDEC__LINK_DATA_OFFSET + p->inBufSize); + if (!cr) + return NULL; + p->crossBlock = cr; + } + return MTDEC__DATA_PTR_FROM_LINK(cr); +} + + +/* + ThreadFunc2() returns: + 0 - in all normal cases (even for stream error or memory allocation error) + (!= 0) - WRes error return by system threading function +*/ + +// #define MTDEC_ProgessStep (1 << 22) +#define MTDEC_ProgessStep (1 << 0) + +static WRes ThreadFunc2(CMtDecThread *t) +{ + CMtDec *p = t->mtDec; + + PRF_STR_INT("ThreadFunc2", t->index); + + // SetThreadAffinityMask(GetCurrentThread(), 1 << t->index); + + for (;;) + { + SRes res, codeRes; + BoolInt wasInterrupted, isAllocError, overflow, finish; + SRes threadingErrorSRes; + BoolInt needCode, needWrite, needContinue; + + size_t inDataSize_Start; + UInt64 inDataSize; + // UInt64 inDataSize_Full; + + UInt64 blockIndex; + + UInt64 inPrev = 0; + UInt64 outPrev = 0; + UInt64 inCodePos; + UInt64 outCodePos; + + Byte *afterEndData = NULL; + size_t afterEndData_Size = 0; + + BoolInt canCreateNewThread = False; + // CMtDecCallbackInfo parse; + CMtDecThread *nextThread; + + PRF_STR_INT("Event_Wait(&t->canRead)", t->index); + + RINOK_THREAD(Event_Wait(&t->canRead)); + if (p->exitThread) + return 0; + + PRF_STR_INT("after Event_Wait(&t->canRead)", t->index); + + // if (t->index == 3) return 
19; // for test + + blockIndex = p->blockIndex++; + + // PRF(printf("\ncanRead\n")) + + res = MtDec_Progress_GetError_Spec(p, 0, 0, blockIndex, &wasInterrupted); + + finish = p->readWasFinished; + needCode = False; + needWrite = False; + isAllocError = False; + overflow = False; + + inDataSize_Start = 0; + inDataSize = 0; + // inDataSize_Full = 0; + + if (res == SZ_OK && !wasInterrupted) + { + // if (p->inStream) + { + CMtDecBufLink *prev = NULL; + CMtDecBufLink *link = (CMtDecBufLink *)t->inBuf; + size_t crossSize = p->crossEnd - p->crossStart; + + PRF(printf("\ncrossSize = %d\n", crossSize)); + + for (;;) + { + if (!link) + { + link = (CMtDecBufLink *)ISzAlloc_Alloc(p->alloc, MTDEC__LINK_DATA_OFFSET + p->inBufSize); + if (!link) + { + finish = True; + // p->allocError_for_Read_BlockIndex = blockIndex; + isAllocError = True; + break; + } + link->next = NULL; + if (prev) + { + // static unsigned g_num = 0; + // printf("\n%6d : %x", ++g_num, (unsigned)(size_t)((Byte *)link - (Byte *)prev)); + prev->next = link; + } + else + t->inBuf = (void *)link; + } + + { + Byte *data = MTDEC__DATA_PTR_FROM_LINK(link); + Byte *parseData = data; + size_t size; + + if (crossSize != 0) + { + inDataSize = crossSize; + // inDataSize_Full = inDataSize; + inDataSize_Start = crossSize; + size = crossSize; + parseData = MTDEC__DATA_PTR_FROM_LINK(p->crossBlock) + p->crossStart; + PRF(printf("\ncross : crossStart = %7d crossEnd = %7d finish = %1d", + (int)p->crossStart, (int)p->crossEnd, (int)finish)); + } + else + { + size = p->inBufSize; + + res = FullRead(p->inStream, data, &size); + + // size = 10; // test + + inDataSize += size; + // inDataSize_Full = inDataSize; + if (!prev) + inDataSize_Start = size; + + p->readProcessed += size; + finish = (size != p->inBufSize); + if (finish) + p->readWasFinished = True; + + // res = E_INVALIDARG; // test + + if (res != SZ_OK) + { + // PRF(printf("\nRead error = %d\n", res)) + // we want to decode all data before error + p->readRes = res; + // p->readError_BlockIndex = blockIndex; + p->readWasFinished = True; + finish = True; + res = SZ_OK; + // break; + } + + if (inDataSize - inPrev >= MTDEC_ProgessStep) + { + res = MtDec_Progress_GetError_Spec(p, 0, 0, blockIndex, &wasInterrupted); + if (res != SZ_OK || wasInterrupted) + break; + inPrev = inDataSize; + } + } + + { + CMtDecCallbackInfo parse; + + parse.startCall = (prev == NULL); + parse.src = parseData; + parse.srcSize = size; + parse.srcFinished = finish; + parse.canCreateNewThread = True; + + // PRF(printf("\nParse size = %d\n", (unsigned)size)) + + p->mtCallback->Parse(p->mtCallbackObject, t->index, &parse); + + needWrite = True; + canCreateNewThread = parse.canCreateNewThread; + + // printf("\n\n%12I64u %12I64u", (UInt64)p->mtProgress.totalInSize, (UInt64)p->mtProgress.totalOutSize); + + if ( + // parseRes != SZ_OK || + // inDataSize - (size - parse.srcSize) > p->inBlockMax + // || + parse.state == MTDEC_PARSE_OVERFLOW + // || wasInterrupted + ) + { + // Overflow or Parse error - switch from MT decoding to ST decoding + finish = True; + overflow = True; + + { + PRF(printf("\n Overflow")); + // PRF(printf("\nisBlockFinished = %d", (unsigned)parse.blockWasFinished)); + PRF(printf("\n inDataSize = %d", (unsigned)inDataSize)); + } + + if (crossSize != 0) + memcpy(data, parseData, size); + p->crossStart = 0; + p->crossEnd = 0; + break; + } + + if (crossSize != 0) + { + memcpy(data, parseData, parse.srcSize); + p->crossStart += parse.srcSize; + } + + if (parse.state != MTDEC_PARSE_CONTINUE || finish) + { + // we don't need 
to parse in current thread anymore + + if (parse.state == MTDEC_PARSE_END) + finish = True; + + needCode = True; + // p->crossFinished = finish; + + if (parse.srcSize == size) + { + // full parsed - no cross transfer + p->crossStart = 0; + p->crossEnd = 0; + break; + } + + if (parse.state == MTDEC_PARSE_END) + { + p->crossStart = 0; + p->crossEnd = 0; + + if (crossSize != 0) + memcpy(data + parse.srcSize, parseData + parse.srcSize, size - parse.srcSize); // we need all data + afterEndData_Size = size - parse.srcSize; + afterEndData = parseData + parse.srcSize; + + // we reduce data size to required bytes (parsed only) + inDataSize -= (size - parse.srcSize); + if (!prev) + inDataSize_Start = parse.srcSize; + break; + } + + { + // partial parsed - need cross transfer + if (crossSize != 0) + inDataSize = parse.srcSize; // it's only parsed now + else + { + // partial parsed - is not in initial cross block - we need to copy new data to cross block + Byte *cr = MtDec_GetCrossBuff(p); + if (!cr) + { + { + PRF(printf("\ncross alloc error error\n")); + // res = SZ_ERROR_MEM; + finish = True; + // p->allocError_for_Read_BlockIndex = blockIndex; + isAllocError = True; + break; + } + } + + { + size_t crSize = size - parse.srcSize; + inDataSize -= crSize; + p->crossEnd = crSize; + p->crossStart = 0; + memcpy(cr, parseData + parse.srcSize, crSize); + } + } + + // inDataSize_Full = inDataSize; + if (!prev) + inDataSize_Start = parse.srcSize; // it's partial size (parsed only) + + finish = False; + break; + } + } + + if (parse.srcSize != size) + { + res = SZ_ERROR_FAIL; + PRF(printf("\nfinished error SZ_ERROR_FAIL = %d\n", res)); + break; + } + } + } + + prev = link; + link = link->next; + + if (crossSize != 0) + { + crossSize = 0; + p->crossStart = 0; + p->crossEnd = 0; + } + } + } + + if (res == SZ_OK) + res = MtDec_GetError_Spec(p, blockIndex, &wasInterrupted); + } + + codeRes = SZ_OK; + + if (res == SZ_OK && needCode && !wasInterrupted) + { + codeRes = p->mtCallback->PreCode(p->mtCallbackObject, t->index); + if (codeRes != SZ_OK) + { + needCode = False; + finish = True; + // SZ_ERROR_MEM is expected error here. + // if (codeRes == SZ_ERROR_MEM) - we will try single-thread decoding later. + // if (codeRes != SZ_ERROR_MEM) - we can stop decoding or try single-thread decoding. + } + } + + if (res != SZ_OK || wasInterrupted) + finish = True; + + nextThread = NULL; + threadingErrorSRes = SZ_OK; + + if (!finish) + { + if (p->numStartedThreads < p->numStartedThreads_Limit && canCreateNewThread) + { + SRes res2 = MtDecThread_CreateAndStart(&p->threads[p->numStartedThreads]); + if (res2 == SZ_OK) + { + // if (p->numStartedThreads % 1000 == 0) PRF(printf("\n numStartedThreads=%d\n", p->numStartedThreads)); + p->numStartedThreads++; + } + else + { + PRF(printf("\nERROR: numStartedThreads=%d\n", p->numStartedThreads)); + if (p->numStartedThreads == 1) + { + // if only one thread is possible, we leave muti-threading code + finish = True; + needCode = False; + threadingErrorSRes = res2; + } + else + p->numStartedThreads_Limit = p->numStartedThreads; + } + } + + if (!finish) + { + unsigned nextIndex = t->index + 1; + nextThread = &p->threads[nextIndex >= p->numStartedThreads ? 
0 : nextIndex]; + RINOK_THREAD(Event_Set(&nextThread->canRead)) + // We have started executing for new iteration (with next thread) + // And that next thread now is responsible for possible exit from decoding (threading_code) + } + } + + // each call of Event_Set(&nextThread->canRead) must be followed by call of Event_Set(&nextThread->canWrite) + // if ( !finish ) we must call Event_Set(&nextThread->canWrite) in any case + // if ( finish ) we switch to single-thread mode and there are 2 ways at the end of current iteration (current block): + // - if (needContinue) after Write(&needContinue), we restore decoding with new iteration + // - otherwise we stop decoding and exit from ThreadFunc2() + + // Don't change (finish) variable in the further code + + + // ---------- CODE ---------- + + inPrev = 0; + outPrev = 0; + inCodePos = 0; + outCodePos = 0; + + if (res == SZ_OK && needCode && codeRes == SZ_OK) + { + BoolInt isStartBlock = True; + CMtDecBufLink *link = (CMtDecBufLink *)t->inBuf; + + for (;;) + { + size_t inSize; + int stop; + + if (isStartBlock) + inSize = inDataSize_Start; + else + { + UInt64 rem = inDataSize - inCodePos; + inSize = p->inBufSize; + if (inSize > rem) + inSize = (size_t)rem; + } + + inCodePos += inSize; + stop = True; + + codeRes = p->mtCallback->Code(p->mtCallbackObject, t->index, + (const Byte *)MTDEC__DATA_PTR_FROM_LINK(link), inSize, + (inCodePos == inDataSize), // srcFinished + &inCodePos, &outCodePos, &stop); + + if (codeRes != SZ_OK) + { + PRF(printf("\nCode Interrupt error = %x\n", codeRes)); + // we interrupt only later blocks + MtDec_Interrupt(p, blockIndex); + break; + } + + if (stop || inCodePos == inDataSize) + break; + + { + const UInt64 inDelta = inCodePos - inPrev; + const UInt64 outDelta = outCodePos - outPrev; + if (inDelta >= MTDEC_ProgessStep || outDelta >= MTDEC_ProgessStep) + { + // Sleep(1); + res = MtDec_Progress_GetError_Spec(p, inDelta, outDelta, blockIndex, &wasInterrupted); + if (res != SZ_OK || wasInterrupted) + break; + inPrev = inCodePos; + outPrev = outCodePos; + } + } + + link = link->next; + isStartBlock = False; + } + } + + + // ---------- WRITE ---------- + + RINOK_THREAD(Event_Wait(&t->canWrite)); + + { + BoolInt isErrorMode = False; + BoolInt canRecode = True; + BoolInt needWriteToStream = needWrite; + + if (p->exitThread) return 0; // it's never executed in normal cases + + if (p->wasInterrupted) + wasInterrupted = True; + else + { + if (codeRes != SZ_OK) // || !needCode // check it !!! 
+        {
+          p->wasInterrupted = True;
+          p->codeRes = codeRes;
+          if (codeRes == SZ_ERROR_MEM)
+            isAllocError = True;
+        }
+
+        if (threadingErrorSRes)
+        {
+          p->wasInterrupted = True;
+          p->threadingErrorSRes = threadingErrorSRes;
+          needWriteToStream = False;
+        }
+        if (isAllocError)
+        {
+          p->wasInterrupted = True;
+          p->isAllocError = True;
+          needWriteToStream = False;
+        }
+        if (overflow)
+        {
+          p->wasInterrupted = True;
+          p->overflow = True;
+          needWriteToStream = False;
+        }
+      }
+
+      if (needCode)
+      {
+        if (wasInterrupted)
+        {
+          inCodePos = 0;
+          outCodePos = 0;
+        }
+        {
+          const UInt64 inDelta = inCodePos - inPrev;
+          const UInt64 outDelta = outCodePos - outPrev;
+          // if (inDelta != 0 || outDelta != 0)
+          res = MtProgress_ProgressAdd(&p->mtProgress, inDelta, outDelta);
+        }
+      }
+
+      needContinue = (!finish);
+
+      // if (res == SZ_OK && needWrite && !wasInterrupted)
+      if (needWrite)
+      {
+        // p->inProcessed += inCodePos;
+
+        res = p->mtCallback->Write(p->mtCallbackObject, t->index,
+            res == SZ_OK && needWriteToStream && !wasInterrupted, // needWrite
+            afterEndData, afterEndData_Size,
+            &needContinue,
+            &canRecode);
+
+        // res= E_INVALIDARG; // for test
+
+        PRF(printf("\nAfter Write needContinue = %d\n", (unsigned)needContinue));
+        PRF(printf("\nprocessed = %d\n", (unsigned)p->inProcessed));
+
+        if (res != SZ_OK)
+        {
+          PRF(printf("\nWrite error = %d\n", res));
+          isErrorMode = True;
+          p->wasInterrupted = True;
+        }
+        if (res != SZ_OK
+            || (!needContinue && !finish))
+        {
+          PRF(printf("\nWrite Interrupt error = %x\n", res));
+          MtDec_Interrupt(p, blockIndex);
+        }
+      }
+
+      if (canRecode)
+      if (!needCode
+          || res != SZ_OK
+          || p->wasInterrupted
+          || codeRes != SZ_OK
+          || wasInterrupted
+          || p->numFilledThreads != 0
+          || isErrorMode)
+      {
+        if (p->numFilledThreads == 0)
+          p->filledThreadStart = t->index;
+        if (inDataSize != 0 || !finish)
+        {
+          t->inDataSize_Start = inDataSize_Start;
+          t->inDataSize = inDataSize;
+          p->numFilledThreads++;
+        }
+        PRF(printf("\np->numFilledThreads = %d\n", p->numFilledThreads));
+        PRF(printf("p->filledThreadStart = %d\n", p->filledThreadStart));
+      }
+
+      if (!finish)
+      {
+        RINOK_THREAD(Event_Set(&nextThread->canWrite));
+      }
+      else
+      {
+        if (needContinue)
+        {
+          // we restore decoding with new iteration
+          RINOK_THREAD(Event_Set(&p->threads[0].canWrite));
+        }
+        else
+        {
+          // we exit from decoding
+          if (t->index == 0)
+            return SZ_OK;
+          p->exitThread = True;
+        }
+        RINOK_THREAD(Event_Set(&p->threads[0].canRead));
+      }
+    }
+  }
+}
+
+#ifdef _WIN32
+#define USE_ALLOCA
+#endif
+
+#ifdef USE_ALLOCA
+#ifdef _WIN32
+#include <malloc.h>
+#else
+#include <stdlib.h>
+#endif
+#endif
+
+
+static THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE ThreadFunc1(void *pp)
+{
+  WRes res;
+
+  CMtDecThread *t = (CMtDecThread *)pp;
+  CMtDec *p;
+
+  // fprintf(stdout, "\n%d = %p\n", t->index, &t);
+
+  res = ThreadFunc2(t);
+  p = t->mtDec;
+  if (res == 0)
+    return p->exitThreadWRes;
+  {
+    // it's an unexpected situation for some threading function error
+    if (p->exitThreadWRes == 0)
+      p->exitThreadWRes = res;
+    PRF(printf("\nthread exit error = %d\n", res));
+    p->exitThread = True;
+    Event_Set(&p->threads[0].canRead);
+    Event_Set(&p->threads[0].canWrite);
+    MtProgress_SetError(&p->mtProgress, MY_SRes_HRESULT_FROM_WRes(res));
+  }
+  return res;
+}
+
+static MY_NO_INLINE THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE ThreadFunc(void *pp)
+{
+  CMtDecThread *t = (CMtDecThread *)pp;
+
+  // fprintf(stderr, "\n%d = %p - before", t->index, &t);
+  #ifdef USE_ALLOCA
+  t->allocaPtr = alloca(t->index * 128);
+  #endif
+  return ThreadFunc1(pp);
+}
+
+
+int
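+/* Illustrative sketch (not from the original sources) of the single-thread
+   fallback a caller can run when MtDec_Code() returns with p->needContinue
+   set (for example after an allocation error).  "mtDec" is the caller's
+   CMtDec; "DecodeChunk" stands for whatever single-threaded decoder the
+   callback object wraps:
+
+     if (MtDec_PrepareRead(&mtDec))     // frees unneeded bufs; != 0 if
+     {                                  // buffered input is still queued
+       size_t inLim = 0;                // must be 0 before the first call
+       const Byte *in;
+       while ((in = MtDec_Read(&mtDec, &inLim)) != NULL)
+         DecodeChunk(in, inLim);        // keep inLim unchanged between calls
+     }
+     // after NULL, remaining input comes directly from mtDec.inStream
+*/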
MtDec_PrepareRead(CMtDec *p) +{ + if (p->crossBlock && p->crossStart == p->crossEnd) + { + ISzAlloc_Free(p->alloc, p->crossBlock); + p->crossBlock = NULL; + } + + { + unsigned i; + for (i = 0; i < MTDEC__THREADS_MAX; i++) + if (i > p->numStartedThreads + || p->numFilledThreads <= + (i >= p->filledThreadStart ? + i - p->filledThreadStart : + i + p->numStartedThreads - p->filledThreadStart)) + MtDecThread_FreeInBufs(&p->threads[i]); + } + + return (p->numFilledThreads != 0) || (p->crossStart != p->crossEnd); +} + + +const Byte *MtDec_Read(CMtDec *p, size_t *inLim) +{ + while (p->numFilledThreads != 0) + { + CMtDecThread *t = &p->threads[p->filledThreadStart]; + + if (*inLim != 0) + { + { + void *link = t->inBuf; + void *next = ((CMtDecBufLink *)link)->next; + ISzAlloc_Free(p->alloc, link); + t->inBuf = next; + } + + if (t->inDataSize == 0) + { + MtDecThread_FreeInBufs(t); + if (--p->numFilledThreads == 0) + break; + if (++p->filledThreadStart == p->numStartedThreads) + p->filledThreadStart = 0; + t = &p->threads[p->filledThreadStart]; + } + } + + { + size_t lim = t->inDataSize_Start; + if (lim != 0) + t->inDataSize_Start = 0; + else + { + UInt64 rem = t->inDataSize; + lim = p->inBufSize; + if (lim > rem) + lim = (size_t)rem; + } + t->inDataSize -= lim; + *inLim = lim; + return (const Byte *)MTDEC__DATA_PTR_FROM_LINK(t->inBuf); + } + } + + { + size_t crossSize = p->crossEnd - p->crossStart; + if (crossSize != 0) + { + const Byte *data = MTDEC__DATA_PTR_FROM_LINK(p->crossBlock) + p->crossStart; + *inLim = crossSize; + p->crossStart = 0; + p->crossEnd = 0; + return data; + } + *inLim = 0; + if (p->crossBlock) + { + ISzAlloc_Free(p->alloc, p->crossBlock); + p->crossBlock = NULL; + } + return NULL; + } +} + + +void MtDec_Construct(CMtDec *p) +{ + unsigned i; + + p->inBufSize = (size_t)1 << 18; + + p->numThreadsMax = 0; + + p->inStream = NULL; + + // p->inData = NULL; + // p->inDataSize = 0; + + p->crossBlock = NULL; + p->crossStart = 0; + p->crossEnd = 0; + + p->numFilledThreads = 0; + + p->progress = NULL; + p->alloc = NULL; + + p->mtCallback = NULL; + p->mtCallbackObject = NULL; + + p->allocatedBufsSize = 0; + + for (i = 0; i < MTDEC__THREADS_MAX; i++) + { + CMtDecThread *t = &p->threads[i]; + t->mtDec = p; + t->index = i; + t->inBuf = NULL; + Event_Construct(&t->canRead); + Event_Construct(&t->canWrite); + Thread_Construct(&t->thread); + } + + // Event_Construct(&p->finishedEvent); + + CriticalSection_Init(&p->mtProgress.cs); +} + + +static void MtDec_Free(CMtDec *p) +{ + unsigned i; + + p->exitThread = True; + + for (i = 0; i < MTDEC__THREADS_MAX; i++) + MtDecThread_Destruct(&p->threads[i]); + + // Event_Close(&p->finishedEvent); + + if (p->crossBlock) + { + ISzAlloc_Free(p->alloc, p->crossBlock); + p->crossBlock = NULL; + } +} + + +void MtDec_Destruct(CMtDec *p) +{ + MtDec_Free(p); + + CriticalSection_Delete(&p->mtProgress.cs); +} + + +SRes MtDec_Code(CMtDec *p) +{ + unsigned i; + + p->inProcessed = 0; + + p->blockIndex = 1; // it must be larger than not_defined index (0) + p->isAllocError = False; + p->overflow = False; + p->threadingErrorSRes = SZ_OK; + + p->needContinue = True; + + p->readWasFinished = False; + p->needInterrupt = False; + p->interruptIndex = (UInt64)(Int64)-1; + + p->readProcessed = 0; + p->readRes = SZ_OK; + p->codeRes = SZ_OK; + p->wasInterrupted = False; + + p->crossStart = 0; + p->crossEnd = 0; + + p->filledThreadStart = 0; + p->numFilledThreads = 0; + + { + unsigned numThreads = p->numThreadsMax; + if (numThreads > MTDEC__THREADS_MAX) + numThreads = 
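+    /* Note: MtDec_Code() does not spawn a separate OS thread for index 0.
+       After creating the events it calls ThreadFunc() directly, so the
+       calling thread itself runs the pipeline and MtDec_Code() returns only
+       when decoding has finished or failed. */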
MTDEC__THREADS_MAX; + p->numStartedThreads_Limit = numThreads; + p->numStartedThreads = 0; + } + + if (p->inBufSize != p->allocatedBufsSize) + { + for (i = 0; i < MTDEC__THREADS_MAX; i++) + { + CMtDecThread *t = &p->threads[i]; + if (t->inBuf) + MtDecThread_FreeInBufs(t); + } + if (p->crossBlock) + { + ISzAlloc_Free(p->alloc, p->crossBlock); + p->crossBlock = NULL; + } + + p->allocatedBufsSize = p->inBufSize; + } + + MtProgress_Init(&p->mtProgress, p->progress); + + // RINOK_THREAD(ArEvent_OptCreate_And_Reset(&p->finishedEvent)); + p->exitThread = False; + p->exitThreadWRes = 0; + + { + WRes wres; + WRes sres; + CMtDecThread *nextThread = &p->threads[p->numStartedThreads++]; + // wres = MtDecThread_CreateAndStart(nextThread); + wres = MtDecThread_CreateEvents(nextThread); + if (wres == 0) { wres = Event_Set(&nextThread->canWrite); + if (wres == 0) { wres = Event_Set(&nextThread->canRead); + if (wres == 0) { wres = ThreadFunc(nextThread); + if (wres != 0) + { + p->needContinue = False; + MtDec_CloseThreads(p); + }}}} + + // wres = 17; // for test + // wres = Event_Wait(&p->finishedEvent); + + sres = MY_SRes_HRESULT_FROM_WRes(wres); + + if (sres != 0) + p->threadingErrorSRes = sres; + + if ( + // wres == 0 + // wres != 0 + // || p->mtc.codeRes == SZ_ERROR_MEM + p->isAllocError + || p->threadingErrorSRes != SZ_OK + || p->overflow) + { + // p->needContinue = True; + } + else + p->needContinue = False; + + if (p->needContinue) + return SZ_OK; + + // if (sres != SZ_OK) + return sres; + // return E_FAIL; + } +} + +#endif diff --git a/3rdparty/7z/src/MtDec.h b/3rdparty/7z/src/MtDec.h new file mode 100644 index 0000000000..9864cc8741 --- /dev/null +++ b/3rdparty/7z/src/MtDec.h @@ -0,0 +1,201 @@ +/* MtDec.h -- Multi-thread Decoder +2018-07-04 : Igor Pavlov : Public domain */ + +#ifndef __MT_DEC_H +#define __MT_DEC_H + +#include "7zTypes.h" + +#ifndef _7ZIP_ST +#include "Threads.h" +#endif + +EXTERN_C_BEGIN + +#ifndef _7ZIP_ST + +#ifndef _7ZIP_ST + #define MTDEC__THREADS_MAX 32 +#else + #define MTDEC__THREADS_MAX 1 +#endif + + +typedef struct +{ + ICompressProgress *progress; + SRes res; + UInt64 totalInSize; + UInt64 totalOutSize; + CCriticalSection cs; +} CMtProgress; + +void MtProgress_Init(CMtProgress *p, ICompressProgress *progress); +SRes MtProgress_Progress_ST(CMtProgress *p); +SRes MtProgress_ProgressAdd(CMtProgress *p, UInt64 inSize, UInt64 outSize); +SRes MtProgress_GetError(CMtProgress *p); +void MtProgress_SetError(CMtProgress *p, SRes res); + +struct _CMtDec; + +typedef struct +{ + struct _CMtDec *mtDec; + unsigned index; + void *inBuf; + + size_t inDataSize_Start; // size of input data in start block + UInt64 inDataSize; // total size of input data in all blocks + + CThread thread; + CAutoResetEvent canRead; + CAutoResetEvent canWrite; + void *allocaPtr; +} CMtDecThread; + +void MtDecThread_FreeInBufs(CMtDecThread *t); + + +typedef enum +{ + MTDEC_PARSE_CONTINUE, // continue this block with more input data + MTDEC_PARSE_OVERFLOW, // MT buffers overflow, need switch to single-thread + MTDEC_PARSE_NEW, // new block + MTDEC_PARSE_END // end of block threading. But we still can return to threading after Write(&needContinue) +} EMtDecParseState; + +typedef struct +{ + // in + int startCall; + const Byte *src; + size_t srcSize; + // in : (srcSize == 0) is allowed + // out : it's allowed to return less that actually was used ? 
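+  /* Illustrative sketch (not from the original sources) of how a Parse()
+     callback typically fills this structure: scan src for the next block
+     boundary, then
+
+       ci->srcSize = parsedBytes;                    // consumed part of src
+       ci->state = blockComplete     ? MTDEC_PARSE_END
+                 : boundaryInsideSrc ? MTDEC_PARSE_NEW
+                 :                     MTDEC_PARSE_CONTINUE;
+
+     "parsedBytes", "blockComplete" and "boundaryInsideSrc" are placeholder
+     names. MtDec preserves any unparsed tail of src in its cross-block
+     buffer instead of dropping it. */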
+ int srcFinished; + + // out + EMtDecParseState state; + BoolInt canCreateNewThread; + UInt64 outPos; // check it (size_t) +} CMtDecCallbackInfo; + + +typedef struct +{ + void (*Parse)(void *p, unsigned coderIndex, CMtDecCallbackInfo *ci); + + // PreCode() and Code(): + // (SRes_return_result != SZ_OK) means stop decoding, no need another blocks + SRes (*PreCode)(void *p, unsigned coderIndex); + SRes (*Code)(void *p, unsigned coderIndex, + const Byte *src, size_t srcSize, int srcFinished, + UInt64 *inCodePos, UInt64 *outCodePos, int *stop); + // stop - means stop another Code calls + + + /* Write() must be called, if Parse() was called + set (needWrite) if + { + && (was not interrupted by progress) + && (was not interrupted in previous block) + } + + out: + if (*needContinue), decoder still need to continue decoding with new iteration, + even after MTDEC_PARSE_END + if (*canRecode), we didn't flush current block data, so we still can decode current block later. + */ + SRes (*Write)(void *p, unsigned coderIndex, + BoolInt needWriteToStream, + const Byte *src, size_t srcSize, + // int srcFinished, + BoolInt *needContinue, + BoolInt *canRecode); +} IMtDecCallback; + + + +typedef struct _CMtDec +{ + /* input variables */ + + size_t inBufSize; /* size of input block */ + unsigned numThreadsMax; + // size_t inBlockMax; + unsigned numThreadsMax_2; + + ISeqInStream *inStream; + // const Byte *inData; + // size_t inDataSize; + + ICompressProgress *progress; + ISzAllocPtr alloc; + + IMtDecCallback *mtCallback; + void *mtCallbackObject; + + + /* internal variables */ + + size_t allocatedBufsSize; + + BoolInt exitThread; + WRes exitThreadWRes; + + UInt64 blockIndex; + BoolInt isAllocError; + BoolInt overflow; + SRes threadingErrorSRes; + + BoolInt needContinue; + + // CAutoResetEvent finishedEvent; + + SRes readRes; + SRes codeRes; + + BoolInt wasInterrupted; + + unsigned numStartedThreads_Limit; + unsigned numStartedThreads; + + Byte *crossBlock; + size_t crossStart; + size_t crossEnd; + UInt64 readProcessed; + BoolInt readWasFinished; + UInt64 inProcessed; + + unsigned filledThreadStart; + unsigned numFilledThreads; + + #ifndef _7ZIP_ST + BoolInt needInterrupt; + UInt64 interruptIndex; + CMtProgress mtProgress; + CMtDecThread threads[MTDEC__THREADS_MAX]; + #endif +} CMtDec; + + +void MtDec_Construct(CMtDec *p); +void MtDec_Destruct(CMtDec *p); + +/* +MtDec_Code() returns: + SZ_OK - in most cases + MY_SRes_HRESULT_FROM_WRes(WRes_error) - in case of unexpected error in threading function +*/ + +SRes MtDec_Code(CMtDec *p); +Byte *MtDec_GetCrossBuff(CMtDec *p); + +int MtDec_PrepareRead(CMtDec *p); +const Byte *MtDec_Read(CMtDec *p, size_t *inLim); + +#endif + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/Ppmd.h b/3rdparty/7z/src/Ppmd.h new file mode 100644 index 0000000000..4b99415212 --- /dev/null +++ b/3rdparty/7z/src/Ppmd.h @@ -0,0 +1,85 @@ +/* Ppmd.h -- PPMD codec common code +2017-04-03 : Igor Pavlov : Public domain +This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */ + +#ifndef __PPMD_H +#define __PPMD_H + +#include "CpuArch.h" + +EXTERN_C_BEGIN + +#ifdef MY_CPU_32BIT + #define PPMD_32BIT +#endif + +#define PPMD_INT_BITS 7 +#define PPMD_PERIOD_BITS 7 +#define PPMD_BIN_SCALE (1 << (PPMD_INT_BITS + PPMD_PERIOD_BITS)) + +#define PPMD_GET_MEAN_SPEC(summ, shift, round) (((summ) + (1 << ((shift) - (round)))) >> (shift)) +#define PPMD_GET_MEAN(summ) PPMD_GET_MEAN_SPEC((summ), PPMD_PERIOD_BITS, 2) +#define PPMD_UPDATE_PROB_0(prob) ((prob) + (1 << PPMD_INT_BITS) - 
PPMD_GET_MEAN(prob))
+#define PPMD_UPDATE_PROB_1(prob) ((prob) - PPMD_GET_MEAN(prob))
+
+#define PPMD_N1 4
+#define PPMD_N2 4
+#define PPMD_N3 4
+#define PPMD_N4 ((128 + 3 - 1 * PPMD_N1 - 2 * PPMD_N2 - 3 * PPMD_N3) / 4)
+#define PPMD_NUM_INDEXES (PPMD_N1 + PPMD_N2 + PPMD_N3 + PPMD_N4)
+
+#pragma pack(push, 1)
+/* Most compilers work OK here even without #pragma pack(push, 1), but some GCC compilers need it. */
+
+/* SEE-contexts for PPM-contexts with masked symbols */
+typedef struct
+{
+  UInt16 Summ; /* Freq */
+  Byte Shift; /* Speed of Freq change; low Shift is for fast change */
+  Byte Count; /* Count to next change of Shift */
+} CPpmd_See;
+
+#define Ppmd_See_Update(p) if ((p)->Shift < PPMD_PERIOD_BITS && --(p)->Count == 0) \
+    { (p)->Summ <<= 1; (p)->Count = (Byte)(3 << (p)->Shift++); }
+
+typedef struct
+{
+  Byte Symbol;
+  Byte Freq;
+  UInt16 SuccessorLow;
+  UInt16 SuccessorHigh;
+} CPpmd_State;
+
+#pragma pack(pop)
+
+typedef
+  #ifdef PPMD_32BIT
+  CPpmd_State *
+  #else
+  UInt32
+  #endif
+  CPpmd_State_Ref;
+
+typedef
+  #ifdef PPMD_32BIT
+  void *
+  #else
+  UInt32
+  #endif
+  CPpmd_Void_Ref;
+
+typedef
+  #ifdef PPMD_32BIT
+  Byte *
+  #else
+  UInt32
+  #endif
+  CPpmd_Byte_Ref;
+
+#define PPMD_SetAllBitsIn256Bytes(p) \
+  { size_t z; for (z = 0; z < 256 / sizeof(p[0]); z += 8) { \
+  p[z+7] = p[z+6] = p[z+5] = p[z+4] = p[z+3] = p[z+2] = p[z+1] = p[z+0] = ~(size_t)0; }}
+
+EXTERN_C_END
+
+#endif
diff --git a/3rdparty/7z/src/Ppmd7.c b/3rdparty/7z/src/Ppmd7.c
new file mode 100644
index 0000000000..80e7de9a63
--- /dev/null
+++ b/3rdparty/7z/src/Ppmd7.c
@@ -0,0 +1,712 @@
+/* Ppmd7.c -- PPMdH codec
+2018-07-04 : Igor Pavlov : Public domain
+This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */
+
+#include "Precomp.h"
+
+#include <string.h>
+
+#include "Ppmd7.h"
+
+const Byte PPMD7_kExpEscape[16] = { 25, 14, 9, 7, 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2 };
+static const UInt16 kInitBinEsc[] = { 0x3CDD, 0x1F3F, 0x59BF, 0x48F3, 0x64A1, 0x5ABC, 0x6632, 0x6051};
+
+#define MAX_FREQ 124
+#define UNIT_SIZE 12
+
+#define U2B(nu) ((UInt32)(nu) * UNIT_SIZE)
+#define U2I(nu) (p->Units2Indx[(size_t)(nu) - 1])
+#define I2U(indx) (p->Indx2Units[indx])
+
+#ifdef PPMD_32BIT
+  #define REF(ptr) (ptr)
+#else
+  #define REF(ptr) ((UInt32)((Byte *)(ptr) - (p)->Base))
+#endif
+
+#define STATS_REF(ptr) ((CPpmd_State_Ref)REF(ptr))
+
+#define CTX(ref) ((CPpmd7_Context *)Ppmd7_GetContext(p, ref))
+#define STATS(ctx) Ppmd7_GetStats(p, ctx)
+#define ONE_STATE(ctx) Ppmd7Context_OneState(ctx)
+#define SUFFIX(ctx) CTX((ctx)->Suffix)
+
+typedef CPpmd7_Context * CTX_PTR;
+
+struct CPpmd7_Node_;
+
+typedef
+  #ifdef PPMD_32BIT
+  struct CPpmd7_Node_ *
+  #else
+  UInt32
+  #endif
+  CPpmd7_Node_Ref;
+
+typedef struct CPpmd7_Node_
+{
+  UInt16 Stamp; /* must be at offset 0 as CPpmd7_Context::NumStats. Stamp=0 means free */
+  UInt16 NU;
+  CPpmd7_Node_Ref Next; /* must be at offset >= 4 */
+  CPpmd7_Node_Ref Prev;
+} CPpmd7_Node;
+
+#ifdef PPMD_32BIT
+  #define NODE(ptr) (ptr)
+#else
+  #define NODE(offs) ((CPpmd7_Node *)(p->Base + (offs)))
+#endif
+
+void Ppmd7_Construct(CPpmd7 *p)
+{
+  unsigned i, k, m;
+
+  p->Base = 0;
+
+  for (i = 0, k = 0; i < PPMD_NUM_INDEXES; i++)
+  {
+    unsigned step = (i >= 12 ?
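+    /* Worked example of the tables built here: indices 0..3 describe
+       exactly 1..4 units (step 1), then the step grows, e.g.
+       Indx2Units[4] = 6, and Units2Indx maps both 5 and 6 units to index 4,
+       so a 5-unit request is served from the 6-unit free list. The last
+       index describes 128 units. */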
4 : (i >> 2) + 1); + do { p->Units2Indx[k++] = (Byte)i; } while (--step); + p->Indx2Units[i] = (Byte)k; + } + + p->NS2BSIndx[0] = (0 << 1); + p->NS2BSIndx[1] = (1 << 1); + memset(p->NS2BSIndx + 2, (2 << 1), 9); + memset(p->NS2BSIndx + 11, (3 << 1), 256 - 11); + + for (i = 0; i < 3; i++) + p->NS2Indx[i] = (Byte)i; + for (m = i, k = 1; i < 256; i++) + { + p->NS2Indx[i] = (Byte)m; + if (--k == 0) + k = (++m) - 2; + } + + memset(p->HB2Flag, 0, 0x40); + memset(p->HB2Flag + 0x40, 8, 0x100 - 0x40); +} + +void Ppmd7_Free(CPpmd7 *p, ISzAllocPtr alloc) +{ + ISzAlloc_Free(alloc, p->Base); + p->Size = 0; + p->Base = 0; +} + +BoolInt Ppmd7_Alloc(CPpmd7 *p, UInt32 size, ISzAllocPtr alloc) +{ + if (!p->Base || p->Size != size) + { + size_t size2; + Ppmd7_Free(p, alloc); + size2 = 0 + #ifndef PPMD_32BIT + + UNIT_SIZE + #endif + ; + p->AlignOffset = + #ifdef PPMD_32BIT + (4 - size) & 3; + #else + 4 - (size & 3); + #endif + if ((p->Base = (Byte *)ISzAlloc_Alloc(alloc, p->AlignOffset + size + size2)) == 0) + return False; + p->Size = size; + } + return True; +} + +static void InsertNode(CPpmd7 *p, void *node, unsigned indx) +{ + *((CPpmd_Void_Ref *)node) = p->FreeList[indx]; + p->FreeList[indx] = REF(node); +} + +static void *RemoveNode(CPpmd7 *p, unsigned indx) +{ + CPpmd_Void_Ref *node = (CPpmd_Void_Ref *)Ppmd7_GetPtr(p, p->FreeList[indx]); + p->FreeList[indx] = *node; + return node; +} + +static void SplitBlock(CPpmd7 *p, void *ptr, unsigned oldIndx, unsigned newIndx) +{ + unsigned i, nu = I2U(oldIndx) - I2U(newIndx); + ptr = (Byte *)ptr + U2B(I2U(newIndx)); + if (I2U(i = U2I(nu)) != nu) + { + unsigned k = I2U(--i); + InsertNode(p, ((Byte *)ptr) + U2B(k), nu - k - 1); + } + InsertNode(p, ptr, i); +} + +static void GlueFreeBlocks(CPpmd7 *p) +{ + #ifdef PPMD_32BIT + CPpmd7_Node headItem; + CPpmd7_Node_Ref head = &headItem; + #else + CPpmd7_Node_Ref head = p->AlignOffset + p->Size; + #endif + + CPpmd7_Node_Ref n = head; + unsigned i; + + p->GlueCount = 255; + + /* create doubly-linked list of free blocks */ + for (i = 0; i < PPMD_NUM_INDEXES; i++) + { + UInt16 nu = I2U(i); + CPpmd7_Node_Ref next = (CPpmd7_Node_Ref)p->FreeList[i]; + p->FreeList[i] = 0; + while (next != 0) + { + CPpmd7_Node *node = NODE(next); + node->Next = n; + n = NODE(n)->Prev = next; + next = *(const CPpmd7_Node_Ref *)node; + node->Stamp = 0; + node->NU = (UInt16)nu; + } + } + NODE(head)->Stamp = 1; + NODE(head)->Next = n; + NODE(n)->Prev = head; + if (p->LoUnit != p->HiUnit) + ((CPpmd7_Node *)p->LoUnit)->Stamp = 1; + + /* Glue free blocks */ + while (n != head) + { + CPpmd7_Node *node = NODE(n); + UInt32 nu = (UInt32)node->NU; + for (;;) + { + CPpmd7_Node *node2 = NODE(n) + nu; + nu += node2->NU; + if (node2->Stamp != 0 || nu >= 0x10000) + break; + NODE(node2->Prev)->Next = node2->Next; + NODE(node2->Next)->Prev = node2->Prev; + node->NU = (UInt16)nu; + } + n = node->Next; + } + + /* Fill lists of free blocks */ + for (n = NODE(head)->Next; n != head;) + { + CPpmd7_Node *node = NODE(n); + unsigned nu; + CPpmd7_Node_Ref next = node->Next; + for (nu = node->NU; nu > 128; nu -= 128, node += 128) + InsertNode(p, node, PPMD_NUM_INDEXES - 1); + if (I2U(i = U2I(nu)) != nu) + { + unsigned k = I2U(--i); + InsertNode(p, node + k, nu - k - 1); + } + InsertNode(p, node, i); + n = next; + } +} + +static void *AllocUnitsRare(CPpmd7 *p, unsigned indx) +{ + unsigned i; + void *retVal; + if (p->GlueCount == 0) + { + GlueFreeBlocks(p); + if (p->FreeList[indx] != 0) + return RemoveNode(p, indx); + } + i = indx; + do + { + if (++i == PPMD_NUM_INDEXES) + { + 
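+        /* No suitable free block exists even after glueing: carve the
+           request from the low end of the UnitsStart area, if the gap
+           above the Text area is still large enough; otherwise return
+           NULL and let the caller restart the model. */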
UInt32 numBytes = U2B(I2U(indx)); + p->GlueCount--; + return ((UInt32)(p->UnitsStart - p->Text) > numBytes) ? (p->UnitsStart -= numBytes) : (NULL); + } + } + while (p->FreeList[i] == 0); + retVal = RemoveNode(p, i); + SplitBlock(p, retVal, i, indx); + return retVal; +} + +static void *AllocUnits(CPpmd7 *p, unsigned indx) +{ + UInt32 numBytes; + if (p->FreeList[indx] != 0) + return RemoveNode(p, indx); + numBytes = U2B(I2U(indx)); + if (numBytes <= (UInt32)(p->HiUnit - p->LoUnit)) + { + void *retVal = p->LoUnit; + p->LoUnit += numBytes; + return retVal; + } + return AllocUnitsRare(p, indx); +} + +#define MyMem12Cpy(dest, src, num) \ + { UInt32 *d = (UInt32 *)dest; const UInt32 *s = (const UInt32 *)src; UInt32 n = num; \ + do { d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; s += 3; d += 3; } while (--n); } + +static void *ShrinkUnits(CPpmd7 *p, void *oldPtr, unsigned oldNU, unsigned newNU) +{ + unsigned i0 = U2I(oldNU); + unsigned i1 = U2I(newNU); + if (i0 == i1) + return oldPtr; + if (p->FreeList[i1] != 0) + { + void *ptr = RemoveNode(p, i1); + MyMem12Cpy(ptr, oldPtr, newNU); + InsertNode(p, oldPtr, i0); + return ptr; + } + SplitBlock(p, oldPtr, i0, i1); + return oldPtr; +} + +#define SUCCESSOR(p) ((CPpmd_Void_Ref)((p)->SuccessorLow | ((UInt32)(p)->SuccessorHigh << 16))) + +static void SetSuccessor(CPpmd_State *p, CPpmd_Void_Ref v) +{ + (p)->SuccessorLow = (UInt16)((UInt32)(v) & 0xFFFF); + (p)->SuccessorHigh = (UInt16)(((UInt32)(v) >> 16) & 0xFFFF); +} + +static void RestartModel(CPpmd7 *p) +{ + unsigned i, k, m; + + memset(p->FreeList, 0, sizeof(p->FreeList)); + p->Text = p->Base + p->AlignOffset; + p->HiUnit = p->Text + p->Size; + p->LoUnit = p->UnitsStart = p->HiUnit - p->Size / 8 / UNIT_SIZE * 7 * UNIT_SIZE; + p->GlueCount = 0; + + p->OrderFall = p->MaxOrder; + p->RunLength = p->InitRL = -(Int32)((p->MaxOrder < 12) ? 
p->MaxOrder : 12) - 1; + p->PrevSuccess = 0; + + p->MinContext = p->MaxContext = (CTX_PTR)(p->HiUnit -= UNIT_SIZE); /* AllocContext(p); */ + p->MinContext->Suffix = 0; + p->MinContext->NumStats = 256; + p->MinContext->SummFreq = 256 + 1; + p->FoundState = (CPpmd_State *)p->LoUnit; /* AllocUnits(p, PPMD_NUM_INDEXES - 1); */ + p->LoUnit += U2B(256 / 2); + p->MinContext->Stats = REF(p->FoundState); + for (i = 0; i < 256; i++) + { + CPpmd_State *s = &p->FoundState[i]; + s->Symbol = (Byte)i; + s->Freq = 1; + SetSuccessor(s, 0); + } + + for (i = 0; i < 128; i++) + for (k = 0; k < 8; k++) + { + UInt16 *dest = p->BinSumm[i] + k; + UInt16 val = (UInt16)(PPMD_BIN_SCALE - kInitBinEsc[k] / (i + 2)); + for (m = 0; m < 64; m += 8) + dest[m] = val; + } + + for (i = 0; i < 25; i++) + for (k = 0; k < 16; k++) + { + CPpmd_See *s = &p->See[i][k]; + s->Summ = (UInt16)((5 * i + 10) << (s->Shift = PPMD_PERIOD_BITS - 4)); + s->Count = 4; + } +} + +void Ppmd7_Init(CPpmd7 *p, unsigned maxOrder) +{ + p->MaxOrder = maxOrder; + RestartModel(p); + p->DummySee.Shift = PPMD_PERIOD_BITS; + p->DummySee.Summ = 0; /* unused */ + p->DummySee.Count = 64; /* unused */ +} + +static CTX_PTR CreateSuccessors(CPpmd7 *p, BoolInt skip) +{ + CPpmd_State upState; + CTX_PTR c = p->MinContext; + CPpmd_Byte_Ref upBranch = (CPpmd_Byte_Ref)SUCCESSOR(p->FoundState); + CPpmd_State *ps[PPMD7_MAX_ORDER]; + unsigned numPs = 0; + + if (!skip) + ps[numPs++] = p->FoundState; + + while (c->Suffix) + { + CPpmd_Void_Ref successor; + CPpmd_State *s; + c = SUFFIX(c); + if (c->NumStats != 1) + { + for (s = STATS(c); s->Symbol != p->FoundState->Symbol; s++); + } + else + s = ONE_STATE(c); + successor = SUCCESSOR(s); + if (successor != upBranch) + { + c = CTX(successor); + if (numPs == 0) + return c; + break; + } + ps[numPs++] = s; + } + + upState.Symbol = *(const Byte *)Ppmd7_GetPtr(p, upBranch); + SetSuccessor(&upState, upBranch + 1); + + if (c->NumStats == 1) + upState.Freq = ONE_STATE(c)->Freq; + else + { + UInt32 cf, s0; + CPpmd_State *s; + for (s = STATS(c); s->Symbol != upState.Symbol; s++); + cf = s->Freq - 1; + s0 = c->SummFreq - c->NumStats - cf; + upState.Freq = (Byte)(1 + ((2 * cf <= s0) ? 
(5 * cf > s0) : ((2 * cf + 3 * s0 - 1) / (2 * s0)))); + } + + do + { + /* Create Child */ + CTX_PTR c1; /* = AllocContext(p); */ + if (p->HiUnit != p->LoUnit) + c1 = (CTX_PTR)(p->HiUnit -= UNIT_SIZE); + else if (p->FreeList[0] != 0) + c1 = (CTX_PTR)RemoveNode(p, 0); + else + { + c1 = (CTX_PTR)AllocUnitsRare(p, 0); + if (!c1) + return NULL; + } + c1->NumStats = 1; + *ONE_STATE(c1) = upState; + c1->Suffix = REF(c); + SetSuccessor(ps[--numPs], REF(c1)); + c = c1; + } + while (numPs != 0); + + return c; +} + +static void SwapStates(CPpmd_State *t1, CPpmd_State *t2) +{ + CPpmd_State tmp = *t1; + *t1 = *t2; + *t2 = tmp; +} + +static void UpdateModel(CPpmd7 *p) +{ + CPpmd_Void_Ref successor, fSuccessor = SUCCESSOR(p->FoundState); + CTX_PTR c; + unsigned s0, ns; + + if (p->FoundState->Freq < MAX_FREQ / 4 && p->MinContext->Suffix != 0) + { + c = SUFFIX(p->MinContext); + + if (c->NumStats == 1) + { + CPpmd_State *s = ONE_STATE(c); + if (s->Freq < 32) + s->Freq++; + } + else + { + CPpmd_State *s = STATS(c); + if (s->Symbol != p->FoundState->Symbol) + { + do { s++; } while (s->Symbol != p->FoundState->Symbol); + if (s[0].Freq >= s[-1].Freq) + { + SwapStates(&s[0], &s[-1]); + s--; + } + } + if (s->Freq < MAX_FREQ - 9) + { + s->Freq += 2; + c->SummFreq += 2; + } + } + } + + if (p->OrderFall == 0) + { + p->MinContext = p->MaxContext = CreateSuccessors(p, True); + if (p->MinContext == 0) + { + RestartModel(p); + return; + } + SetSuccessor(p->FoundState, REF(p->MinContext)); + return; + } + + *p->Text++ = p->FoundState->Symbol; + successor = REF(p->Text); + if (p->Text >= p->UnitsStart) + { + RestartModel(p); + return; + } + + if (fSuccessor) + { + if (fSuccessor <= successor) + { + CTX_PTR cs = CreateSuccessors(p, False); + if (cs == NULL) + { + RestartModel(p); + return; + } + fSuccessor = REF(cs); + } + if (--p->OrderFall == 0) + { + successor = fSuccessor; + p->Text -= (p->MaxContext != p->MinContext); + } + } + else + { + SetSuccessor(p->FoundState, successor); + fSuccessor = REF(p->MinContext); + } + + s0 = p->MinContext->SummFreq - (ns = p->MinContext->NumStats) - (p->FoundState->Freq - 1); + + for (c = p->MaxContext; c != p->MinContext; c = SUFFIX(c)) + { + unsigned ns1; + UInt32 cf, sf; + if ((ns1 = c->NumStats) != 1) + { + if ((ns1 & 1) == 0) + { + /* Expand for one UNIT */ + unsigned oldNU = ns1 >> 1; + unsigned i = U2I(oldNU); + if (i != U2I((size_t)oldNU + 1)) + { + void *ptr = AllocUnits(p, i + 1); + void *oldPtr; + if (!ptr) + { + RestartModel(p); + return; + } + oldPtr = STATS(c); + MyMem12Cpy(ptr, oldPtr, oldNU); + InsertNode(p, oldPtr, i); + c->Stats = STATS_REF(ptr); + } + } + c->SummFreq = (UInt16)(c->SummFreq + (2 * ns1 < ns) + 2 * ((4 * ns1 <= ns) & (c->SummFreq <= 8 * ns1))); + } + else + { + CPpmd_State *s = (CPpmd_State*)AllocUnits(p, 0); + if (!s) + { + RestartModel(p); + return; + } + *s = *ONE_STATE(c); + c->Stats = REF(s); + if (s->Freq < MAX_FREQ / 4 - 1) + s->Freq <<= 1; + else + s->Freq = MAX_FREQ - 4; + c->SummFreq = (UInt16)(s->Freq + p->InitEsc + (ns > 3)); + } + cf = 2 * (UInt32)p->FoundState->Freq * (c->SummFreq + 6); + sf = (UInt32)s0 + c->SummFreq; + if (cf < 6 * sf) + { + cf = 1 + (cf > sf) + (cf >= 4 * sf); + c->SummFreq += 3; + } + else + { + cf = 4 + (cf >= 9 * sf) + (cf >= 12 * sf) + (cf >= 15 * sf); + c->SummFreq = (UInt16)(c->SummFreq + cf); + } + { + CPpmd_State *s = STATS(c) + ns1; + SetSuccessor(s, successor); + s->Symbol = p->FoundState->Symbol; + s->Freq = (Byte)cf; + c->NumStats = (UInt16)(ns1 + 1); + } + } + p->MaxContext = p->MinContext = 
CTX(fSuccessor); +} + +static void Rescale(CPpmd7 *p) +{ + unsigned i, adder, sumFreq, escFreq; + CPpmd_State *stats = STATS(p->MinContext); + CPpmd_State *s = p->FoundState; + { + CPpmd_State tmp = *s; + for (; s != stats; s--) + s[0] = s[-1]; + *s = tmp; + } + escFreq = p->MinContext->SummFreq - s->Freq; + s->Freq += 4; + adder = (p->OrderFall != 0); + s->Freq = (Byte)((s->Freq + adder) >> 1); + sumFreq = s->Freq; + + i = p->MinContext->NumStats - 1; + do + { + escFreq -= (++s)->Freq; + s->Freq = (Byte)((s->Freq + adder) >> 1); + sumFreq += s->Freq; + if (s[0].Freq > s[-1].Freq) + { + CPpmd_State *s1 = s; + CPpmd_State tmp = *s1; + do + s1[0] = s1[-1]; + while (--s1 != stats && tmp.Freq > s1[-1].Freq); + *s1 = tmp; + } + } + while (--i); + + if (s->Freq == 0) + { + unsigned numStats = p->MinContext->NumStats; + unsigned n0, n1; + do { i++; } while ((--s)->Freq == 0); + escFreq += i; + p->MinContext->NumStats = (UInt16)(p->MinContext->NumStats - i); + if (p->MinContext->NumStats == 1) + { + CPpmd_State tmp = *stats; + do + { + tmp.Freq = (Byte)(tmp.Freq - (tmp.Freq >> 1)); + escFreq >>= 1; + } + while (escFreq > 1); + InsertNode(p, stats, U2I(((numStats + 1) >> 1))); + *(p->FoundState = ONE_STATE(p->MinContext)) = tmp; + return; + } + n0 = (numStats + 1) >> 1; + n1 = (p->MinContext->NumStats + 1) >> 1; + if (n0 != n1) + p->MinContext->Stats = STATS_REF(ShrinkUnits(p, stats, n0, n1)); + } + p->MinContext->SummFreq = (UInt16)(sumFreq + escFreq - (escFreq >> 1)); + p->FoundState = STATS(p->MinContext); +} + +CPpmd_See *Ppmd7_MakeEscFreq(CPpmd7 *p, unsigned numMasked, UInt32 *escFreq) +{ + CPpmd_See *see; + unsigned nonMasked = p->MinContext->NumStats - numMasked; + if (p->MinContext->NumStats != 256) + { + see = p->See[(unsigned)p->NS2Indx[(size_t)nonMasked - 1]] + + (nonMasked < (unsigned)SUFFIX(p->MinContext)->NumStats - p->MinContext->NumStats) + + 2 * (unsigned)(p->MinContext->SummFreq < 11 * p->MinContext->NumStats) + + 4 * (unsigned)(numMasked > nonMasked) + + p->HiBitsFlag; + { + unsigned r = (see->Summ >> see->Shift); + see->Summ = (UInt16)(see->Summ - r); + *escFreq = r + (r == 0); + } + } + else + { + see = &p->DummySee; + *escFreq = 1; + } + return see; +} + +static void NextContext(CPpmd7 *p) +{ + CTX_PTR c = CTX(SUCCESSOR(p->FoundState)); + if (p->OrderFall == 0 && (Byte *)c > p->Text) + p->MinContext = p->MaxContext = c; + else + UpdateModel(p); +} + +void Ppmd7_Update1(CPpmd7 *p) +{ + CPpmd_State *s = p->FoundState; + s->Freq += 4; + p->MinContext->SummFreq += 4; + if (s[0].Freq > s[-1].Freq) + { + SwapStates(&s[0], &s[-1]); + p->FoundState = --s; + if (s->Freq > MAX_FREQ) + Rescale(p); + } + NextContext(p); +} + +void Ppmd7_Update1_0(CPpmd7 *p) +{ + p->PrevSuccess = (2 * p->FoundState->Freq > p->MinContext->SummFreq); + p->RunLength += p->PrevSuccess; + p->MinContext->SummFreq += 4; + if ((p->FoundState->Freq += 4) > MAX_FREQ) + Rescale(p); + NextContext(p); +} + +void Ppmd7_UpdateBin(CPpmd7 *p) +{ + p->FoundState->Freq = (Byte)(p->FoundState->Freq + (p->FoundState->Freq < 128 ? 
1: 0)); + p->PrevSuccess = 1; + p->RunLength++; + NextContext(p); +} + +void Ppmd7_Update2(CPpmd7 *p) +{ + p->MinContext->SummFreq += 4; + if ((p->FoundState->Freq += 4) > MAX_FREQ) + Rescale(p); + p->RunLength = p->InitRL; + UpdateModel(p); +} diff --git a/3rdparty/7z/src/Ppmd7.h b/3rdparty/7z/src/Ppmd7.h new file mode 100644 index 0000000000..cce93f1209 --- /dev/null +++ b/3rdparty/7z/src/Ppmd7.h @@ -0,0 +1,142 @@ +/* Ppmd7.h -- PPMdH compression codec +2018-07-04 : Igor Pavlov : Public domain +This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */ + +/* This code supports virtual RangeDecoder and includes the implementation +of RangeCoder from 7z, instead of RangeCoder from original PPMd var.H. +If you need the compatibility with original PPMd var.H, you can use external RangeDecoder */ + +#ifndef __PPMD7_H +#define __PPMD7_H + +#include "Ppmd.h" + +EXTERN_C_BEGIN + +#define PPMD7_MIN_ORDER 2 +#define PPMD7_MAX_ORDER 64 + +#define PPMD7_MIN_MEM_SIZE (1 << 11) +#define PPMD7_MAX_MEM_SIZE (0xFFFFFFFF - 12 * 3) + +struct CPpmd7_Context_; + +typedef + #ifdef PPMD_32BIT + struct CPpmd7_Context_ * + #else + UInt32 + #endif + CPpmd7_Context_Ref; + +typedef struct CPpmd7_Context_ +{ + UInt16 NumStats; + UInt16 SummFreq; + CPpmd_State_Ref Stats; + CPpmd7_Context_Ref Suffix; +} CPpmd7_Context; + +#define Ppmd7Context_OneState(p) ((CPpmd_State *)&(p)->SummFreq) + +typedef struct +{ + CPpmd7_Context *MinContext, *MaxContext; + CPpmd_State *FoundState; + unsigned OrderFall, InitEsc, PrevSuccess, MaxOrder, HiBitsFlag; + Int32 RunLength, InitRL; /* must be 32-bit at least */ + + UInt32 Size; + UInt32 GlueCount; + Byte *Base, *LoUnit, *HiUnit, *Text, *UnitsStart; + UInt32 AlignOffset; + + Byte Indx2Units[PPMD_NUM_INDEXES]; + Byte Units2Indx[128]; + CPpmd_Void_Ref FreeList[PPMD_NUM_INDEXES]; + Byte NS2Indx[256], NS2BSIndx[256], HB2Flag[256]; + CPpmd_See DummySee, See[25][16]; + UInt16 BinSumm[128][64]; +} CPpmd7; + +void Ppmd7_Construct(CPpmd7 *p); +BoolInt Ppmd7_Alloc(CPpmd7 *p, UInt32 size, ISzAllocPtr alloc); +void Ppmd7_Free(CPpmd7 *p, ISzAllocPtr alloc); +void Ppmd7_Init(CPpmd7 *p, unsigned maxOrder); +#define Ppmd7_WasAllocated(p) ((p)->Base != NULL) + + +/* ---------- Internal Functions ---------- */ + +extern const Byte PPMD7_kExpEscape[16]; + +#ifdef PPMD_32BIT + #define Ppmd7_GetPtr(p, ptr) (ptr) + #define Ppmd7_GetContext(p, ptr) (ptr) + #define Ppmd7_GetStats(p, ctx) ((ctx)->Stats) +#else + #define Ppmd7_GetPtr(p, offs) ((void *)((p)->Base + (offs))) + #define Ppmd7_GetContext(p, offs) ((CPpmd7_Context *)Ppmd7_GetPtr((p), (offs))) + #define Ppmd7_GetStats(p, ctx) ((CPpmd_State *)Ppmd7_GetPtr((p), ((ctx)->Stats))) +#endif + +void Ppmd7_Update1(CPpmd7 *p); +void Ppmd7_Update1_0(CPpmd7 *p); +void Ppmd7_Update2(CPpmd7 *p); +void Ppmd7_UpdateBin(CPpmd7 *p); + +#define Ppmd7_GetBinSumm(p) \ + &p->BinSumm[(size_t)(unsigned)Ppmd7Context_OneState(p->MinContext)->Freq - 1][p->PrevSuccess + \ + p->NS2BSIndx[(size_t)Ppmd7_GetContext(p, p->MinContext->Suffix)->NumStats - 1] + \ + (p->HiBitsFlag = p->HB2Flag[p->FoundState->Symbol]) + \ + 2 * p->HB2Flag[(unsigned)Ppmd7Context_OneState(p->MinContext)->Symbol] + \ + ((p->RunLength >> 26) & 0x20)] + +CPpmd_See *Ppmd7_MakeEscFreq(CPpmd7 *p, unsigned numMasked, UInt32 *scale); + + +/* ---------- Decode ---------- */ + +typedef struct IPpmd7_RangeDec IPpmd7_RangeDec; + +struct IPpmd7_RangeDec +{ + UInt32 (*GetThreshold)(const IPpmd7_RangeDec *p, UInt32 total); + void (*Decode)(const IPpmd7_RangeDec *p, UInt32 start, UInt32 size); + UInt32 
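+  /* Typical wiring of this virtual range decoder (illustrative sketch, not
+     from the original sources; "inByteVt" is a caller-supplied IByteIn and
+     "ppmd" an initialized CPpmd7):
+
+       CPpmd7z_RangeDec rc;
+       rc.Stream = inByteVt;
+       Ppmd7z_RangeDec_CreateVTable(&rc);
+       if (!Ppmd7z_RangeDec_Init(&rc)) return SZ_ERROR_DATA;
+       for (;;)
+       {
+         int sym = Ppmd7_DecodeSymbol(&ppmd, &rc.vt);
+         if (sym < 0) break;  // -1: end marker or truncated data, -2: data error
+         // emit (Byte)sym
+       }
+  */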
(*DecodeBit)(const IPpmd7_RangeDec *p, UInt32 size0); +}; + +typedef struct +{ + IPpmd7_RangeDec vt; + UInt32 Range; + UInt32 Code; + IByteIn *Stream; +} CPpmd7z_RangeDec; + +void Ppmd7z_RangeDec_CreateVTable(CPpmd7z_RangeDec *p); +BoolInt Ppmd7z_RangeDec_Init(CPpmd7z_RangeDec *p); +#define Ppmd7z_RangeDec_IsFinishedOK(p) ((p)->Code == 0) + +int Ppmd7_DecodeSymbol(CPpmd7 *p, const IPpmd7_RangeDec *rc); + + +/* ---------- Encode ---------- */ + +typedef struct +{ + UInt64 Low; + UInt32 Range; + Byte Cache; + UInt64 CacheSize; + IByteOut *Stream; +} CPpmd7z_RangeEnc; + +void Ppmd7z_RangeEnc_Init(CPpmd7z_RangeEnc *p); +void Ppmd7z_RangeEnc_FlushData(CPpmd7z_RangeEnc *p); + +void Ppmd7_EncodeSymbol(CPpmd7 *p, CPpmd7z_RangeEnc *rc, int symbol); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/Ppmd7Dec.c b/3rdparty/7z/src/Ppmd7Dec.c new file mode 100644 index 0000000000..2026407108 --- /dev/null +++ b/3rdparty/7z/src/Ppmd7Dec.c @@ -0,0 +1,191 @@ +/* Ppmd7Dec.c -- PPMdH Decoder +2018-07-04 : Igor Pavlov : Public domain +This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */ + +#include "Precomp.h" + +#include "Ppmd7.h" + +#define kTopValue (1 << 24) + +BoolInt Ppmd7z_RangeDec_Init(CPpmd7z_RangeDec *p) +{ + unsigned i; + p->Code = 0; + p->Range = 0xFFFFFFFF; + if (IByteIn_Read(p->Stream) != 0) + return False; + for (i = 0; i < 4; i++) + p->Code = (p->Code << 8) | IByteIn_Read(p->Stream); + return (p->Code < 0xFFFFFFFF); +} + +#define GET_Ppmd7z_RangeDec CPpmd7z_RangeDec *p = CONTAINER_FROM_VTBL(pp, CPpmd7z_RangeDec, vt); + +static UInt32 Range_GetThreshold(const IPpmd7_RangeDec *pp, UInt32 total) +{ + GET_Ppmd7z_RangeDec + return p->Code / (p->Range /= total); +} + +static void Range_Normalize(CPpmd7z_RangeDec *p) +{ + if (p->Range < kTopValue) + { + p->Code = (p->Code << 8) | IByteIn_Read(p->Stream); + p->Range <<= 8; + if (p->Range < kTopValue) + { + p->Code = (p->Code << 8) | IByteIn_Read(p->Stream); + p->Range <<= 8; + } + } +} + +static void Range_Decode(const IPpmd7_RangeDec *pp, UInt32 start, UInt32 size) +{ + GET_Ppmd7z_RangeDec + p->Code -= start * p->Range; + p->Range *= size; + Range_Normalize(p); +} + +static UInt32 Range_DecodeBit(const IPpmd7_RangeDec *pp, UInt32 size0) +{ + GET_Ppmd7z_RangeDec + UInt32 newBound = (p->Range >> 14) * size0; + UInt32 symbol; + if (p->Code < newBound) + { + symbol = 0; + p->Range = newBound; + } + else + { + symbol = 1; + p->Code -= newBound; + p->Range -= newBound; + } + Range_Normalize(p); + return symbol; +} + +void Ppmd7z_RangeDec_CreateVTable(CPpmd7z_RangeDec *p) +{ + p->vt.GetThreshold = Range_GetThreshold; + p->vt.Decode = Range_Decode; + p->vt.DecodeBit = Range_DecodeBit; +} + + +#define MASK(sym) ((signed char *)charMask)[sym] + +int Ppmd7_DecodeSymbol(CPpmd7 *p, const IPpmd7_RangeDec *rc) +{ + size_t charMask[256 / sizeof(size_t)]; + if (p->MinContext->NumStats != 1) + { + CPpmd_State *s = Ppmd7_GetStats(p, p->MinContext); + unsigned i; + UInt32 count, hiCnt; + if ((count = rc->GetThreshold(rc, p->MinContext->SummFreq)) < (hiCnt = s->Freq)) + { + Byte symbol; + rc->Decode(rc, 0, s->Freq); + p->FoundState = s; + symbol = s->Symbol; + Ppmd7_Update1_0(p); + return symbol; + } + p->PrevSuccess = 0; + i = p->MinContext->NumStats - 1; + do + { + if ((hiCnt += (++s)->Freq) > count) + { + Byte symbol; + rc->Decode(rc, hiCnt - s->Freq, s->Freq); + p->FoundState = s; + symbol = s->Symbol; + Ppmd7_Update1(p); + return symbol; + } + } + while (--i); + if (count >= p->MinContext->SummFreq) + return -2; + p->HiBitsFlag = 
p->HB2Flag[p->FoundState->Symbol]; + rc->Decode(rc, hiCnt, p->MinContext->SummFreq - hiCnt); + PPMD_SetAllBitsIn256Bytes(charMask); + MASK(s->Symbol) = 0; + i = p->MinContext->NumStats - 1; + do { MASK((--s)->Symbol) = 0; } while (--i); + } + else + { + UInt16 *prob = Ppmd7_GetBinSumm(p); + if (rc->DecodeBit(rc, *prob) == 0) + { + Byte symbol; + *prob = (UInt16)PPMD_UPDATE_PROB_0(*prob); + symbol = (p->FoundState = Ppmd7Context_OneState(p->MinContext))->Symbol; + Ppmd7_UpdateBin(p); + return symbol; + } + *prob = (UInt16)PPMD_UPDATE_PROB_1(*prob); + p->InitEsc = PPMD7_kExpEscape[*prob >> 10]; + PPMD_SetAllBitsIn256Bytes(charMask); + MASK(Ppmd7Context_OneState(p->MinContext)->Symbol) = 0; + p->PrevSuccess = 0; + } + for (;;) + { + CPpmd_State *ps[256], *s; + UInt32 freqSum, count, hiCnt; + CPpmd_See *see; + unsigned i, num, numMasked = p->MinContext->NumStats; + do + { + p->OrderFall++; + if (!p->MinContext->Suffix) + return -1; + p->MinContext = Ppmd7_GetContext(p, p->MinContext->Suffix); + } + while (p->MinContext->NumStats == numMasked); + hiCnt = 0; + s = Ppmd7_GetStats(p, p->MinContext); + i = 0; + num = p->MinContext->NumStats - numMasked; + do + { + int k = (int)(MASK(s->Symbol)); + hiCnt += (s->Freq & k); + ps[i] = s++; + i -= k; + } + while (i != num); + + see = Ppmd7_MakeEscFreq(p, numMasked, &freqSum); + freqSum += hiCnt; + count = rc->GetThreshold(rc, freqSum); + + if (count < hiCnt) + { + Byte symbol; + CPpmd_State **pps = ps; + for (hiCnt = 0; (hiCnt += (*pps)->Freq) <= count; pps++); + s = *pps; + rc->Decode(rc, hiCnt - s->Freq, s->Freq); + Ppmd_See_Update(see); + p->FoundState = s; + symbol = s->Symbol; + Ppmd7_Update2(p); + return symbol; + } + if (count >= freqSum) + return -2; + rc->Decode(rc, hiCnt, freqSum - hiCnt); + see->Summ = (UInt16)(see->Summ + freqSum); + do { MASK(ps[--i]->Symbol) = 0; } while (i != 0); + } +} diff --git a/3rdparty/7z/src/Ppmd7Enc.c b/3rdparty/7z/src/Ppmd7Enc.c new file mode 100644 index 0000000000..a74d3002b6 --- /dev/null +++ b/3rdparty/7z/src/Ppmd7Enc.c @@ -0,0 +1,187 @@ +/* Ppmd7Enc.c -- PPMdH Encoder +2017-04-03 : Igor Pavlov : Public domain +This code is based on PPMd var.H (2001): Dmitry Shkarin : Public domain */ + +#include "Precomp.h" + +#include "Ppmd7.h" + +#define kTopValue (1 << 24) + +void Ppmd7z_RangeEnc_Init(CPpmd7z_RangeEnc *p) +{ + p->Low = 0; + p->Range = 0xFFFFFFFF; + p->Cache = 0; + p->CacheSize = 1; +} + +static void RangeEnc_ShiftLow(CPpmd7z_RangeEnc *p) +{ + if ((UInt32)p->Low < (UInt32)0xFF000000 || (unsigned)(p->Low >> 32) != 0) + { + Byte temp = p->Cache; + do + { + IByteOut_Write(p->Stream, (Byte)(temp + (Byte)(p->Low >> 32))); + temp = 0xFF; + } + while (--p->CacheSize != 0); + p->Cache = (Byte)((UInt32)p->Low >> 24); + } + p->CacheSize++; + p->Low = (UInt32)p->Low << 8; +} + +static void RangeEnc_Encode(CPpmd7z_RangeEnc *p, UInt32 start, UInt32 size, UInt32 total) +{ + p->Low += start * (p->Range /= total); + p->Range *= size; + while (p->Range < kTopValue) + { + p->Range <<= 8; + RangeEnc_ShiftLow(p); + } +} + +static void RangeEnc_EncodeBit_0(CPpmd7z_RangeEnc *p, UInt32 size0) +{ + p->Range = (p->Range >> 14) * size0; + while (p->Range < kTopValue) + { + p->Range <<= 8; + RangeEnc_ShiftLow(p); + } +} + +static void RangeEnc_EncodeBit_1(CPpmd7z_RangeEnc *p, UInt32 size0) +{ + UInt32 newBound = (p->Range >> 14) * size0; + p->Low += newBound; + p->Range -= newBound; + while (p->Range < kTopValue) + { + p->Range <<= 8; + RangeEnc_ShiftLow(p); + } +} + +void Ppmd7z_RangeEnc_FlushData(CPpmd7z_RangeEnc *p) +{ + unsigned 
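+/* Matching encoder-side usage (illustrative sketch, not from the original
+   sources; "outByteVt" is a caller-supplied IByteOut, "ppmd" an initialized
+   CPpmd7 via Ppmd7_Construct / Ppmd7_Alloc / Ppmd7_Init):
+
+     CPpmd7z_RangeEnc rc;
+     rc.Stream = outByteVt;
+     Ppmd7z_RangeEnc_Init(&rc);
+     for (k = 0; k < srcLen; k++)
+       Ppmd7_EncodeSymbol(&ppmd, &rc, src[k]);
+     Ppmd7_EncodeSymbol(&ppmd, &rc, -1);  // optional end marker
+     Ppmd7z_RangeEnc_FlushData(&rc);      // flush the bytes still cached below
+*/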
i;
+  for (i = 0; i < 5; i++)
+    RangeEnc_ShiftLow(p);
+}
+
+
+#define MASK(sym) ((signed char *)charMask)[sym]
+
+void Ppmd7_EncodeSymbol(CPpmd7 *p, CPpmd7z_RangeEnc *rc, int symbol)
+{
+  size_t charMask[256 / sizeof(size_t)];
+  if (p->MinContext->NumStats != 1)
+  {
+    CPpmd_State *s = Ppmd7_GetStats(p, p->MinContext);
+    UInt32 sum;
+    unsigned i;
+    if (s->Symbol == symbol)
+    {
+      RangeEnc_Encode(rc, 0, s->Freq, p->MinContext->SummFreq);
+      p->FoundState = s;
+      Ppmd7_Update1_0(p);
+      return;
+    }
+    p->PrevSuccess = 0;
+    sum = s->Freq;
+    i = p->MinContext->NumStats - 1;
+    do
+    {
+      if ((++s)->Symbol == symbol)
+      {
+        RangeEnc_Encode(rc, sum, s->Freq, p->MinContext->SummFreq);
+        p->FoundState = s;
+        Ppmd7_Update1(p);
+        return;
+      }
+      sum += s->Freq;
+    }
+    while (--i);
+
+    p->HiBitsFlag = p->HB2Flag[p->FoundState->Symbol];
+    PPMD_SetAllBitsIn256Bytes(charMask);
+    MASK(s->Symbol) = 0;
+    i = p->MinContext->NumStats - 1;
+    do { MASK((--s)->Symbol) = 0; } while (--i);
+    RangeEnc_Encode(rc, sum, p->MinContext->SummFreq - sum, p->MinContext->SummFreq);
+  }
+  else
+  {
+    UInt16 *prob = Ppmd7_GetBinSumm(p);
+    CPpmd_State *s = Ppmd7Context_OneState(p->MinContext);
+    if (s->Symbol == symbol)
+    {
+      RangeEnc_EncodeBit_0(rc, *prob);
+      *prob = (UInt16)PPMD_UPDATE_PROB_0(*prob);
+      p->FoundState = s;
+      Ppmd7_UpdateBin(p);
+      return;
+    }
+    else
+    {
+      RangeEnc_EncodeBit_1(rc, *prob);
+      *prob = (UInt16)PPMD_UPDATE_PROB_1(*prob);
+      p->InitEsc = PPMD7_kExpEscape[*prob >> 10];
+      PPMD_SetAllBitsIn256Bytes(charMask);
+      MASK(s->Symbol) = 0;
+      p->PrevSuccess = 0;
+    }
+  }
+  for (;;)
+  {
+    UInt32 escFreq;
+    CPpmd_See *see;
+    CPpmd_State *s;
+    UInt32 sum;
+    unsigned i, numMasked = p->MinContext->NumStats;
+    do
+    {
+      p->OrderFall++;
+      if (!p->MinContext->Suffix)
+        return; /* EndMarker (symbol = -1) */
+      p->MinContext = Ppmd7_GetContext(p, p->MinContext->Suffix);
+    }
+    while (p->MinContext->NumStats == numMasked);
+
+    see = Ppmd7_MakeEscFreq(p, numMasked, &escFreq);
+    s = Ppmd7_GetStats(p, p->MinContext);
+    sum = 0;
+    i = p->MinContext->NumStats;
+    do
+    {
+      int cur = s->Symbol;
+      if (cur == symbol)
+      {
+        UInt32 low = sum;
+        CPpmd_State *s1 = s;
+        do
+        {
+          sum += (s->Freq & (int)(MASK(s->Symbol)));
+          s++;
+        }
+        while (--i);
+        RangeEnc_Encode(rc, low, s1->Freq, sum + escFreq);
+        Ppmd_See_Update(see);
+        p->FoundState = s1;
+        Ppmd7_Update2(p);
+        return;
+      }
+      sum += (s->Freq & (int)(MASK(cur)));
+      MASK(cur) = 0;
+      s++;
+    }
+    while (--i);
+
+    RangeEnc_Encode(rc, sum, escFreq, sum + escFreq);
+    see->Summ = (UInt16)(see->Summ + sum + escFreq);
+  }
+}
diff --git a/3rdparty/7z/src/Precomp.h b/3rdparty/7z/src/Precomp.h
new file mode 100644
index 0000000000..edb5814439
--- /dev/null
+++ b/3rdparty/7z/src/Precomp.h
@@ -0,0 +1,10 @@
+/* Precomp.h -- StdAfx
+2013-11-12 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_PRECOMP_H
+#define __7Z_PRECOMP_H
+
+#include "Compiler.h"
+/* #include "7zTypes.h" */
+
+#endif
diff --git a/3rdparty/7z/src/RotateDefs.h b/3rdparty/7z/src/RotateDefs.h
new file mode 100644
index 0000000000..6c790e791e
--- /dev/null
+++ b/3rdparty/7z/src/RotateDefs.h
@@ -0,0 +1,30 @@
+/* RotateDefs.h -- Rotate functions
+2015-03-25 : Igor Pavlov : Public domain */
+
+#ifndef __ROTATE_DEFS_H
+#define __ROTATE_DEFS_H
+
+#ifdef _MSC_VER
+
+#include <stdlib.h>
+
+/* don't use _rotl with MINGW. It can insert slow call to function.
*/
+
+/* #if (_MSC_VER >= 1200) */
+#pragma intrinsic(_rotl)
+#pragma intrinsic(_rotr)
+/* #endif */
+
+#define rotlFixed(x, n) _rotl((x), (n))
+#define rotrFixed(x, n) _rotr((x), (n))
+
+#else
+
+/* new compilers can translate these macros to fast commands. */
+
+#define rotlFixed(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
+#define rotrFixed(x, n) (((x) >> (n)) | ((x) << (32 - (n))))
+
+#endif
+
+#endif
diff --git a/3rdparty/7z/src/Sha256.c b/3rdparty/7z/src/Sha256.c
new file mode 100644
index 0000000000..90994e5abb
--- /dev/null
+++ b/3rdparty/7z/src/Sha256.c
@@ -0,0 +1,248 @@
+/* Crypto/Sha256.c -- SHA-256 Hash
+2017-04-03 : Igor Pavlov : Public domain
+This code is based on public domain code from Wei Dai's Crypto++ library. */
+
+#include "Precomp.h"
+
+#include <string.h>
+
+#include "CpuArch.h"
+#include "RotateDefs.h"
+#include "Sha256.h"
+
+/* define it for speed optimization */
+#ifndef _SFX
+#define _SHA256_UNROLL
+#define _SHA256_UNROLL2
+#endif
+
+/* #define _SHA256_UNROLL2 */
+
+void Sha256_Init(CSha256 *p)
+{
+  p->state[0] = 0x6a09e667;
+  p->state[1] = 0xbb67ae85;
+  p->state[2] = 0x3c6ef372;
+  p->state[3] = 0xa54ff53a;
+  p->state[4] = 0x510e527f;
+  p->state[5] = 0x9b05688c;
+  p->state[6] = 0x1f83d9ab;
+  p->state[7] = 0x5be0cd19;
+  p->count = 0;
+}
+
+#define S0(x) (rotrFixed(x, 2) ^ rotrFixed(x,13) ^ rotrFixed(x, 22))
+#define S1(x) (rotrFixed(x, 6) ^ rotrFixed(x,11) ^ rotrFixed(x, 25))
+#define s0(x) (rotrFixed(x, 7) ^ rotrFixed(x,18) ^ (x >> 3))
+#define s1(x) (rotrFixed(x,17) ^ rotrFixed(x,19) ^ (x >> 10))
+
+#define blk0(i) (W[i])
+#define blk2(i) (W[i] += s1(W[((i)-2)&15]) + W[((i)-7)&15] + s0(W[((i)-15)&15]))
+
+#define Ch(x,y,z) (z^(x&(y^z)))
+#define Maj(x,y,z) ((x&y)|(z&(x|y)))
+
+#ifdef _SHA256_UNROLL2
+
+#define R(a,b,c,d,e,f,g,h, i) \
+  h += S1(e) + Ch(e,f,g) + K[(i)+(size_t)(j)] + (j ? blk2(i) : blk0(i)); \
+  d += h; \
+  h += S0(a) + Maj(a, b, c)
+
+#define RX_8(i) \
+  R(a,b,c,d,e,f,g,h, i); \
+  R(h,a,b,c,d,e,f,g, i+1); \
+  R(g,h,a,b,c,d,e,f, i+2); \
+  R(f,g,h,a,b,c,d,e, i+3); \
+  R(e,f,g,h,a,b,c,d, i+4); \
+  R(d,e,f,g,h,a,b,c, i+5); \
+  R(c,d,e,f,g,h,a,b, i+6); \
+  R(b,c,d,e,f,g,h,a, i+7)
+
+#define RX_16 RX_8(0); RX_8(8);
+
+#else
+
+#define a(i) T[(0-(i))&7]
+#define b(i) T[(1-(i))&7]
+#define c(i) T[(2-(i))&7]
+#define d(i) T[(3-(i))&7]
+#define e(i) T[(4-(i))&7]
+#define f(i) T[(5-(i))&7]
+#define g(i) T[(6-(i))&7]
+#define h(i) T[(7-(i))&7]
+
+#define R(i) \
+  h(i) += S1(e(i)) + Ch(e(i),f(i),g(i)) + K[(i)+(size_t)(j)] + (j ?
blk2(i) : blk0(i)); \ + d(i) += h(i); \ + h(i) += S0(a(i)) + Maj(a(i), b(i), c(i)) \ + +#ifdef _SHA256_UNROLL + +#define RX_8(i) R(i+0); R(i+1); R(i+2); R(i+3); R(i+4); R(i+5); R(i+6); R(i+7); +#define RX_16 RX_8(0); RX_8(8); + +#else + +#define RX_16 unsigned i; for (i = 0; i < 16; i++) { R(i); } + +#endif + +#endif + +static const UInt32 K[64] = { + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, + 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, + 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, + 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, + 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, + 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, + 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 +}; + +static void Sha256_WriteByteBlock(CSha256 *p) +{ + UInt32 W[16]; + unsigned j; + UInt32 *state; + + #ifdef _SHA256_UNROLL2 + UInt32 a,b,c,d,e,f,g,h; + #else + UInt32 T[8]; + #endif + + for (j = 0; j < 16; j += 4) + { + const Byte *ccc = p->buffer + j * 4; + W[j ] = GetBe32(ccc); + W[j + 1] = GetBe32(ccc + 4); + W[j + 2] = GetBe32(ccc + 8); + W[j + 3] = GetBe32(ccc + 12); + } + + state = p->state; + + #ifdef _SHA256_UNROLL2 + a = state[0]; + b = state[1]; + c = state[2]; + d = state[3]; + e = state[4]; + f = state[5]; + g = state[6]; + h = state[7]; + #else + for (j = 0; j < 8; j++) + T[j] = state[j]; + #endif + + for (j = 0; j < 64; j += 16) + { + RX_16 + } + + #ifdef _SHA256_UNROLL2 + state[0] += a; + state[1] += b; + state[2] += c; + state[3] += d; + state[4] += e; + state[5] += f; + state[6] += g; + state[7] += h; + #else + for (j = 0; j < 8; j++) + state[j] += T[j]; + #endif + + /* Wipe variables */ + /* memset(W, 0, sizeof(W)); */ + /* memset(T, 0, sizeof(T)); */ +} + +#undef S0 +#undef S1 +#undef s0 +#undef s1 + +void Sha256_Update(CSha256 *p, const Byte *data, size_t size) +{ + if (size == 0) + return; + + { + unsigned pos = (unsigned)p->count & 0x3F; + unsigned num; + + p->count += size; + + num = 64 - pos; + if (num > size) + { + memcpy(p->buffer + pos, data, size); + return; + } + + size -= num; + memcpy(p->buffer + pos, data, num); + data += num; + } + + for (;;) + { + Sha256_WriteByteBlock(p); + if (size < 64) + break; + size -= 64; + memcpy(p->buffer, data, 64); + data += 64; + } + + if (size != 0) + memcpy(p->buffer, data, size); +} + +void Sha256_Final(CSha256 *p, Byte *digest) +{ + unsigned pos = (unsigned)p->count & 0x3F; + unsigned i; + + p->buffer[pos++] = 0x80; + + while (pos != (64 - 8)) + { + pos &= 0x3F; + if (pos == 0) + Sha256_WriteByteBlock(p); + p->buffer[pos++] = 0; + } + + { + UInt64 numBits = (p->count << 3); + SetBe32(p->buffer + 64 - 8, (UInt32)(numBits >> 32)); + SetBe32(p->buffer + 64 - 4, (UInt32)(numBits)); + } + + Sha256_WriteByteBlock(p); + + for (i = 0; i < 8; i += 2) + { + UInt32 v0 = p->state[i]; + UInt32 v1 = p->state[i + 1]; + SetBe32(digest , v0); + SetBe32(digest + 4, v1); + digest += 8; + } + + Sha256_Init(p); +} diff --git a/3rdparty/7z/src/Sha256.h b/3rdparty/7z/src/Sha256.h new file mode 100644 index 0000000000..7f17ccf9c9 --- /dev/null +++ b/3rdparty/7z/src/Sha256.h @@ -0,0 +1,26 @@ +/* Sha256.h -- 
SHA-256 Hash +2013-01-18 : Igor Pavlov : Public domain */ + +#ifndef __CRYPTO_SHA256_H +#define __CRYPTO_SHA256_H + +#include "7zTypes.h" + +EXTERN_C_BEGIN + +#define SHA256_DIGEST_SIZE 32 + +typedef struct +{ + UInt32 state[8]; + UInt64 count; + Byte buffer[64]; +} CSha256; + +void Sha256_Init(CSha256 *p); +void Sha256_Update(CSha256 *p, const Byte *data, size_t size); +void Sha256_Final(CSha256 *p, Byte *digest); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/Sort.c b/3rdparty/7z/src/Sort.c new file mode 100644 index 0000000000..73dcbf0596 --- /dev/null +++ b/3rdparty/7z/src/Sort.c @@ -0,0 +1,141 @@ +/* Sort.c -- Sort functions +2014-04-05 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include "Sort.h" + +#define HeapSortDown(p, k, size, temp) \ + { for (;;) { \ + size_t s = (k << 1); \ + if (s > size) break; \ + if (s < size && p[s + 1] > p[s]) s++; \ + if (temp >= p[s]) break; \ + p[k] = p[s]; k = s; \ + } p[k] = temp; } + +void HeapSort(UInt32 *p, size_t size) +{ + if (size <= 1) + return; + p--; + { + size_t i = size / 2; + do + { + UInt32 temp = p[i]; + size_t k = i; + HeapSortDown(p, k, size, temp) + } + while (--i != 0); + } + /* + do + { + size_t k = 1; + UInt32 temp = p[size]; + p[size--] = p[1]; + HeapSortDown(p, k, size, temp) + } + while (size > 1); + */ + while (size > 3) + { + UInt32 temp = p[size]; + size_t k = (p[3] > p[2]) ? 3 : 2; + p[size--] = p[1]; + p[1] = p[k]; + HeapSortDown(p, k, size, temp) + } + { + UInt32 temp = p[size]; + p[size] = p[1]; + if (size > 2 && p[2] < temp) + { + p[1] = p[2]; + p[2] = temp; + } + else + p[1] = temp; + } +} + +void HeapSort64(UInt64 *p, size_t size) +{ + if (size <= 1) + return; + p--; + { + size_t i = size / 2; + do + { + UInt64 temp = p[i]; + size_t k = i; + HeapSortDown(p, k, size, temp) + } + while (--i != 0); + } + /* + do + { + size_t k = 1; + UInt64 temp = p[size]; + p[size--] = p[1]; + HeapSortDown(p, k, size, temp) + } + while (size > 1); + */ + while (size > 3) + { + UInt64 temp = p[size]; + size_t k = (p[3] > p[2]) ? 
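+    /* Extraction phase: p[1] holds the current maximum; it is moved to the
+       end and the larger of the root's children (p[2] or p[3]) is promoted
+       before the displaced value is sifted down. Usage is simply
+       HeapSort64(values, count) on a plain array, e.g. { 3, 1, 2 } sorts
+       to { 1, 2, 3 }. */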
3 : 2;
+    p[size--] = p[1];
+    p[1] = p[k];
+    HeapSortDown(p, k, size, temp)
+  }
+  {
+    UInt64 temp = p[size];
+    p[size] = p[1];
+    if (size > 2 && p[2] < temp)
+    {
+      p[1] = p[2];
+      p[2] = temp;
+    }
+    else
+      p[1] = temp;
+  }
+}
+
+/*
+#define HeapSortRefDown(p, vals, n, size, temp) \
+  { size_t k = n; UInt32 val = vals[temp]; for (;;) { \
+    size_t s = (k << 1); \
+    if (s > size) break; \
+    if (s < size && vals[p[s + 1]] > vals[p[s]]) s++; \
+    if (val >= vals[p[s]]) break; \
+    p[k] = p[s]; k = s; \
+  } p[k] = temp; }
+
+void HeapSortRef(UInt32 *p, UInt32 *vals, size_t size)
+{
+  if (size <= 1)
+    return;
+  p--;
+  {
+    size_t i = size / 2;
+    do
+    {
+      UInt32 temp = p[i];
+      HeapSortRefDown(p, vals, i, size, temp);
+    }
+    while (--i != 0);
+  }
+  do
+  {
+    UInt32 temp = p[size];
+    p[size--] = p[1];
+    HeapSortRefDown(p, vals, 1, size, temp);
+  }
+  while (size > 1);
+}
+*/
diff --git a/3rdparty/7z/src/Sort.h b/3rdparty/7z/src/Sort.h
new file mode 100644
index 0000000000..7209d7824d
--- /dev/null
+++ b/3rdparty/7z/src/Sort.h
@@ -0,0 +1,18 @@
+/* Sort.h -- Sort functions
+2014-04-05 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_SORT_H
+#define __7Z_SORT_H
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+void HeapSort(UInt32 *p, size_t size);
+void HeapSort64(UInt64 *p, size_t size);
+
+/* void HeapSortRef(UInt32 *p, UInt32 *vals, size_t size); */
+
+EXTERN_C_END
+
+#endif
diff --git a/3rdparty/7z/src/Threads.c b/3rdparty/7z/src/Threads.c
new file mode 100644
index 0000000000..8fd86f224b
--- /dev/null
+++ b/3rdparty/7z/src/Threads.c
@@ -0,0 +1,95 @@
+/* Threads.c -- multithreading library
+2017-06-26 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#ifndef UNDER_CE
+#include <process.h>
+#endif
+
+#include "Threads.h"
+
+static WRes GetError()
+{
+  DWORD res = GetLastError();
+  return res ? (WRes)res : 1;
+}
+
+static WRes HandleToWRes(HANDLE h) { return (h != NULL) ? 0 : GetError(); }
+static WRes BOOLToWRes(BOOL v) { return v ? 0 : GetError(); }
+
+WRes HandlePtr_Close(HANDLE *p)
+{
+  if (*p != NULL)
+  {
+    if (!CloseHandle(*p))
+      return GetError();
+    *p = NULL;
+  }
+  return 0;
+}
+
+WRes Handle_WaitObject(HANDLE h) { return (WRes)WaitForSingleObject(h, INFINITE); }
+
+WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param)
+{
+  /* Windows Me/98/95: threadId parameter may not be NULL in _beginthreadex/CreateThread functions */
+
+  #ifdef UNDER_CE
+
+  DWORD threadId;
+  *p = CreateThread(0, 0, func, param, 0, &threadId);
+
+  #else
+
+  unsigned threadId;
+  *p = (HANDLE)_beginthreadex(NULL, 0, func, param, 0, &threadId);
+
+  #endif
+
+  /* maybe we must use errno here, but probably GetLastError() is also OK. */
+  return HandleToWRes(*p);
+}
+
+static WRes Event_Create(CEvent *p, BOOL manualReset, int signaled)
+{
+  *p = CreateEvent(NULL, manualReset, (signaled ?
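+  /* manualReset == FALSE selects the auto-reset behaviour that MtDec relies
+     on: SetEvent releases exactly one waiting thread and the event resets
+     itself, so canRead/canWrite act as one-shot tokens handed around the
+     thread ring. */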
TRUE : FALSE), NULL);
+  return HandleToWRes(*p);
+}
+
+WRes Event_Set(CEvent *p) { return BOOLToWRes(SetEvent(*p)); }
+WRes Event_Reset(CEvent *p) { return BOOLToWRes(ResetEvent(*p)); }
+
+WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled) { return Event_Create(p, TRUE, signaled); }
+WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled) { return Event_Create(p, FALSE, signaled); }
+WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p) { return ManualResetEvent_Create(p, 0); }
+WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p) { return AutoResetEvent_Create(p, 0); }
+
+
+WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount)
+{
+  *p = CreateSemaphore(NULL, (LONG)initCount, (LONG)maxCount, NULL);
+  return HandleToWRes(*p);
+}
+
+static WRes Semaphore_Release(CSemaphore *p, LONG releaseCount, LONG *previousCount)
+  { return BOOLToWRes(ReleaseSemaphore(*p, releaseCount, previousCount)); }
+WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num)
+  { return Semaphore_Release(p, (LONG)num, NULL); }
+WRes Semaphore_Release1(CSemaphore *p) { return Semaphore_ReleaseN(p, 1); }
+
+WRes CriticalSection_Init(CCriticalSection *p)
+{
+  /* InitializeCriticalSection can raise only STATUS_NO_MEMORY exception */
+  #ifdef _MSC_VER
+  __try
+  #endif
+  {
+    InitializeCriticalSection(p);
+    /* InitializeCriticalSectionAndSpinCount(p, 0); */
+  }
+  #ifdef _MSC_VER
+  __except (EXCEPTION_EXECUTE_HANDLER) { return 1; }
+  #endif
+  return 0;
+}
diff --git a/3rdparty/7z/src/Threads.h b/3rdparty/7z/src/Threads.h
new file mode 100644
index 0000000000..f913241aea
--- /dev/null
+++ b/3rdparty/7z/src/Threads.h
@@ -0,0 +1,68 @@
+/* Threads.h -- multithreading library
+2017-06-18 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_THREADS_H
+#define __7Z_THREADS_H
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+WRes HandlePtr_Close(HANDLE *h);
+WRes Handle_WaitObject(HANDLE h);
+
+typedef HANDLE CThread;
+#define Thread_Construct(p) *(p) = NULL
+#define Thread_WasCreated(p) (*(p) != NULL)
+#define Thread_Close(p) HandlePtr_Close(p)
+#define Thread_Wait(p) Handle_WaitObject(*(p))
+
+typedef
+#ifdef UNDER_CE
+  DWORD
+#else
+  unsigned
+#endif
+  THREAD_FUNC_RET_TYPE;
+
+#define THREAD_FUNC_CALL_TYPE MY_STD_CALL
+#define THREAD_FUNC_DECL THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE
+typedef THREAD_FUNC_RET_TYPE (THREAD_FUNC_CALL_TYPE * THREAD_FUNC_TYPE)(void *);
+WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param);
+
+typedef HANDLE CEvent;
+typedef CEvent CAutoResetEvent;
+typedef CEvent CManualResetEvent;
+#define Event_Construct(p) *(p) = NULL
+#define Event_IsCreated(p) (*(p) != NULL)
+#define Event_Close(p) HandlePtr_Close(p)
+#define Event_Wait(p) Handle_WaitObject(*(p))
+WRes Event_Set(CEvent *p);
+WRes Event_Reset(CEvent *p);
+WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled);
+WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p);
+WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled);
+WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p);
+
+typedef HANDLE CSemaphore;
+#define Semaphore_Construct(p) *(p) = NULL
+#define Semaphore_IsCreated(p) (*(p) != NULL)
+#define Semaphore_Close(p) HandlePtr_Close(p)
+#define Semaphore_Wait(p) Handle_WaitObject(*(p))
+WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
+WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num);
+WRes Semaphore_Release1(CSemaphore *p);
+
+typedef CRITICAL_SECTION CCriticalSection;
+WRes
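+/* Minimal usage sketch of the critical-section wrappers (illustrative, not
+   from the original sources):
+
+     CCriticalSection cs;
+     if (CriticalSection_Init(&cs) != 0) { ... handle WRes error ... }
+     CriticalSection_Enter(&cs);
+     // ... touch shared state, e.g. the CMtProgress counters in MtDec.h ...
+     CriticalSection_Leave(&cs);
+     CriticalSection_Delete(&cs);
+*/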
diff --git a/3rdparty/7z/src/Threads.h b/3rdparty/7z/src/Threads.h
new file mode 100644
index 0000000000..f913241aea
--- /dev/null
+++ b/3rdparty/7z/src/Threads.h
@@ -0,0 +1,68 @@
+/* Threads.h -- multithreading library
+2017-06-18 : Igor Pavlov : Public domain */
+
+#ifndef __7Z_THREADS_H
+#define __7Z_THREADS_H
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+WRes HandlePtr_Close(HANDLE *h);
+WRes Handle_WaitObject(HANDLE h);
+
+typedef HANDLE CThread;
+#define Thread_Construct(p) *(p) = NULL
+#define Thread_WasCreated(p) (*(p) != NULL)
+#define Thread_Close(p) HandlePtr_Close(p)
+#define Thread_Wait(p) Handle_WaitObject(*(p))
+
+typedef
+#ifdef UNDER_CE
+  DWORD
+#else
+  unsigned
+#endif
+  THREAD_FUNC_RET_TYPE;
+
+#define THREAD_FUNC_CALL_TYPE MY_STD_CALL
+#define THREAD_FUNC_DECL THREAD_FUNC_RET_TYPE THREAD_FUNC_CALL_TYPE
+typedef THREAD_FUNC_RET_TYPE (THREAD_FUNC_CALL_TYPE * THREAD_FUNC_TYPE)(void *);
+WRes Thread_Create(CThread *p, THREAD_FUNC_TYPE func, LPVOID param);
+
+typedef HANDLE CEvent;
+typedef CEvent CAutoResetEvent;
+typedef CEvent CManualResetEvent;
+#define Event_Construct(p) *(p) = NULL
+#define Event_IsCreated(p) (*(p) != NULL)
+#define Event_Close(p) HandlePtr_Close(p)
+#define Event_Wait(p) Handle_WaitObject(*(p))
+WRes Event_Set(CEvent *p);
+WRes Event_Reset(CEvent *p);
+WRes ManualResetEvent_Create(CManualResetEvent *p, int signaled);
+WRes ManualResetEvent_CreateNotSignaled(CManualResetEvent *p);
+WRes AutoResetEvent_Create(CAutoResetEvent *p, int signaled);
+WRes AutoResetEvent_CreateNotSignaled(CAutoResetEvent *p);
+
+typedef HANDLE CSemaphore;
+#define Semaphore_Construct(p) *(p) = NULL
+#define Semaphore_IsCreated(p) (*(p) != NULL)
+#define Semaphore_Close(p) HandlePtr_Close(p)
+#define Semaphore_Wait(p) Handle_WaitObject(*(p))
+WRes Semaphore_Create(CSemaphore *p, UInt32 initCount, UInt32 maxCount);
+WRes Semaphore_ReleaseN(CSemaphore *p, UInt32 num);
+WRes Semaphore_Release1(CSemaphore *p);
+
+typedef CRITICAL_SECTION CCriticalSection;
+WRes CriticalSection_Init(CCriticalSection *p);
+#define CriticalSection_Delete(p) DeleteCriticalSection(p)
+#define CriticalSection_Enter(p) EnterCriticalSection(p)
+#define CriticalSection_Leave(p) LeaveCriticalSection(p)
+
+EXTERN_C_END
+
+#endif
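Threads.h above wraps Win32 threading primitives behind WRes-returning helpers, where 0 means success. A hedged usage sketch (illustrative only, not part of the vendored sources; it assumes a Windows build, and WorkerFunc and the event are hypothetical):

    /* Illustrative sketch -- not part of the patch. */
    #include <stdio.h>
    #include "Threads.h"

    static THREAD_FUNC_DECL WorkerFunc(void *param)
    {
      CAutoResetEvent *done = (CAutoResetEvent *)param;
      printf("worker running\n");
      Event_Set(done);            /* signal completion to the main thread */
      return 0;
    }

    int main(void)
    {
      CThread thread;
      CAutoResetEvent done;
      Thread_Construct(&thread);
      Event_Construct(&done);
      if (AutoResetEvent_CreateNotSignaled(&done) != 0)
        return 1;
      if (Thread_Create(&thread, WorkerFunc, &done) != 0)
        return 1;
      Event_Wait(&done);          /* wait until the worker signals */
      Thread_Wait(&thread);       /* then join the thread */
      Thread_Close(&thread);
      Event_Close(&done);
      return 0;
    }

THREAD_FUNC_DECL expands to the return type and calling convention _beginthreadex expects, so worker functions should always be declared with it.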
diff --git a/3rdparty/7z/src/Xz.c b/3rdparty/7z/src/Xz.c
new file mode 100644
index 0000000000..7e061d6e7d
--- /dev/null
+++ b/3rdparty/7z/src/Xz.c
@@ -0,0 +1,90 @@
+/* Xz.c - Xz
+2017-05-12 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include "7zCrc.h"
+#include "CpuArch.h"
+#include "Xz.h"
+#include "XzCrc64.h"
+
+const Byte XZ_SIG[XZ_SIG_SIZE] = { 0xFD, '7', 'z', 'X', 'Z', 0 };
+/* const Byte XZ_FOOTER_SIG[XZ_FOOTER_SIG_SIZE] = { 'Y', 'Z' }; */
+
+unsigned Xz_WriteVarInt(Byte *buf, UInt64 v)
+{
+  unsigned i = 0;
+  do
+  {
+    buf[i++] = (Byte)((v & 0x7F) | 0x80);
+    v >>= 7;
+  }
+  while (v != 0);
+  buf[(size_t)i - 1] &= 0x7F;
+  return i;
+}
+
+void Xz_Construct(CXzStream *p)
+{
+  p->numBlocks = 0;
+  p->blocks = NULL;
+  p->flags = 0;
+}
+
+void Xz_Free(CXzStream *p, ISzAllocPtr alloc)
+{
+  ISzAlloc_Free(alloc, p->blocks);
+  p->numBlocks = 0;
+  p->blocks = NULL;
+}
+
+unsigned XzFlags_GetCheckSize(CXzStreamFlags f)
+{
+  unsigned t = XzFlags_GetCheckType(f);
+  return (t == 0) ? 0 : (4 << ((t - 1) / 3));
+}
+
+void XzCheck_Init(CXzCheck *p, unsigned mode)
+{
+  p->mode = mode;
+  switch (mode)
+  {
+    case XZ_CHECK_CRC32: p->crc = CRC_INIT_VAL; break;
+    case XZ_CHECK_CRC64: p->crc64 = CRC64_INIT_VAL; break;
+    case XZ_CHECK_SHA256: Sha256_Init(&p->sha); break;
+  }
+}
+
+void XzCheck_Update(CXzCheck *p, const void *data, size_t size)
+{
+  switch (p->mode)
+  {
+    case XZ_CHECK_CRC32: p->crc = CrcUpdate(p->crc, data, size); break;
+    case XZ_CHECK_CRC64: p->crc64 = Crc64Update(p->crc64, data, size); break;
+    case XZ_CHECK_SHA256: Sha256_Update(&p->sha, (const Byte *)data, size); break;
+  }
+}
+
+int XzCheck_Final(CXzCheck *p, Byte *digest)
+{
+  switch (p->mode)
+  {
+    case XZ_CHECK_CRC32:
+      SetUi32(digest, CRC_GET_DIGEST(p->crc));
+      break;
+    case XZ_CHECK_CRC64:
+    {
+      int i;
+      UInt64 v = CRC64_GET_DIGEST(p->crc64);
+      for (i = 0; i < 8; i++, v >>= 8)
+        digest[i] = (Byte)(v & 0xFF);
+      break;
+    }
+    case XZ_CHECK_SHA256:
+      Sha256_Final(&p->sha, digest);
+      break;
+    default:
+      return 0;
+  }
+  return 1;
+}
diff --git a/3rdparty/7z/src/Xz.h b/3rdparty/7z/src/Xz.h
new file mode 100644
index 0000000000..fad56a3fb7
--- /dev/null
+++ b/3rdparty/7z/src/Xz.h
@@ -0,0 +1,460 @@
+/* Xz.h - Xz interface
+2018-07-04 : Igor Pavlov : Public domain */
+
+#ifndef __XZ_H
+#define __XZ_H
+
+#include "Sha256.h"
+
+EXTERN_C_BEGIN
+
+#define XZ_ID_Subblock 1
+#define XZ_ID_Delta 3
+#define XZ_ID_X86 4
+#define XZ_ID_PPC 5
+#define XZ_ID_IA64 6
+#define XZ_ID_ARM 7
+#define XZ_ID_ARMT 8
+#define XZ_ID_SPARC 9
+#define XZ_ID_LZMA2 0x21
+
+unsigned Xz_ReadVarInt(const Byte *p, size_t maxSize, UInt64 *value);
+unsigned Xz_WriteVarInt(Byte *buf, UInt64 v);
+
+/* ---------- xz block ---------- */
+
+#define XZ_BLOCK_HEADER_SIZE_MAX 1024
+
+#define XZ_NUM_FILTERS_MAX 4
+#define XZ_BF_NUM_FILTERS_MASK 3
+#define XZ_BF_PACK_SIZE (1 << 6)
+#define XZ_BF_UNPACK_SIZE (1 << 7)
+
+#define XZ_FILTER_PROPS_SIZE_MAX 20
+
+typedef struct
+{
+  UInt64 id;
+  UInt32 propsSize;
+  Byte props[XZ_FILTER_PROPS_SIZE_MAX];
+} CXzFilter;
+
+typedef struct
+{
+  UInt64 packSize;
+  UInt64 unpackSize;
+  Byte flags;
+  CXzFilter filters[XZ_NUM_FILTERS_MAX];
+} CXzBlock;
+
+#define XzBlock_GetNumFilters(p) (((p)->flags & XZ_BF_NUM_FILTERS_MASK) + 1)
+#define XzBlock_HasPackSize(p) (((p)->flags & XZ_BF_PACK_SIZE) != 0)
+#define XzBlock_HasUnpackSize(p) (((p)->flags & XZ_BF_UNPACK_SIZE) != 0)
+#define XzBlock_HasUnsupportedFlags(p) (((p)->flags & ~(XZ_BF_NUM_FILTERS_MASK | XZ_BF_PACK_SIZE | XZ_BF_UNPACK_SIZE)) != 0)
+
+SRes XzBlock_Parse(CXzBlock *p, const Byte *header);
+SRes XzBlock_ReadHeader(CXzBlock *p, ISeqInStream *inStream, BoolInt *isIndex, UInt32 *headerSizeRes);
+
+/* ---------- xz stream ---------- */
+
+#define XZ_SIG_SIZE 6
+#define XZ_FOOTER_SIG_SIZE 2
+
+extern const Byte XZ_SIG[XZ_SIG_SIZE];
+
+/*
+extern const Byte XZ_FOOTER_SIG[XZ_FOOTER_SIG_SIZE];
+*/
+
+#define XZ_FOOTER_SIG_0 'Y'
+#define XZ_FOOTER_SIG_1 'Z'
+
+#define XZ_STREAM_FLAGS_SIZE 2
+#define XZ_STREAM_CRC_SIZE 4
+
+#define XZ_STREAM_HEADER_SIZE (XZ_SIG_SIZE + XZ_STREAM_FLAGS_SIZE + XZ_STREAM_CRC_SIZE)
+#define XZ_STREAM_FOOTER_SIZE (XZ_FOOTER_SIG_SIZE + XZ_STREAM_FLAGS_SIZE + XZ_STREAM_CRC_SIZE + 4)
+
+#define XZ_CHECK_MASK 0xF
+#define XZ_CHECK_NO 0
+#define XZ_CHECK_CRC32 1
+#define XZ_CHECK_CRC64 4
+#define XZ_CHECK_SHA256 10
+
+typedef struct
+{
+  unsigned mode;
+  UInt32 crc;
+  UInt64 crc64;
+  CSha256 sha;
+} CXzCheck;
+
+void XzCheck_Init(CXzCheck *p, unsigned mode);
+void XzCheck_Update(CXzCheck *p, const void *data, size_t size);
+int XzCheck_Final(CXzCheck *p, Byte *digest);
+
+typedef UInt16 CXzStreamFlags;
+
+#define XzFlags_IsSupported(f) ((f) <= XZ_CHECK_MASK)
+#define XzFlags_GetCheckType(f) ((f) & XZ_CHECK_MASK)
+#define XzFlags_HasDataCrc32(f) (XzFlags_GetCheckType(f) == XZ_CHECK_CRC32)
+unsigned XzFlags_GetCheckSize(CXzStreamFlags f);
+
+SRes Xz_ParseHeader(CXzStreamFlags *p, const Byte *buf);
+SRes Xz_ReadHeader(CXzStreamFlags *p, ISeqInStream *inStream);
+
+typedef struct
+{
+  UInt64 unpackSize;
+  UInt64 totalSize;
+} CXzBlockSizes;
+
+typedef struct
+{
+  CXzStreamFlags flags;
+  size_t numBlocks;
+  CXzBlockSizes *blocks;
+  UInt64 startOffset;
+} CXzStream;
+
+void Xz_Construct(CXzStream *p);
+void Xz_Free(CXzStream *p, ISzAllocPtr alloc);
+
+#define XZ_SIZE_OVERFLOW ((UInt64)(Int64)-1)
+
+UInt64 Xz_GetUnpackSize(const CXzStream *p);
+UInt64 Xz_GetPackSize(const CXzStream *p);
+
+typedef struct
+{
+  size_t num;
+  size_t numAllocated;
+  CXzStream *streams;
+} CXzs;
+
+void Xzs_Construct(CXzs *p);
+void Xzs_Free(CXzs *p, ISzAllocPtr alloc);
+SRes Xzs_ReadBackward(CXzs *p, ILookInStream *inStream, Int64 *startOffset, ICompressProgress *progress, ISzAllocPtr alloc);
+
+UInt64 Xzs_GetNumBlocks(const CXzs *p);
+UInt64 Xzs_GetUnpackSize(const CXzs *p);
+
+
+// ECoderStatus values are identical to ELzmaStatus values of LZMA2 decoder
+
+typedef enum
+{
+  CODER_STATUS_NOT_SPECIFIED,               /* use main error code instead */
+  CODER_STATUS_FINISHED_WITH_MARK,          /* stream was finished with end mark.
*/ + CODER_STATUS_NOT_FINISHED, /* stream was not finished */ + CODER_STATUS_NEEDS_MORE_INPUT /* you must provide more input bytes */ +} ECoderStatus; + + +// ECoderFinishMode values are identical to ELzmaFinishMode + +typedef enum +{ + CODER_FINISH_ANY, /* finish at any point */ + CODER_FINISH_END /* block must be finished at the end */ +} ECoderFinishMode; + + +typedef struct _IStateCoder +{ + void *p; + void (*Free)(void *p, ISzAllocPtr alloc); + SRes (*SetProps)(void *p, const Byte *props, size_t propSize, ISzAllocPtr alloc); + void (*Init)(void *p); + SRes (*Code2)(void *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, + int srcWasFinished, ECoderFinishMode finishMode, + // int *wasFinished, + ECoderStatus *status); + SizeT (*Filter)(void *p, Byte *data, SizeT size); +} IStateCoder; + + + +#define MIXCODER_NUM_FILTERS_MAX 4 + +typedef struct +{ + ISzAllocPtr alloc; + Byte *buf; + unsigned numCoders; + + Byte *outBuf; + size_t outBufSize; + size_t outWritten; // is equal to lzmaDecoder.dicPos (in outBuf mode) + BoolInt wasFinished; + SRes res; + ECoderStatus status; + // BoolInt SingleBufMode; + + int finished[MIXCODER_NUM_FILTERS_MAX - 1]; + size_t pos[MIXCODER_NUM_FILTERS_MAX - 1]; + size_t size[MIXCODER_NUM_FILTERS_MAX - 1]; + UInt64 ids[MIXCODER_NUM_FILTERS_MAX]; + SRes results[MIXCODER_NUM_FILTERS_MAX]; + IStateCoder coders[MIXCODER_NUM_FILTERS_MAX]; +} CMixCoder; + + +typedef enum +{ + XZ_STATE_STREAM_HEADER, + XZ_STATE_STREAM_INDEX, + XZ_STATE_STREAM_INDEX_CRC, + XZ_STATE_STREAM_FOOTER, + XZ_STATE_STREAM_PADDING, + XZ_STATE_BLOCK_HEADER, + XZ_STATE_BLOCK, + XZ_STATE_BLOCK_FOOTER +} EXzState; + + +typedef struct +{ + EXzState state; + UInt32 pos; + unsigned alignPos; + unsigned indexPreSize; + + CXzStreamFlags streamFlags; + + UInt32 blockHeaderSize; + UInt64 packSize; + UInt64 unpackSize; + + UInt64 numBlocks; // number of finished blocks in current stream + UInt64 indexSize; + UInt64 indexPos; + UInt64 padSize; + + UInt64 numStartedStreams; + UInt64 numFinishedStreams; + UInt64 numTotalBlocks; + + UInt32 crc; + CMixCoder decoder; + CXzBlock block; + CXzCheck check; + CSha256 sha; + + BoolInt parseMode; + BoolInt headerParsedOk; + BoolInt decodeToStreamSignature; + unsigned decodeOnlyOneBlock; + + Byte *outBuf; + size_t outBufSize; + size_t outDataWritten; // the size of data in (outBuf) that were fully unpacked + + Byte shaDigest[SHA256_DIGEST_SIZE]; + Byte buf[XZ_BLOCK_HEADER_SIZE_MAX]; +} CXzUnpacker; + +/* alloc : aligned for cache line allocation is better */ +void XzUnpacker_Construct(CXzUnpacker *p, ISzAllocPtr alloc); +void XzUnpacker_Init(CXzUnpacker *p); +void XzUnpacker_SetOutBuf(CXzUnpacker *p, Byte *outBuf, size_t outBufSize); +void XzUnpacker_Free(CXzUnpacker *p); + +/* + XzUnpacker + The sequence for decoding functions: + { + XzUnpacker_Construct() + [Decoding_Calls] + XzUnpacker_Free() + } + + [Decoding_Calls] + + There are 3 types of interfaces for [Decoding_Calls] calls: + + Interface-1 : Partial output buffers: + { + XzUnpacker_Init() + for() + XzUnpacker_Code(); + } + + Interface-2 : Direct output buffer: + Use it, if you know exact size of decoded data, and you need + whole xz unpacked data in one output buffer. + xz unpacker doesn't allocate additional buffer for lzma2 dictionary in that mode. 
+  {
+    XzUnpacker_Init()
+    XzUnpacker_SetOutBufMode(); // to set output buffer and size
+    for()
+      XzUnpacker_Code(); // (dest = NULL) in XzUnpacker_Code()
+  }
+
+  Interface-3 : Direct output buffer : One call full decoding
+    It unpacks whole input buffer to output buffer in one call.
+    It uses Interface-2 internally.
+  {
+    XzUnpacker_CodeFull()
+  }
+*/
+
+/*
+finishMode:
+  It has meaning only if the decoding reaches output limit (*destLen).
+  CODER_FINISH_ANY - use smallest number of input bytes
+  CODER_FINISH_END - read EndOfStream marker after decoding
+
+Returns:
+  SZ_OK
+    status:
+      CODER_STATUS_NOT_FINISHED,
+      CODER_STATUS_NEEDS_MORE_INPUT - maybe there are more xz streams,
+          call XzUnpacker_IsStreamWasFinished to check that current stream was finished
+  SZ_ERROR_MEM  - Memory allocation error
+  SZ_ERROR_DATA - Data error
+  SZ_ERROR_UNSUPPORTED - Unsupported method or method properties
+  SZ_ERROR_CRC  - CRC error
+  // SZ_ERROR_INPUT_EOF - It needs more bytes in input buffer (src).
+
+  SZ_ERROR_NO_ARCHIVE - the error with xz Stream Header with one of the following reasons:
+     - xz Stream Signature failure
+     - CRC32 check of the xz Stream Header failed
+     - The size of Stream padding is not a multiple of four bytes.
+    It's possible to get that error, if the xz stream was finished and the input
+    contains some other data. In that case you can call the XzUnpacker_GetExtraSize()
+    function to get the real size of the xz stream.
+*/
+
+
+SRes XzUnpacker_Code(CXzUnpacker *p, Byte *dest, SizeT *destLen,
+    const Byte *src, SizeT *srcLen, int srcFinished,
+    ECoderFinishMode finishMode, ECoderStatus *status);
+
+SRes XzUnpacker_CodeFull(CXzUnpacker *p, Byte *dest, SizeT *destLen,
+    const Byte *src, SizeT *srcLen,
+    ECoderFinishMode finishMode, ECoderStatus *status);
+
+BoolInt XzUnpacker_IsStreamWasFinished(const CXzUnpacker *p);
+
+/*
+XzUnpacker_GetExtraSize() returns the number of unconfirmed bytes,
+ if it's in (XZ_STATE_STREAM_HEADER) state or in (XZ_STATE_STREAM_PADDING) state.
+These bytes can be data after the xz archive, or
+the start of a new xz stream.
+
+Call XzUnpacker_GetExtraSize() after XzUnpacker_Code() function to detect the real size of
+the xz stream in two cases, if XzUnpacker_Code() returns:
+  res == SZ_OK && status == CODER_STATUS_NEEDS_MORE_INPUT
+  res == SZ_ERROR_NO_ARCHIVE
+*/
+
+UInt64 XzUnpacker_GetExtraSize(const CXzUnpacker *p);
+
+
+/*
+  for random block decoding:
+    XzUnpacker_Init();
+    set CXzUnpacker::streamFlags
+    XzUnpacker_PrepareToRandomBlockDecoding()
+    loop
+    {
+      XzUnpacker_Code()
+      XzUnpacker_IsBlockFinished()
+    }
+*/
+
+void XzUnpacker_PrepareToRandomBlockDecoding(CXzUnpacker *p);
+BoolInt XzUnpacker_IsBlockFinished(const CXzUnpacker *p);
+
+#define XzUnpacker_GetPackSizeForIndex(p) ((p)->packSize + (p)->blockHeaderSize + XzFlags_GetCheckSize((p)->streamFlags))
+
+
+
+/* ---------- Multi Threading Decoding ---------- */
+
+
+typedef struct
+{
+  size_t inBufSize_ST;
+  size_t outStep_ST;
+  BoolInt ignoreErrors;
+
+  #ifndef _7ZIP_ST
+  unsigned numThreads;
+  size_t inBufSize_MT;
+  size_t memUseMax;
+  #endif
+} CXzDecMtProps;
+
+void XzDecMtProps_Init(CXzDecMtProps *p);
+
+
+typedef void * CXzDecMtHandle;
+
+/*
+  alloc    : XzDecMt uses CAlignOffsetAlloc for addresses allocated by (alloc).
+  allocMid : for big allocations, aligned allocation is better
+*/
+
+CXzDecMtHandle XzDecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid);
+void XzDecMt_Destroy(CXzDecMtHandle p);
+
+
+typedef struct
+{
+  Byte UnpackSize_Defined;
+  Byte NumStreams_Defined;
+  Byte NumBlocks_Defined;
+
+  Byte DataAfterEnd;
+  Byte DecodingTruncated; // Decoding was Truncated, we need only partial output data
+
+  UInt64 InSize;  // pack size processed
+  UInt64 OutSize;
+
+  UInt64 NumStreams;
+  UInt64 NumBlocks;
+
+  SRes DecodeRes;
+  SRes ReadRes;
+  SRes ProgressRes;
+  SRes CombinedRes;
+  SRes CombinedRes_Type;
+
+} CXzStatInfo;
+
+void XzStatInfo_Clear(CXzStatInfo *p);
+
+/*
+XzDecMt_Decode()
+SRes:
+  SZ_OK - OK
+  SZ_ERROR_MEM - Memory allocation error
+  SZ_ERROR_NO_ARCHIVE - input is not an xz archive
+  SZ_ERROR_ARCHIVE - Headers error
+  SZ_ERROR_DATA - Data Error
+  SZ_ERROR_CRC - CRC Error
+  SZ_ERROR_INPUT_EOF - it needs more input data
+  SZ_ERROR_WRITE - ISeqOutStream error
+  (SZ_ERROR_READ) - ISeqInStream errors
+  (SZ_ERROR_PROGRESS) - ICompressProgress errors
+  // SZ_ERROR_THREAD - error in multi-threading functions
+  MY_SRes_HRESULT_FROM_WRes(WRes_error) - error in multi-threading function
+*/
+
+SRes XzDecMt_Decode(CXzDecMtHandle p,
+    const CXzDecMtProps *props,
+    const UInt64 *outDataSize, // NULL means undefined
+    int finishMode, // 0 - partial unpacking is allowed, 1 - xz stream(s) must be finished
+    ISeqOutStream *outStream,
+    // Byte *outBuf, size_t *outBufSize,
+    ISeqInStream *inStream,
+    // const Byte *inData, size_t inDataSize,
+    CXzStatInfo *stat,
+    int *isMT, // 0 means that ST (Single-Thread) version was used
+    ICompressProgress *progress);
+
+EXTERN_C_END
+
+#endif
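Xz.h above documents three decoding interfaces. As a concrete illustration of Interface-3 (one-call decoding into a caller-provided buffer of known size), here is a hedged sketch; it is not part of the vendored sources, DecodeXzWhole is a hypothetical wrapper, and g_Alloc is the default allocator from Alloc.h. The CRC table generators must run once per process before any decode, since the header parser uses CRC32 and CRC64-checked streams use the CRC64 table.

    /* Illustrative sketch -- not part of the patch. */
    #include "7zCrc.h"
    #include "Alloc.h"
    #include "Xz.h"
    #include "XzCrc64.h"

    static SRes DecodeXzWhole(Byte *outBuf, SizeT outSize,
        const Byte *inBuf, SizeT inSize)
    {
      CXzUnpacker unpacker;
      ECoderStatus status;
      SizeT destLen = outSize, srcLen = inSize;
      SRes res;

      CrcGenerateTable();    /* CRC32, used for header checks */
      Crc64GenerateTable();  /* needed when the stream uses a CRC64 check */

      XzUnpacker_Construct(&unpacker, &g_Alloc);
      res = XzUnpacker_CodeFull(&unpacker, outBuf, &destLen,
          inBuf, &srcLen, CODER_FINISH_END, &status);
      XzUnpacker_Free(&unpacker);

      if (res == SZ_OK && status != CODER_STATUS_FINISHED_WITH_MARK)
        res = SZ_ERROR_DATA;  /* input ended without a finished stream */
      return res;
    }

For streaming input, Interface-1 with repeated XzUnpacker_Code() calls avoids holding the whole file in memory.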
diff --git a/3rdparty/7z/src/XzCrc64.c b/3rdparty/7z/src/XzCrc64.c
new file mode 100644
index 0000000000..e9ca9ec265
--- /dev/null
+++ b/3rdparty/7z/src/XzCrc64.c
@@ -0,0 +1,86 @@
+/* XzCrc64.c -- CRC64 calculation
+2017-05-23 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include "XzCrc64.h"
+#include "CpuArch.h"
+
+#define kCrc64Poly UINT64_CONST(0xC96C5795D7870F42)
+
+#ifdef MY_CPU_LE
+  #define CRC64_NUM_TABLES 4
+#else
+  #define CRC64_NUM_TABLES 5
+  #define CRC_UINT64_SWAP(v) \
+      ((v >> 56) \
+    | ((v >> 40) & ((UInt64)0xFF <<  8)) \
+    | ((v >> 24) & ((UInt64)0xFF << 16)) \
+    | ((v >>  8) & ((UInt64)0xFF << 24)) \
+    | ((v <<  8) & ((UInt64)0xFF << 32)) \
+    | ((v << 24) & ((UInt64)0xFF << 40)) \
+    | ((v << 40) & ((UInt64)0xFF << 48)) \
+    | ((v << 56)))
+
+  UInt64 MY_FAST_CALL XzCrc64UpdateT1_BeT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
+#endif
+
+#ifndef MY_CPU_BE
+  UInt64 MY_FAST_CALL XzCrc64UpdateT4(UInt64 v, const void *data, size_t size, const UInt64 *table);
+#endif
+
+typedef UInt64 (MY_FAST_CALL *CRC64_FUNC)(UInt64 v, const void *data, size_t size, const UInt64 *table);
+
+static CRC64_FUNC g_Crc64Update;
+UInt64 g_Crc64Table[256 * CRC64_NUM_TABLES];
+
+UInt64 MY_FAST_CALL Crc64Update(UInt64 v, const void *data, size_t size)
+{
+  return g_Crc64Update(v, data, size, g_Crc64Table);
+}
+
+UInt64 MY_FAST_CALL Crc64Calc(const void *data, size_t size)
+{
+  return g_Crc64Update(CRC64_INIT_VAL, data, size, g_Crc64Table) ^ CRC64_INIT_VAL;
+}
+
+void MY_FAST_CALL Crc64GenerateTable()
+{
+  UInt32 i;
+  for (i = 0; i < 256; i++)
+  {
+    UInt64 r = i;
+    unsigned j;
+    for (j = 0; j < 8; j++)
+      r = (r >> 1) ^ (kCrc64Poly & ((UInt64)0 - (r & 1)));
+    g_Crc64Table[i] = r;
+  }
+  for (i = 256; i < 256 * CRC64_NUM_TABLES; i++)
+  {
+    UInt64 r = g_Crc64Table[(size_t)i - 256];
+    g_Crc64Table[i] = g_Crc64Table[r & 0xFF] ^ (r >> 8);
+  }
+
+  #ifdef MY_CPU_LE
+
+  g_Crc64Update = XzCrc64UpdateT4;
+
+  #else
+  {
+    #ifndef MY_CPU_BE
+    UInt32 k = 1;
+    if (*(const Byte *)&k == 1)
+      g_Crc64Update = XzCrc64UpdateT4;
+    else
+    #endif
+    {
+      for (i = 256 * CRC64_NUM_TABLES - 1; i >= 256; i--)
+      {
+        UInt64 x = g_Crc64Table[(size_t)i - 256];
+        g_Crc64Table[i] = CRC_UINT64_SWAP(x);
+      }
+      g_Crc64Update = XzCrc64UpdateT1_BeT4;
+    }
+  }
+  #endif
+}
diff --git a/3rdparty/7z/src/XzCrc64.h b/3rdparty/7z/src/XzCrc64.h
new file mode 100644
index 0000000000..71b10d57e5
--- /dev/null
+++ b/3rdparty/7z/src/XzCrc64.h
@@ -0,0 +1,26 @@
+/* XzCrc64.h -- CRC64 calculation
+2013-01-18 : Igor Pavlov : Public domain */
+
+#ifndef __XZ_CRC64_H
+#define __XZ_CRC64_H
+
+#include <stddef.h>
+
+#include "7zTypes.h"
+
+EXTERN_C_BEGIN
+
+extern UInt64 g_Crc64Table[];
+
+void MY_FAST_CALL Crc64GenerateTable(void);
+
+#define CRC64_INIT_VAL UINT64_CONST(0xFFFFFFFFFFFFFFFF)
+#define CRC64_GET_DIGEST(crc) ((crc) ^ CRC64_INIT_VAL)
+#define CRC64_UPDATE_BYTE(crc, b) (g_Crc64Table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
+
+UInt64 MY_FAST_CALL Crc64Update(UInt64 crc, const void *data, size_t size);
+UInt64 MY_FAST_CALL Crc64Calc(const void *data, size_t size);
+
+EXTERN_C_END
+
+#endif
diff --git a/3rdparty/7z/src/XzCrc64Opt.c b/3rdparty/7z/src/XzCrc64Opt.c
new file mode 100644
index 0000000000..9273465d47
--- /dev/null
+++ b/3rdparty/7z/src/XzCrc64Opt.c
@@ -0,0 +1,69 @@
+/* XzCrc64Opt.c -- CRC64 calculation
+2017-06-30 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+#include "CpuArch.h"
+
+#ifndef MY_CPU_BE
+
+#define CRC64_UPDATE_BYTE_2(crc, b) (table[((crc) ^ (b)) & 0xFF] ^ ((crc) >> 8))
+
+UInt64 MY_FAST_CALL XzCrc64UpdateT4(UInt64 v, const void *data, size_t size, const UInt64 *table)
+{
+  const Byte *p = (const Byte *)data;
+  for (; size > 0 && ((unsigned)(ptrdiff_t)p & 3) != 0; size--, p++)
+    v = CRC64_UPDATE_BYTE_2(v, *p);
+  for (; size >= 4; size -= 4, p += 4)
+  {
+    UInt32 d = (UInt32)v ^ *(const UInt32 *)p;
+    v = (v >> 32)
+        ^ (table + 0x300)[((d      ) & 0xFF)]
+        ^ (table + 0x200)[((d >>  8) & 0xFF)]
+        ^ (table + 0x100)[((d >> 16) & 0xFF)]
+        ^ (table + 0x000)[((d >> 24))];
+  }
+  for (; size > 0; size--, p++)
+    v = CRC64_UPDATE_BYTE_2(v, *p);
+  return v;
+}
+
+#endif
+
+
+#ifndef MY_CPU_LE
+
+#define CRC_UINT64_SWAP(v) \
+    ((v >> 56) \
+  | ((v >> 40) & ((UInt64)0xFF <<  8)) \
+  | ((v >> 24) & ((UInt64)0xFF << 16)) \
+  | ((v >>  8) & ((UInt64)0xFF << 24)) \
+  | ((v <<  8) & ((UInt64)0xFF << 32)) \
+  | ((v << 24) & ((UInt64)0xFF << 40)) \
+  | ((v << 40) & ((UInt64)0xFF << 48)) \
+  | ((v << 56)))
+
+#define CRC64_UPDATE_BYTE_2_BE(crc, b) (table[(Byte)((crc) >> 56) ^ (b)] ^ ((crc) << 8))
+
+UInt64 MY_FAST_CALL XzCrc64UpdateT1_BeT4(UInt64 v, const void *data, size_t size, const UInt64 *table)
+{
+  const Byte *p = (const Byte *)data;
+  table += 0x100;
+  v = CRC_UINT64_SWAP(v);
+  for (; size > 0 && ((unsigned)(ptrdiff_t)p & 3) != 0; size--, p++)
+    v = CRC64_UPDATE_BYTE_2_BE(v, *p);
+  for (; size >= 4; size -= 4, p += 4)
+  {
+    UInt32 d = (UInt32)(v >> 32) ^ *(const UInt32 *)p;
+    v = (v << 32)
+        ^ (table + 0x000)[((d      ) & 0xFF)]
+        ^ (table + 0x100)[((d >>  8) & 0xFF)]
+        ^ (table + 0x200)[((d >> 16) & 0xFF)]
+        ^ (table + 0x300)[((d >> 24))];
+  }
+  for (; size > 0; size--, p++)
+    v = CRC64_UPDATE_BYTE_2_BE(v, *p);
+  return CRC_UINT64_SWAP(v);
+}
+
+#endif
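A quick sanity check for the CRC64 routines above (illustrative only, not part of the vendored sources): Crc64GenerateTable must be called once before Crc64Calc or Crc64Update, because g_Crc64Update is a function pointer that is only selected during table generation.

    /* Illustrative sketch -- not part of the patch. */
    #include <stdio.h>
    #include "XzCrc64.h"

    int main(void)
    {
      UInt64 crc;
      Crc64GenerateTable();             /* must run once before any CRC64 call */
      crc = Crc64Calc("123456789", 9);  /* one-shot over a whole buffer */
      /* expected: 995DC9BBDF1939FA (the standard CRC-64/XZ check value) */
      printf("%08X%08X\n", (UInt32)(crc >> 32), (UInt32)crc);
      /* Incremental form: start from CRC64_INIT_VAL, feed chunks with
         Crc64Update, then finish with CRC64_GET_DIGEST. */
      return 0;
    }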
diff --git a/3rdparty/7z/src/XzDec.c b/3rdparty/7z/src/XzDec.c
new file mode 100644
index 0000000000..4f53272078
--- /dev/null
+++ b/3rdparty/7z/src/XzDec.c
@@ -0,0 +1,2766 @@
+/* XzDec.c -- Xz Decode
+2019-02-02 : Igor Pavlov : Public domain */
+
+#include "Precomp.h"
+
+// #include <stdio.h>
+
+// #define XZ_DUMP
+
+/* #define XZ_DUMP */
+
+#ifdef XZ_DUMP
+#include <stdio.h>
+#endif
+
+// #define SHOW_DEBUG_INFO
+
+#ifdef SHOW_DEBUG_INFO
+#include <stdio.h>
+#endif
+
+#ifdef SHOW_DEBUG_INFO
+#define PRF(x) x
+#else
+#define PRF(x)
+#endif
+
+#define PRF_STR(s) PRF(printf("\n" s "\n"))
+#define PRF_STR_INT(s, d) PRF(printf("\n" s " %d\n", (unsigned)d))
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "7zCrc.h"
+#include "Alloc.h"
+#include "Bra.h"
+#include "CpuArch.h"
+#include "Delta.h"
+#include "Lzma2Dec.h"
+
+// #define USE_SUBBLOCK
+
+#ifdef USE_SUBBLOCK
+#include "Bcj3Dec.c"
+#include "SbDec.h"
+#endif
+
+#include "Xz.h"
+
+#define XZ_CHECK_SIZE_MAX 64
+
+#define CODER_BUF_SIZE ((size_t)1 << 17)
+
+unsigned Xz_ReadVarInt(const Byte *p, size_t maxSize, UInt64 *value)
+{
+  unsigned i, limit;
+  *value = 0;
+  limit = (maxSize > 9) ? 9 : (unsigned)maxSize;
+
+  for (i = 0; i < limit;)
+  {
+    Byte b = p[i];
+    *value |= (UInt64)(b & 0x7F) << (7 * i++);
+    if ((b & 0x80) == 0)
+      return (b == 0 && i != 1) ? 0 : i;
+  }
+  return 0;
+}
+
+/* ---------- BraState ---------- */
+
+#define BRA_BUF_SIZE (1 << 14)
+
+typedef struct
+{
+  size_t bufPos;
+  size_t bufConv;
+  size_t bufTotal;
+
+  int encodeMode;
+
+  UInt32 methodId;
+  UInt32 delta;
+  UInt32 ip;
+  UInt32 x86State;
+  Byte deltaState[DELTA_STATE_SIZE];
+
+  Byte buf[BRA_BUF_SIZE];
+} CBraState;
+
+static void BraState_Free(void *pp, ISzAllocPtr alloc)
+{
+  ISzAlloc_Free(alloc, pp);
+}
+
+static SRes BraState_SetProps(void *pp, const Byte *props, size_t propSize, ISzAllocPtr alloc)
+{
+  CBraState *p = ((CBraState *)pp);
+  UNUSED_VAR(alloc);
+  p->ip = 0;
+  if (p->methodId == XZ_ID_Delta)
+  {
+    if (propSize != 1)
+      return SZ_ERROR_UNSUPPORTED;
+    p->delta = (unsigned)props[0] + 1;
+  }
+  else
+  {
+    if (propSize == 4)
+    {
+      UInt32 v = GetUi32(props);
+      switch (p->methodId)
+      {
+        case XZ_ID_PPC:
+        case XZ_ID_ARM:
+        case XZ_ID_SPARC:
+          if ((v & 3) != 0)
+            return SZ_ERROR_UNSUPPORTED;
+          break;
+        case XZ_ID_ARMT:
+          if ((v & 1) != 0)
+            return SZ_ERROR_UNSUPPORTED;
+          break;
+        case XZ_ID_IA64:
+          if ((v & 0xF) != 0)
+            return SZ_ERROR_UNSUPPORTED;
+          break;
+      }
+      p->ip = v;
+    }
+    else if (propSize != 0)
+      return SZ_ERROR_UNSUPPORTED;
+  }
+  return SZ_OK;
+}
+
+static void BraState_Init(void *pp)
+{
+  CBraState *p = ((CBraState *)pp);
+  p->bufPos = p->bufConv = p->bufTotal = 0;
+  x86_Convert_Init(p->x86State);
+  if (p->methodId == XZ_ID_Delta)
+    Delta_Init(p->deltaState);
+}
+
+
+#define CASE_BRA_CONV(isa) case XZ_ID_ ## isa: size = isa ## _Convert(data, size, p->ip, p->encodeMode); break;
+
+static SizeT BraState_Filter(void *pp, Byte *data, SizeT size)
+{
+  CBraState *p = ((CBraState *)pp);
+  switch (p->methodId)
+  {
+    case XZ_ID_Delta:
+      if (p->encodeMode)
+        Delta_Encode(p->deltaState, p->delta, data, size);
+      else
+        Delta_Decode(p->deltaState, p->delta, data, size);
+      break;
+    case XZ_ID_X86:
+      size = x86_Convert(data, size, p->ip, &p->x86State, p->encodeMode);
+      break;
+    CASE_BRA_CONV(PPC)
+    CASE_BRA_CONV(IA64)
+    CASE_BRA_CONV(ARM)
+    CASE_BRA_CONV(ARMT)
+    CASE_BRA_CONV(SPARC)
+  }
+  p->ip += (UInt32)size;
+  return size;
+}
+
+
+static SRes BraState_Code2(void *pp,
+    Byte *dest, SizeT *destLen,
+    const Byte *src, SizeT *srcLen, int srcWasFinished,
+    ECoderFinishMode finishMode,
+    // int *wasFinished
+    ECoderStatus *status)
+{
+  CBraState *p = ((CBraState *)pp);
+  SizeT destRem = *destLen;
+  SizeT srcRem = *srcLen;
+  UNUSED_VAR(finishMode);
+
+  *destLen = 0;
+  *srcLen = 0;
+  // *wasFinished = False;
+  *status =
CODER_STATUS_NOT_FINISHED; + + while (destRem > 0) + { + if (p->bufPos != p->bufConv) + { + size_t size = p->bufConv - p->bufPos; + if (size > destRem) + size = destRem; + memcpy(dest, p->buf + p->bufPos, size); + p->bufPos += size; + *destLen += size; + dest += size; + destRem -= size; + continue; + } + + p->bufTotal -= p->bufPos; + memmove(p->buf, p->buf + p->bufPos, p->bufTotal); + p->bufPos = 0; + p->bufConv = 0; + { + size_t size = BRA_BUF_SIZE - p->bufTotal; + if (size > srcRem) + size = srcRem; + memcpy(p->buf + p->bufTotal, src, size); + *srcLen += size; + src += size; + srcRem -= size; + p->bufTotal += size; + } + if (p->bufTotal == 0) + break; + + p->bufConv = BraState_Filter(pp, p->buf, p->bufTotal); + + if (p->bufConv == 0) + { + if (!srcWasFinished) + break; + p->bufConv = p->bufTotal; + } + } + + if (p->bufTotal == p->bufPos && srcRem == 0 && srcWasFinished) + { + *status = CODER_STATUS_FINISHED_WITH_MARK; + // *wasFinished = 1; + } + + return SZ_OK; +} + + +SRes BraState_SetFromMethod(IStateCoder *p, UInt64 id, int encodeMode, ISzAllocPtr alloc) +{ + CBraState *decoder; + if (id < XZ_ID_Delta || id > XZ_ID_SPARC) + return SZ_ERROR_UNSUPPORTED; + decoder = (CBraState *)p->p; + if (!decoder) + { + decoder = (CBraState *)ISzAlloc_Alloc(alloc, sizeof(CBraState)); + if (!decoder) + return SZ_ERROR_MEM; + p->p = decoder; + p->Free = BraState_Free; + p->SetProps = BraState_SetProps; + p->Init = BraState_Init; + p->Code2 = BraState_Code2; + p->Filter = BraState_Filter; + } + decoder->methodId = (UInt32)id; + decoder->encodeMode = encodeMode; + return SZ_OK; +} + + + +/* ---------- SbState ---------- */ + +#ifdef USE_SUBBLOCK + +static void SbState_Free(void *pp, ISzAllocPtr alloc) +{ + CSbDec *p = (CSbDec *)pp; + SbDec_Free(p); + ISzAlloc_Free(alloc, pp); +} + +static SRes SbState_SetProps(void *pp, const Byte *props, size_t propSize, ISzAllocPtr alloc) +{ + UNUSED_VAR(pp); + UNUSED_VAR(props); + UNUSED_VAR(alloc); + return (propSize == 0) ? SZ_OK : SZ_ERROR_UNSUPPORTED; +} + +static void SbState_Init(void *pp) +{ + SbDec_Init((CSbDec *)pp); +} + +static SRes SbState_Code2(void *pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, + int srcWasFinished, ECoderFinishMode finishMode, + // int *wasFinished + ECoderStatus *status) +{ + CSbDec *p = (CSbDec *)pp; + SRes res; + UNUSED_VAR(srcWasFinished); + p->dest = dest; + p->destLen = *destLen; + p->src = src; + p->srcLen = *srcLen; + p->finish = finishMode; /* change it */ + res = SbDec_Decode((CSbDec *)pp); + *destLen -= p->destLen; + *srcLen -= p->srcLen; + // *wasFinished = (*destLen == 0 && *srcLen == 0); /* change it */ + *status = (*destLen == 0 && *srcLen == 0) ? 
+ CODER_STATUS_FINISHED_WITH_MARK : + CODER_STATUS_NOT_FINISHED; + return res; +} + +static SRes SbState_SetFromMethod(IStateCoder *p, ISzAllocPtr alloc) +{ + CSbDec *decoder = (CSbDec *)p->p; + if (!decoder) + { + decoder = (CSbDec *)ISzAlloc_Alloc(alloc, sizeof(CSbDec)); + if (!decoder) + return SZ_ERROR_MEM; + p->p = decoder; + p->Free = SbState_Free; + p->SetProps = SbState_SetProps; + p->Init = SbState_Init; + p->Code2 = SbState_Code2; + p->Filter = NULL; + } + SbDec_Construct(decoder); + SbDec_SetAlloc(decoder, alloc); + return SZ_OK; +} + +#endif + + + +/* ---------- Lzma2 ---------- */ + +typedef struct +{ + CLzma2Dec decoder; + BoolInt outBufMode; +} CLzma2Dec_Spec; + + +static void Lzma2State_Free(void *pp, ISzAllocPtr alloc) +{ + CLzma2Dec_Spec *p = (CLzma2Dec_Spec *)pp; + if (p->outBufMode) + Lzma2Dec_FreeProbs(&p->decoder, alloc); + else + Lzma2Dec_Free(&p->decoder, alloc); + ISzAlloc_Free(alloc, pp); +} + +static SRes Lzma2State_SetProps(void *pp, const Byte *props, size_t propSize, ISzAllocPtr alloc) +{ + if (propSize != 1) + return SZ_ERROR_UNSUPPORTED; + { + CLzma2Dec_Spec *p = (CLzma2Dec_Spec *)pp; + if (p->outBufMode) + return Lzma2Dec_AllocateProbs(&p->decoder, props[0], alloc); + else + return Lzma2Dec_Allocate(&p->decoder, props[0], alloc); + } +} + +static void Lzma2State_Init(void *pp) +{ + Lzma2Dec_Init(&((CLzma2Dec_Spec *)pp)->decoder); +} + + +/* + if (outBufMode), then (dest) is not used. Use NULL. + Data is unpacked to (spec->decoder.decoder.dic) output buffer. +*/ + +static SRes Lzma2State_Code2(void *pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, + int srcWasFinished, ECoderFinishMode finishMode, + // int *wasFinished, + ECoderStatus *status) +{ + CLzma2Dec_Spec *spec = (CLzma2Dec_Spec *)pp; + ELzmaStatus status2; + /* ELzmaFinishMode fm = (finishMode == LZMA_FINISH_ANY) ? 
LZMA_FINISH_ANY : LZMA_FINISH_END; */ + SRes res; + UNUSED_VAR(srcWasFinished); + if (spec->outBufMode) + { + SizeT dicPos = spec->decoder.decoder.dicPos; + SizeT dicLimit = dicPos + *destLen; + res = Lzma2Dec_DecodeToDic(&spec->decoder, dicLimit, src, srcLen, (ELzmaFinishMode)finishMode, &status2); + *destLen = spec->decoder.decoder.dicPos - dicPos; + } + else + res = Lzma2Dec_DecodeToBuf(&spec->decoder, dest, destLen, src, srcLen, (ELzmaFinishMode)finishMode, &status2); + // *wasFinished = (status2 == LZMA_STATUS_FINISHED_WITH_MARK); + // ECoderStatus values are identical to ELzmaStatus values of LZMA2 decoder + *status = (ECoderStatus)status2; + return res; +} + + +static SRes Lzma2State_SetFromMethod(IStateCoder *p, Byte *outBuf, size_t outBufSize, ISzAllocPtr alloc) +{ + CLzma2Dec_Spec *spec = (CLzma2Dec_Spec *)p->p; + if (!spec) + { + spec = (CLzma2Dec_Spec *)ISzAlloc_Alloc(alloc, sizeof(CLzma2Dec_Spec)); + if (!spec) + return SZ_ERROR_MEM; + p->p = spec; + p->Free = Lzma2State_Free; + p->SetProps = Lzma2State_SetProps; + p->Init = Lzma2State_Init; + p->Code2 = Lzma2State_Code2; + p->Filter = NULL; + Lzma2Dec_Construct(&spec->decoder); + } + spec->outBufMode = False; + if (outBuf) + { + spec->outBufMode = True; + spec->decoder.decoder.dic = outBuf; + spec->decoder.decoder.dicBufSize = outBufSize; + } + return SZ_OK; +} + + +static SRes Lzma2State_ResetOutBuf(IStateCoder *p, Byte *outBuf, size_t outBufSize) +{ + CLzma2Dec_Spec *spec = (CLzma2Dec_Spec *)p->p; + if ((spec->outBufMode && !outBuf) || (!spec->outBufMode && outBuf)) + return SZ_ERROR_FAIL; + if (outBuf) + { + spec->decoder.decoder.dic = outBuf; + spec->decoder.decoder.dicBufSize = outBufSize; + } + return SZ_OK; +} + + + +static void MixCoder_Construct(CMixCoder *p, ISzAllocPtr alloc) +{ + unsigned i; + p->alloc = alloc; + p->buf = NULL; + p->numCoders = 0; + + p->outBufSize = 0; + p->outBuf = NULL; + // p->SingleBufMode = False; + + for (i = 0; i < MIXCODER_NUM_FILTERS_MAX; i++) + p->coders[i].p = NULL; +} + + +static void MixCoder_Free(CMixCoder *p) +{ + unsigned i; + p->numCoders = 0; + for (i = 0; i < MIXCODER_NUM_FILTERS_MAX; i++) + { + IStateCoder *sc = &p->coders[i]; + if (sc->p) + { + sc->Free(sc->p, p->alloc); + sc->p = NULL; + } + } + if (p->buf) + { + ISzAlloc_Free(p->alloc, p->buf); + p->buf = NULL; /* 9.31: the BUG was fixed */ + } +} + +static void MixCoder_Init(CMixCoder *p) +{ + unsigned i; + for (i = 0; i < MIXCODER_NUM_FILTERS_MAX - 1; i++) + { + p->size[i] = 0; + p->pos[i] = 0; + p->finished[i] = 0; + } + for (i = 0; i < p->numCoders; i++) + { + IStateCoder *coder = &p->coders[i]; + coder->Init(coder->p); + p->results[i] = SZ_OK; + } + p->outWritten = 0; + p->wasFinished = False; + p->res = SZ_OK; + p->status = CODER_STATUS_NOT_SPECIFIED; +} + + +static SRes MixCoder_SetFromMethod(CMixCoder *p, unsigned coderIndex, UInt64 methodId, Byte *outBuf, size_t outBufSize) +{ + IStateCoder *sc = &p->coders[coderIndex]; + p->ids[coderIndex] = methodId; + switch (methodId) + { + case XZ_ID_LZMA2: return Lzma2State_SetFromMethod(sc, outBuf, outBufSize, p->alloc); + #ifdef USE_SUBBLOCK + case XZ_ID_Subblock: return SbState_SetFromMethod(sc, p->alloc); + #endif + } + if (coderIndex == 0) + return SZ_ERROR_UNSUPPORTED; + return BraState_SetFromMethod(sc, methodId, 0, p->alloc); +} + + +static SRes MixCoder_ResetFromMethod(CMixCoder *p, unsigned coderIndex, UInt64 methodId, Byte *outBuf, size_t outBufSize) +{ + IStateCoder *sc = &p->coders[coderIndex]; + switch (methodId) + { + case XZ_ID_LZMA2: return 
Lzma2State_ResetOutBuf(sc, outBuf, outBufSize); + } + return SZ_ERROR_UNSUPPORTED; +} + + + +/* + if (destFinish) - then unpack data block is finished at (*destLen) position, + and we can return data that were not processed by filter + +output (status) can be : + CODER_STATUS_NOT_FINISHED + CODER_STATUS_FINISHED_WITH_MARK + CODER_STATUS_NEEDS_MORE_INPUT - not implemented still +*/ + +static SRes MixCoder_Code(CMixCoder *p, + Byte *dest, SizeT *destLen, int destFinish, + const Byte *src, SizeT *srcLen, int srcWasFinished, + ECoderFinishMode finishMode) +{ + SizeT destLenOrig = *destLen; + SizeT srcLenOrig = *srcLen; + + *destLen = 0; + *srcLen = 0; + + if (p->wasFinished) + return p->res; + + p->status = CODER_STATUS_NOT_FINISHED; + + // if (p->SingleBufMode) + if (p->outBuf) + { + SRes res; + SizeT destLen2, srcLen2; + int wasFinished; + + PRF_STR("------- MixCoder Single ----------"); + + srcLen2 = srcLenOrig; + destLen2 = destLenOrig; + + { + IStateCoder *coder = &p->coders[0]; + res = coder->Code2(coder->p, NULL, &destLen2, src, &srcLen2, srcWasFinished, finishMode, + // &wasFinished, + &p->status); + wasFinished = (p->status == CODER_STATUS_FINISHED_WITH_MARK); + } + + p->res = res; + + /* + if (wasFinished) + p->status = CODER_STATUS_FINISHED_WITH_MARK; + else + { + if (res == SZ_OK) + if (destLen2 != destLenOrig) + p->status = CODER_STATUS_NEEDS_MORE_INPUT; + } + */ + + + *srcLen = srcLen2; + src += srcLen2; + p->outWritten += destLen2; + + if (res != SZ_OK || srcWasFinished || wasFinished) + p->wasFinished = True; + + if (p->numCoders == 1) + *destLen = destLen2; + else if (p->wasFinished) + { + unsigned i; + size_t processed = p->outWritten; + + for (i = 1; i < p->numCoders; i++) + { + IStateCoder *coder = &p->coders[i]; + processed = coder->Filter(coder->p, p->outBuf, processed); + if (wasFinished || (destFinish && p->outWritten == destLenOrig)) + processed = p->outWritten; + PRF_STR_INT("filter", i); + } + *destLen = processed; + } + return res; + } + + PRF_STR("standard mix"); + + if (p->numCoders != 1) + { + if (!p->buf) + { + p->buf = (Byte *)ISzAlloc_Alloc(p->alloc, CODER_BUF_SIZE * (MIXCODER_NUM_FILTERS_MAX - 1)); + if (!p->buf) + return SZ_ERROR_MEM; + } + + finishMode = CODER_FINISH_ANY; + } + + for (;;) + { + BoolInt processed = False; + BoolInt allFinished = True; + SRes resMain = SZ_OK; + unsigned i; + + p->status = CODER_STATUS_NOT_FINISHED; + /* + if (p->numCoders == 1 && *destLen == destLenOrig && finishMode == LZMA_FINISH_ANY) + break; + */ + + for (i = 0; i < p->numCoders; i++) + { + SRes res; + IStateCoder *coder = &p->coders[i]; + Byte *dest2; + SizeT destLen2, srcLen2; // destLen2_Orig; + const Byte *src2; + int srcFinished2; + int encodingWasFinished; + ECoderStatus status2; + + if (i == 0) + { + src2 = src; + srcLen2 = srcLenOrig - *srcLen; + srcFinished2 = srcWasFinished; + } + else + { + size_t k = i - 1; + src2 = p->buf + (CODER_BUF_SIZE * k) + p->pos[k]; + srcLen2 = p->size[k] - p->pos[k]; + srcFinished2 = p->finished[k]; + } + + if (i == p->numCoders - 1) + { + dest2 = dest; + destLen2 = destLenOrig - *destLen; + } + else + { + if (p->pos[i] != p->size[i]) + continue; + dest2 = p->buf + (CODER_BUF_SIZE * i); + destLen2 = CODER_BUF_SIZE; + } + + // destLen2_Orig = destLen2; + + if (p->results[i] != SZ_OK) + { + if (resMain == SZ_OK) + resMain = p->results[i]; + continue; + } + + res = coder->Code2(coder->p, + dest2, &destLen2, + src2, &srcLen2, srcFinished2, + finishMode, + // &encodingWasFinished, + &status2); + + if (res != SZ_OK) + { + p->results[i] = 
res; + if (resMain == SZ_OK) + resMain = res; + } + + encodingWasFinished = (status2 == CODER_STATUS_FINISHED_WITH_MARK); + + if (!encodingWasFinished) + { + allFinished = False; + if (p->numCoders == 1 && res == SZ_OK) + p->status = status2; + } + + if (i == 0) + { + *srcLen += srcLen2; + src += srcLen2; + } + else + p->pos[(size_t)i - 1] += srcLen2; + + if (i == p->numCoders - 1) + { + *destLen += destLen2; + dest += destLen2; + } + else + { + p->size[i] = destLen2; + p->pos[i] = 0; + p->finished[i] = encodingWasFinished; + } + + if (destLen2 != 0 || srcLen2 != 0) + processed = True; + } + + if (!processed) + { + if (allFinished) + p->status = CODER_STATUS_FINISHED_WITH_MARK; + return resMain; + } + } +} + + +SRes Xz_ParseHeader(CXzStreamFlags *p, const Byte *buf) +{ + *p = (CXzStreamFlags)GetBe16(buf + XZ_SIG_SIZE); + if (CrcCalc(buf + XZ_SIG_SIZE, XZ_STREAM_FLAGS_SIZE) != + GetUi32(buf + XZ_SIG_SIZE + XZ_STREAM_FLAGS_SIZE)) + return SZ_ERROR_NO_ARCHIVE; + return XzFlags_IsSupported(*p) ? SZ_OK : SZ_ERROR_UNSUPPORTED; +} + +static BoolInt Xz_CheckFooter(CXzStreamFlags flags, UInt64 indexSize, const Byte *buf) +{ + return indexSize == (((UInt64)GetUi32(buf + 4) + 1) << 2) + && GetUi32(buf) == CrcCalc(buf + 4, 6) + && flags == GetBe16(buf + 8) + && buf[10] == XZ_FOOTER_SIG_0 + && buf[11] == XZ_FOOTER_SIG_1; +} + +#define READ_VARINT_AND_CHECK(buf, pos, size, res) \ + { unsigned s = Xz_ReadVarInt(buf + pos, size - pos, res); \ + if (s == 0) return SZ_ERROR_ARCHIVE; pos += s; } + + +static BoolInt XzBlock_AreSupportedFilters(const CXzBlock *p) +{ + unsigned numFilters = XzBlock_GetNumFilters(p) - 1; + unsigned i; + { + const CXzFilter *f = &p->filters[numFilters]; + if (f->id != XZ_ID_LZMA2 || f->propsSize != 1 || f->props[0] > 40) + return False; + } + + for (i = 0; i < numFilters; i++) + { + const CXzFilter *f = &p->filters[i]; + if (f->id == XZ_ID_Delta) + { + if (f->propsSize != 1) + return False; + } + else if (f->id < XZ_ID_Delta + || f->id > XZ_ID_SPARC + || (f->propsSize != 0 && f->propsSize != 4)) + return False; + } + return True; +} + + +SRes XzBlock_Parse(CXzBlock *p, const Byte *header) +{ + unsigned pos; + unsigned numFilters, i; + unsigned headerSize = (unsigned)header[0] << 2; + + /* (headerSize != 0) : another code checks */ + + if (CrcCalc(header, headerSize) != GetUi32(header + headerSize)) + return SZ_ERROR_ARCHIVE; + + pos = 1; + p->flags = header[pos++]; + + p->packSize = (UInt64)(Int64)-1; + if (XzBlock_HasPackSize(p)) + { + READ_VARINT_AND_CHECK(header, pos, headerSize, &p->packSize); + if (p->packSize == 0 || p->packSize + headerSize >= (UInt64)1 << 63) + return SZ_ERROR_ARCHIVE; + } + + p->unpackSize = (UInt64)(Int64)-1; + if (XzBlock_HasUnpackSize(p)) + READ_VARINT_AND_CHECK(header, pos, headerSize, &p->unpackSize); + + numFilters = XzBlock_GetNumFilters(p); + for (i = 0; i < numFilters; i++) + { + CXzFilter *filter = p->filters + i; + UInt64 size; + READ_VARINT_AND_CHECK(header, pos, headerSize, &filter->id); + READ_VARINT_AND_CHECK(header, pos, headerSize, &size); + if (size > headerSize - pos || size > XZ_FILTER_PROPS_SIZE_MAX) + return SZ_ERROR_ARCHIVE; + filter->propsSize = (UInt32)size; + memcpy(filter->props, header + pos, (size_t)size); + pos += (unsigned)size; + + #ifdef XZ_DUMP + printf("\nf[%u] = %2X: ", i, (unsigned)filter->id); + { + unsigned i; + for (i = 0; i < size; i++) + printf(" %2X", filter->props[i]); + } + #endif + } + + if (XzBlock_HasUnsupportedFlags(p)) + return SZ_ERROR_UNSUPPORTED; + + while (pos < headerSize) + if (header[pos++] != 0) + 
return SZ_ERROR_ARCHIVE; + return SZ_OK; +} + + + + +static SRes XzDecMix_Init(CMixCoder *p, const CXzBlock *block, Byte *outBuf, size_t outBufSize) +{ + unsigned i; + BoolInt needReInit = True; + unsigned numFilters = XzBlock_GetNumFilters(block); + + if (numFilters == p->numCoders && ((p->outBuf && outBuf) || (!p->outBuf && !outBuf))) + { + needReInit = False; + for (i = 0; i < numFilters; i++) + if (p->ids[i] != block->filters[numFilters - 1 - i].id) + { + needReInit = True; + break; + } + } + + // p->SingleBufMode = (outBuf != NULL); + p->outBuf = outBuf; + p->outBufSize = outBufSize; + + // p->SingleBufMode = False; + // outBuf = NULL; + + if (needReInit) + { + MixCoder_Free(p); + for (i = 0; i < numFilters; i++) + { + RINOK(MixCoder_SetFromMethod(p, i, block->filters[numFilters - 1 - i].id, outBuf, outBufSize)); + } + p->numCoders = numFilters; + } + else + { + RINOK(MixCoder_ResetFromMethod(p, 0, block->filters[numFilters - 1].id, outBuf, outBufSize)); + } + + for (i = 0; i < numFilters; i++) + { + const CXzFilter *f = &block->filters[numFilters - 1 - i]; + IStateCoder *sc = &p->coders[i]; + RINOK(sc->SetProps(sc->p, f->props, f->propsSize, p->alloc)); + } + + MixCoder_Init(p); + return SZ_OK; +} + + + +void XzUnpacker_Init(CXzUnpacker *p) +{ + p->state = XZ_STATE_STREAM_HEADER; + p->pos = 0; + p->numStartedStreams = 0; + p->numFinishedStreams = 0; + p->numTotalBlocks = 0; + p->padSize = 0; + p->decodeOnlyOneBlock = 0; + + p->parseMode = False; + p->decodeToStreamSignature = False; + + // p->outBuf = NULL; + // p->outBufSize = 0; + p->outDataWritten = 0; +} + + +void XzUnpacker_SetOutBuf(CXzUnpacker *p, Byte *outBuf, size_t outBufSize) +{ + p->outBuf = outBuf; + p->outBufSize = outBufSize; +} + + +void XzUnpacker_Construct(CXzUnpacker *p, ISzAllocPtr alloc) +{ + MixCoder_Construct(&p->decoder, alloc); + p->outBuf = NULL; + p->outBufSize = 0; + XzUnpacker_Init(p); +} + + +void XzUnpacker_Free(CXzUnpacker *p) +{ + MixCoder_Free(&p->decoder); +} + + +void XzUnpacker_PrepareToRandomBlockDecoding(CXzUnpacker *p) +{ + p->indexSize = 0; + p->numBlocks = 0; + Sha256_Init(&p->sha); + p->state = XZ_STATE_BLOCK_HEADER; + p->pos = 0; + p->decodeOnlyOneBlock = 1; +} + + +static void XzUnpacker_UpdateIndex(CXzUnpacker *p, UInt64 packSize, UInt64 unpackSize) +{ + Byte temp[32]; + unsigned num = Xz_WriteVarInt(temp, packSize); + num += Xz_WriteVarInt(temp + num, unpackSize); + Sha256_Update(&p->sha, temp, num); + p->indexSize += num; + p->numBlocks++; +} + + + +SRes XzUnpacker_Code(CXzUnpacker *p, Byte *dest, SizeT *destLen, + const Byte *src, SizeT *srcLen, int srcFinished, + ECoderFinishMode finishMode, ECoderStatus *status) +{ + SizeT destLenOrig = *destLen; + SizeT srcLenOrig = *srcLen; + *destLen = 0; + *srcLen = 0; + *status = CODER_STATUS_NOT_SPECIFIED; + + for (;;) + { + SizeT srcRem; + + if (p->state == XZ_STATE_BLOCK) + { + SizeT destLen2 = destLenOrig - *destLen; + SizeT srcLen2 = srcLenOrig - *srcLen; + SRes res; + + ECoderFinishMode finishMode2 = finishMode; + BoolInt srcFinished2 = srcFinished; + BoolInt destFinish = False; + + if (p->block.packSize != (UInt64)(Int64)-1) + { + UInt64 rem = p->block.packSize - p->packSize; + if (srcLen2 >= rem) + { + srcFinished2 = True; + srcLen2 = (SizeT)rem; + } + if (rem == 0 && p->block.unpackSize == p->unpackSize) + return SZ_ERROR_DATA; + } + + if (p->block.unpackSize != (UInt64)(Int64)-1) + { + UInt64 rem = p->block.unpackSize - p->unpackSize; + if (destLen2 >= rem) + { + destFinish = True; + finishMode2 = CODER_FINISH_END; + destLen2 = 
(SizeT)rem; + } + } + + /* + if (srcLen2 == 0 && destLen2 == 0) + { + *status = CODER_STATUS_NOT_FINISHED; + return SZ_OK; + } + */ + + { + res = MixCoder_Code(&p->decoder, + (p->outBuf ? NULL : dest), &destLen2, destFinish, + src, &srcLen2, srcFinished2, + finishMode2); + + *status = p->decoder.status; + XzCheck_Update(&p->check, (p->outBuf ? p->outBuf + p->outDataWritten : dest), destLen2); + if (!p->outBuf) + dest += destLen2; + p->outDataWritten += destLen2; + } + + (*srcLen) += srcLen2; + src += srcLen2; + p->packSize += srcLen2; + (*destLen) += destLen2; + p->unpackSize += destLen2; + + RINOK(res); + + if (*status != CODER_STATUS_FINISHED_WITH_MARK) + { + if (p->block.packSize == p->packSize + && *status == CODER_STATUS_NEEDS_MORE_INPUT) + { + PRF_STR("CODER_STATUS_NEEDS_MORE_INPUT"); + *status = CODER_STATUS_NOT_SPECIFIED; + return SZ_ERROR_DATA; + } + + return SZ_OK; + } + { + XzUnpacker_UpdateIndex(p, XzUnpacker_GetPackSizeForIndex(p), p->unpackSize); + p->state = XZ_STATE_BLOCK_FOOTER; + p->pos = 0; + p->alignPos = 0; + *status = CODER_STATUS_NOT_SPECIFIED; + + if ((p->block.packSize != (UInt64)(Int64)-1 && p->block.packSize != p->packSize) + || (p->block.unpackSize != (UInt64)(Int64)-1 && p->block.unpackSize != p->unpackSize)) + { + PRF_STR("ERROR: block.size mismatch"); + return SZ_ERROR_DATA; + } + } + // continue; + } + + srcRem = srcLenOrig - *srcLen; + + // XZ_STATE_BLOCK_FOOTER can transit to XZ_STATE_BLOCK_HEADER without input bytes + if (srcRem == 0 && p->state != XZ_STATE_BLOCK_FOOTER) + { + *status = CODER_STATUS_NEEDS_MORE_INPUT; + return SZ_OK; + } + + switch (p->state) + { + case XZ_STATE_STREAM_HEADER: + { + if (p->pos < XZ_STREAM_HEADER_SIZE) + { + if (p->pos < XZ_SIG_SIZE && *src != XZ_SIG[p->pos]) + return SZ_ERROR_NO_ARCHIVE; + if (p->decodeToStreamSignature) + return SZ_OK; + p->buf[p->pos++] = *src++; + (*srcLen)++; + } + else + { + RINOK(Xz_ParseHeader(&p->streamFlags, p->buf)); + p->numStartedStreams++; + p->indexSize = 0; + p->numBlocks = 0; + Sha256_Init(&p->sha); + p->state = XZ_STATE_BLOCK_HEADER; + p->pos = 0; + } + break; + } + + case XZ_STATE_BLOCK_HEADER: + { + if (p->pos == 0) + { + p->buf[p->pos++] = *src++; + (*srcLen)++; + if (p->buf[0] == 0) + { + if (p->decodeOnlyOneBlock) + return SZ_ERROR_DATA; + p->indexPreSize = 1 + Xz_WriteVarInt(p->buf + 1, p->numBlocks); + p->indexPos = p->indexPreSize; + p->indexSize += p->indexPreSize; + Sha256_Final(&p->sha, p->shaDigest); + Sha256_Init(&p->sha); + p->crc = CrcUpdate(CRC_INIT_VAL, p->buf, p->indexPreSize); + p->state = XZ_STATE_STREAM_INDEX; + break; + } + p->blockHeaderSize = ((UInt32)p->buf[0] << 2) + 4; + break; + } + + if (p->pos != p->blockHeaderSize) + { + UInt32 cur = p->blockHeaderSize - p->pos; + if (cur > srcRem) + cur = (UInt32)srcRem; + memcpy(p->buf + p->pos, src, cur); + p->pos += cur; + (*srcLen) += cur; + src += cur; + } + else + { + RINOK(XzBlock_Parse(&p->block, p->buf)); + if (!XzBlock_AreSupportedFilters(&p->block)) + return SZ_ERROR_UNSUPPORTED; + p->numTotalBlocks++; + p->state = XZ_STATE_BLOCK; + p->packSize = 0; + p->unpackSize = 0; + XzCheck_Init(&p->check, XzFlags_GetCheckType(p->streamFlags)); + if (p->parseMode) + { + p->headerParsedOk = True; + return SZ_OK; + } + RINOK(XzDecMix_Init(&p->decoder, &p->block, p->outBuf, p->outBufSize)); + } + break; + } + + case XZ_STATE_BLOCK_FOOTER: + { + if ((((unsigned)p->packSize + p->alignPos) & 3) != 0) + { + if (srcRem == 0) + { + *status = CODER_STATUS_NEEDS_MORE_INPUT; + return SZ_OK; + } + (*srcLen)++; + p->alignPos++; + if 
(*src++ != 0) + return SZ_ERROR_CRC; + } + else + { + UInt32 checkSize = XzFlags_GetCheckSize(p->streamFlags); + UInt32 cur = checkSize - p->pos; + if (cur != 0) + { + if (srcRem == 0) + { + *status = CODER_STATUS_NEEDS_MORE_INPUT; + return SZ_OK; + } + if (cur > srcRem) + cur = (UInt32)srcRem; + memcpy(p->buf + p->pos, src, cur); + p->pos += cur; + (*srcLen) += cur; + src += cur; + if (checkSize != p->pos) + break; + } + { + Byte digest[XZ_CHECK_SIZE_MAX]; + p->state = XZ_STATE_BLOCK_HEADER; + p->pos = 0; + if (XzCheck_Final(&p->check, digest) && memcmp(digest, p->buf, checkSize) != 0) + return SZ_ERROR_CRC; + if (p->decodeOnlyOneBlock) + { + *status = CODER_STATUS_FINISHED_WITH_MARK; + return SZ_OK; + } + } + } + break; + } + + case XZ_STATE_STREAM_INDEX: + { + if (p->pos < p->indexPreSize) + { + (*srcLen)++; + if (*src++ != p->buf[p->pos++]) + return SZ_ERROR_CRC; + } + else + { + if (p->indexPos < p->indexSize) + { + UInt64 cur = p->indexSize - p->indexPos; + if (srcRem > cur) + srcRem = (SizeT)cur; + p->crc = CrcUpdate(p->crc, src, srcRem); + Sha256_Update(&p->sha, src, srcRem); + (*srcLen) += srcRem; + src += srcRem; + p->indexPos += srcRem; + } + else if ((p->indexPos & 3) != 0) + { + Byte b = *src++; + p->crc = CRC_UPDATE_BYTE(p->crc, b); + (*srcLen)++; + p->indexPos++; + p->indexSize++; + if (b != 0) + return SZ_ERROR_CRC; + } + else + { + Byte digest[SHA256_DIGEST_SIZE]; + p->state = XZ_STATE_STREAM_INDEX_CRC; + p->indexSize += 4; + p->pos = 0; + Sha256_Final(&p->sha, digest); + if (memcmp(digest, p->shaDigest, SHA256_DIGEST_SIZE) != 0) + return SZ_ERROR_CRC; + } + } + break; + } + + case XZ_STATE_STREAM_INDEX_CRC: + { + if (p->pos < 4) + { + (*srcLen)++; + p->buf[p->pos++] = *src++; + } + else + { + p->state = XZ_STATE_STREAM_FOOTER; + p->pos = 0; + if (CRC_GET_DIGEST(p->crc) != GetUi32(p->buf)) + return SZ_ERROR_CRC; + } + break; + } + + case XZ_STATE_STREAM_FOOTER: + { + UInt32 cur = XZ_STREAM_FOOTER_SIZE - p->pos; + if (cur > srcRem) + cur = (UInt32)srcRem; + memcpy(p->buf + p->pos, src, cur); + p->pos += cur; + (*srcLen) += cur; + src += cur; + if (p->pos == XZ_STREAM_FOOTER_SIZE) + { + p->state = XZ_STATE_STREAM_PADDING; + p->numFinishedStreams++; + p->padSize = 0; + if (!Xz_CheckFooter(p->streamFlags, p->indexSize, p->buf)) + return SZ_ERROR_CRC; + } + break; + } + + case XZ_STATE_STREAM_PADDING: + { + if (*src != 0) + { + if (((UInt32)p->padSize & 3) != 0) + return SZ_ERROR_NO_ARCHIVE; + p->pos = 0; + p->state = XZ_STATE_STREAM_HEADER; + } + else + { + (*srcLen)++; + src++; + p->padSize++; + } + break; + } + + case XZ_STATE_BLOCK: break; /* to disable GCC warning */ + } + } + /* + if (p->state == XZ_STATE_FINISHED) + *status = CODER_STATUS_FINISHED_WITH_MARK; + return SZ_OK; + */ +} + + +SRes XzUnpacker_CodeFull(CXzUnpacker *p, Byte *dest, SizeT *destLen, + const Byte *src, SizeT *srcLen, + ECoderFinishMode finishMode, ECoderStatus *status) +{ + XzUnpacker_Init(p); + XzUnpacker_SetOutBuf(p, dest, *destLen); + + return XzUnpacker_Code(p, + NULL, destLen, + src, srcLen, True, + finishMode, status); +} + + +BoolInt XzUnpacker_IsBlockFinished(const CXzUnpacker *p) +{ + return (p->state == XZ_STATE_BLOCK_HEADER) && (p->pos == 0); +} + +BoolInt XzUnpacker_IsStreamWasFinished(const CXzUnpacker *p) +{ + return (p->state == XZ_STATE_STREAM_PADDING) && (((UInt32)p->padSize & 3) == 0); +} + +UInt64 XzUnpacker_GetExtraSize(const CXzUnpacker *p) +{ + UInt64 num = 0; + if (p->state == XZ_STATE_STREAM_PADDING) + num = p->padSize; + else if (p->state == XZ_STATE_STREAM_HEADER) + num = 
p->padSize + p->pos; + return num; +} + + + + + + + + + + + + + + + + + + + + + +#ifndef _7ZIP_ST +#include "MtDec.h" +#endif + + +void XzDecMtProps_Init(CXzDecMtProps *p) +{ + p->inBufSize_ST = 1 << 18; + p->outStep_ST = 1 << 20; + p->ignoreErrors = False; + + #ifndef _7ZIP_ST + p->numThreads = 1; + p->inBufSize_MT = 1 << 18; + p->memUseMax = sizeof(size_t) << 28; + #endif +} + + + +#ifndef _7ZIP_ST + +/* ---------- CXzDecMtThread ---------- */ + +typedef struct +{ + Byte *outBuf; + size_t outBufSize; + size_t outPreSize; + size_t inPreSize; + size_t inPreHeaderSize; + size_t blockPackSize_for_Index; // including block header and checksum. + size_t blockPackTotal; // including stream header, block header and checksum. + size_t inCodeSize; + size_t outCodeSize; + ECoderStatus status; + SRes codeRes; + BoolInt skipMode; + // BoolInt finishedWithMark; + EMtDecParseState parseState; + BoolInt parsing_Truncated; + BoolInt atBlockHeader; + CXzStreamFlags streamFlags; + // UInt64 numFinishedStreams + UInt64 numStreams; + UInt64 numTotalBlocks; + UInt64 numBlocks; + + BoolInt dec_created; + CXzUnpacker dec; + + Byte mtPad[1 << 7]; +} CXzDecMtThread; + +#endif + + +/* ---------- CXzDecMt ---------- */ + +typedef struct +{ + CAlignOffsetAlloc alignOffsetAlloc; + ISzAllocPtr allocMid; + + CXzDecMtProps props; + size_t unpackBlockMaxSize; + + ISeqInStream *inStream; + ISeqOutStream *outStream; + ICompressProgress *progress; + // CXzStatInfo *stat; + + BoolInt finishMode; + BoolInt outSize_Defined; + UInt64 outSize; + + UInt64 outProcessed; + UInt64 inProcessed; + UInt64 readProcessed; + BoolInt readWasFinished; + SRes readRes; + SRes writeRes; + + Byte *outBuf; + size_t outBufSize; + Byte *inBuf; + size_t inBufSize; + + CXzUnpacker dec; + + ECoderStatus status; + SRes codeRes; + + #ifndef _7ZIP_ST + BoolInt mainDecoderWasCalled; + // int statErrorDefined; + int finishedDecoderIndex; + + // global values that are used in Parse stage + CXzStreamFlags streamFlags; + // UInt64 numFinishedStreams + UInt64 numStreams; + UInt64 numTotalBlocks; + UInt64 numBlocks; + + // UInt64 numBadBlocks; + SRes mainErrorCode; + + BoolInt isBlockHeaderState_Parse; + BoolInt isBlockHeaderState_Write; + UInt64 outProcessed_Parse; + BoolInt parsing_Truncated; + + BoolInt mtc_WasConstructed; + CMtDec mtc; + CXzDecMtThread coders[MTDEC__THREADS_MAX]; + #endif + +} CXzDecMt; + + + +CXzDecMtHandle XzDecMt_Create(ISzAllocPtr alloc, ISzAllocPtr allocMid) +{ + CXzDecMt *p = (CXzDecMt *)ISzAlloc_Alloc(alloc, sizeof(CXzDecMt)); + if (!p) + return NULL; + + AlignOffsetAlloc_CreateVTable(&p->alignOffsetAlloc); + p->alignOffsetAlloc.baseAlloc = alloc; + p->alignOffsetAlloc.numAlignBits = 7; + p->alignOffsetAlloc.offset = 0; + + p->allocMid = allocMid; + + p->outBuf = NULL; + p->outBufSize = 0; + p->inBuf = NULL; + p->inBufSize = 0; + + XzUnpacker_Construct(&p->dec, &p->alignOffsetAlloc.vt); + + p->unpackBlockMaxSize = 0; + + XzDecMtProps_Init(&p->props); + + #ifndef _7ZIP_ST + p->mtc_WasConstructed = False; + { + unsigned i; + for (i = 0; i < MTDEC__THREADS_MAX; i++) + { + CXzDecMtThread *coder = &p->coders[i]; + coder->dec_created = False; + coder->outBuf = NULL; + coder->outBufSize = 0; + } + } + #endif + + return p; +} + + +#ifndef _7ZIP_ST + +static void XzDecMt_FreeOutBufs(CXzDecMt *p) +{ + unsigned i; + for (i = 0; i < MTDEC__THREADS_MAX; i++) + { + CXzDecMtThread *coder = &p->coders[i]; + if (coder->outBuf) + { + ISzAlloc_Free(p->allocMid, coder->outBuf); + coder->outBuf = NULL; + coder->outBufSize = 0; + } + } + 
p->unpackBlockMaxSize = 0; +} + +#endif + + + +static void XzDecMt_FreeSt(CXzDecMt *p) +{ + XzUnpacker_Free(&p->dec); + + if (p->outBuf) + { + ISzAlloc_Free(p->allocMid, p->outBuf); + p->outBuf = NULL; + } + p->outBufSize = 0; + + if (p->inBuf) + { + ISzAlloc_Free(p->allocMid, p->inBuf); + p->inBuf = NULL; + } + p->inBufSize = 0; +} + + +void XzDecMt_Destroy(CXzDecMtHandle pp) +{ + CXzDecMt *p = (CXzDecMt *)pp; + + XzDecMt_FreeSt(p); + + #ifndef _7ZIP_ST + + if (p->mtc_WasConstructed) + { + MtDec_Destruct(&p->mtc); + p->mtc_WasConstructed = False; + } + { + unsigned i; + for (i = 0; i < MTDEC__THREADS_MAX; i++) + { + CXzDecMtThread *t = &p->coders[i]; + if (t->dec_created) + { + // we don't need to free dict here + XzUnpacker_Free(&t->dec); + t->dec_created = False; + } + } + } + XzDecMt_FreeOutBufs(p); + + #endif + + ISzAlloc_Free(p->alignOffsetAlloc.baseAlloc, pp); +} + + + +#ifndef _7ZIP_ST + +static void XzDecMt_Callback_Parse(void *obj, unsigned coderIndex, CMtDecCallbackInfo *cc) +{ + CXzDecMt *me = (CXzDecMt *)obj; + CXzDecMtThread *coder = &me->coders[coderIndex]; + size_t srcSize = cc->srcSize; + + cc->srcSize = 0; + cc->outPos = 0; + cc->state = MTDEC_PARSE_CONTINUE; + + cc->canCreateNewThread = True; + + if (cc->startCall) + { + coder->outPreSize = 0; + coder->inPreSize = 0; + coder->inPreHeaderSize = 0; + coder->parseState = MTDEC_PARSE_CONTINUE; + coder->parsing_Truncated = False; + coder->skipMode = False; + coder->codeRes = SZ_OK; + coder->status = CODER_STATUS_NOT_SPECIFIED; + coder->inCodeSize = 0; + coder->outCodeSize = 0; + + coder->numStreams = me->numStreams; + coder->numTotalBlocks = me->numTotalBlocks; + coder->numBlocks = me->numBlocks; + + if (!coder->dec_created) + { + XzUnpacker_Construct(&coder->dec, &me->alignOffsetAlloc.vt); + coder->dec_created = True; + } + + XzUnpacker_Init(&coder->dec); + + if (me->isBlockHeaderState_Parse) + { + coder->dec.streamFlags = me->streamFlags; + coder->atBlockHeader = True; + XzUnpacker_PrepareToRandomBlockDecoding(&coder->dec); + } + else + { + coder->atBlockHeader = False; + me->isBlockHeaderState_Parse = True; + } + + coder->dec.numStartedStreams = me->numStreams; + coder->dec.numTotalBlocks = me->numTotalBlocks; + coder->dec.numBlocks = me->numBlocks; + } + + while (!coder->skipMode) + { + ECoderStatus status; + SRes res; + size_t srcSize2 = srcSize; + size_t destSize = (size_t)0 - 1; + + coder->dec.parseMode = True; + coder->dec.headerParsedOk = False; + + PRF_STR_INT("Parse", srcSize2); + + res = XzUnpacker_Code(&coder->dec, + NULL, &destSize, + cc->src, &srcSize2, cc->srcFinished, + CODER_FINISH_END, &status); + + // PRF(printf(" res = %d, srcSize2 = %d", res, (unsigned)srcSize2)); + + coder->codeRes = res; + coder->status = status; + cc->srcSize += srcSize2; + srcSize -= srcSize2; + coder->inPreHeaderSize += srcSize2; + coder->inPreSize = coder->inPreHeaderSize; + + if (res != SZ_OK) + { + cc->state = + coder->parseState = MTDEC_PARSE_END; + /* + if (res == SZ_ERROR_MEM) + return res; + return SZ_OK; + */ + return; // res; + } + + if (coder->dec.headerParsedOk) + { + const CXzBlock *block = &coder->dec.block; + if (XzBlock_HasUnpackSize(block) + // && block->unpackSize <= me->props.outBlockMax + && XzBlock_HasPackSize(block)) + { + { + if (block->unpackSize * 2 * me->mtc.numStartedThreads > me->props.memUseMax) + { + cc->state = MTDEC_PARSE_OVERFLOW; + return; // SZ_OK; + } + } + { + UInt64 packSize = block->packSize; + UInt64 packSizeAligned = packSize + ((0 - (unsigned)packSize) & 3); + UInt32 checkSize = 
XzFlags_GetCheckSize(coder->dec.streamFlags); + UInt64 blockPackSum = coder->inPreSize + packSizeAligned + checkSize; + // if (blockPackSum <= me->props.inBlockMax) + // unpackBlockMaxSize + { + coder->blockPackSize_for_Index = (size_t)(coder->dec.blockHeaderSize + packSize + checkSize); + coder->blockPackTotal = (size_t)blockPackSum; + coder->outPreSize = (size_t)block->unpackSize; + coder->streamFlags = coder->dec.streamFlags; + me->streamFlags = coder->dec.streamFlags; + coder->skipMode = True; + break; + } + } + } + } + else + // if (coder->inPreSize <= me->props.inBlockMax) + { + if (!cc->srcFinished) + return; // SZ_OK; + cc->state = + coder->parseState = MTDEC_PARSE_END; + return; // SZ_OK; + } + cc->state = MTDEC_PARSE_OVERFLOW; + return; // SZ_OK; + } + + // ---------- skipMode ---------- + { + UInt64 rem = coder->blockPackTotal - coder->inPreSize; + size_t cur = srcSize; + if (cur > rem) + cur = (size_t)rem; + cc->srcSize += cur; + coder->inPreSize += cur; + srcSize -= cur; + + if (coder->inPreSize == coder->blockPackTotal) + { + if (srcSize == 0) + { + if (!cc->srcFinished) + return; // SZ_OK; + cc->state = MTDEC_PARSE_END; + } + else if ((cc->src)[cc->srcSize] == 0) // we check control byte of next block + cc->state = MTDEC_PARSE_END; + else + { + cc->state = MTDEC_PARSE_NEW; + + { + size_t blockMax = me->unpackBlockMaxSize; + if (blockMax < coder->outPreSize) + blockMax = coder->outPreSize; + { + UInt64 required = (UInt64)blockMax * (me->mtc.numStartedThreads + 1) * 2; + if (me->props.memUseMax < required) + cc->canCreateNewThread = False; + } + } + + if (me->outSize_Defined) + { + // next block can be zero size + const UInt64 rem2 = me->outSize - me->outProcessed_Parse; + if (rem2 < coder->outPreSize) + { + coder->parsing_Truncated = True; + cc->state = MTDEC_PARSE_END; + } + me->outProcessed_Parse += coder->outPreSize; + } + } + } + else if (cc->srcFinished) + cc->state = MTDEC_PARSE_END; + else + return; // SZ_OK; + + coder->parseState = cc->state; + cc->outPos = coder->outPreSize; + + me->numStreams = coder->dec.numStartedStreams; + me->numTotalBlocks = coder->dec.numTotalBlocks; + me->numBlocks = coder->dec.numBlocks + 1; + return; // SZ_OK; + } +} + + +static SRes XzDecMt_Callback_PreCode(void *pp, unsigned coderIndex) +{ + CXzDecMt *me = (CXzDecMt *)pp; + CXzDecMtThread *coder = &me->coders[coderIndex]; + Byte *dest; + + if (!coder->dec.headerParsedOk) + return SZ_OK; + + dest = coder->outBuf; + + if (!dest || coder->outBufSize < coder->outPreSize) + { + if (dest) + { + ISzAlloc_Free(me->allocMid, dest); + coder->outBuf = NULL; + coder->outBufSize = 0; + } + { + size_t outPreSize = coder->outPreSize; + if (outPreSize == 0) + outPreSize = 1; + dest = (Byte *)ISzAlloc_Alloc(me->allocMid, outPreSize); + } + if (!dest) + return SZ_ERROR_MEM; + coder->outBuf = dest; + coder->outBufSize = coder->outPreSize; + + if (coder->outBufSize > me->unpackBlockMaxSize) + me->unpackBlockMaxSize = coder->outBufSize; + } + + // return SZ_ERROR_MEM; + + XzUnpacker_SetOutBuf(&coder->dec, coder->outBuf, coder->outBufSize); + + { + SRes res = XzDecMix_Init(&coder->dec.decoder, &coder->dec.block, coder->outBuf, coder->outBufSize); + // res = SZ_ERROR_UNSUPPORTED; // to test + coder->codeRes = res; + if (res != SZ_OK) + { + // if (res == SZ_ERROR_MEM) return res; + if (me->props.ignoreErrors && res != SZ_ERROR_MEM) + return S_OK; + return res; + } + } + + return SZ_OK; +} + + +static SRes XzDecMt_Callback_Code(void *pp, unsigned coderIndex, + const Byte *src, size_t srcSize, int srcFinished, + 
// int finished, int blockFinished, + UInt64 *inCodePos, UInt64 *outCodePos, int *stop) +{ + CXzDecMt *me = (CXzDecMt *)pp; + CXzDecMtThread *coder = &me->coders[coderIndex]; + + *inCodePos = coder->inCodeSize; + *outCodePos = coder->outCodeSize; + *stop = True; + + if (coder->inCodeSize < coder->inPreHeaderSize) + { + UInt64 rem = coder->inPreHeaderSize - coder->inCodeSize; + size_t step = srcSize; + if (step > rem) + step = (size_t)rem; + src += step; + srcSize -= step; + coder->inCodeSize += step; + if (coder->inCodeSize < coder->inPreHeaderSize) + { + *stop = False; + return SZ_OK; + } + } + + if (!coder->dec.headerParsedOk) + return SZ_OK; + if (!coder->outBuf) + return SZ_OK; + + if (coder->codeRes == SZ_OK) + { + ECoderStatus status; + SRes res; + size_t srcProcessed = srcSize; + size_t outSizeCur = coder->outPreSize - coder->dec.outDataWritten; + + // PRF(printf("\nCallback_Code: Code %d %d\n", (unsigned)srcSize, (unsigned)outSizeCur)); + + res = XzUnpacker_Code(&coder->dec, + NULL, &outSizeCur, + src, &srcProcessed, srcFinished, + // coder->finishedWithMark ? CODER_FINISH_END : CODER_FINISH_ANY, + CODER_FINISH_END, + &status); + + // PRF(printf(" res = %d, srcSize2 = %d, outSizeCur = %d", res, (unsigned)srcProcessed, (unsigned)outSizeCur)); + + coder->codeRes = res; + coder->status = status; + coder->inCodeSize += srcProcessed; + coder->outCodeSize = coder->dec.outDataWritten; + *inCodePos = coder->inCodeSize; + *outCodePos = coder->outCodeSize; + + if (res == SZ_OK) + { + if (srcProcessed == srcSize) + *stop = False; + return SZ_OK; + } + } + + if (me->props.ignoreErrors && coder->codeRes != SZ_ERROR_MEM) + { + *inCodePos = coder->inPreSize; + *outCodePos = coder->outPreSize; + return S_OK; + } + return coder->codeRes; +} + + +#define XZDECMT_STREAM_WRITE_STEP (1 << 24) + +static SRes XzDecMt_Callback_Write(void *pp, unsigned coderIndex, + BoolInt needWriteToStream, + const Byte *src, size_t srcSize, + // int srcFinished, + BoolInt *needContinue, + BoolInt *canRecode) +{ + CXzDecMt *me = (CXzDecMt *)pp; + const CXzDecMtThread *coder = &me->coders[coderIndex]; + + // PRF(printf("\nWrite processed = %d srcSize = %d\n", (unsigned)me->mtc.inProcessed, (unsigned)srcSize)); + + *needContinue = False; + *canRecode = True; + + if (!needWriteToStream) + return SZ_OK; + + if (!coder->dec.headerParsedOk || !coder->outBuf) + { + if (me->finishedDecoderIndex < 0) + me->finishedDecoderIndex = coderIndex; + return SZ_OK; + } + + if (me->finishedDecoderIndex >= 0) + return SZ_OK; + + me->mtc.inProcessed += coder->inCodeSize; + + *canRecode = False; + + { + SRes res; + size_t size = coder->outCodeSize; + Byte *data = coder->outBuf; + + // we use in me->dec: sha, numBlocks, indexSize + + if (!me->isBlockHeaderState_Write) + { + XzUnpacker_PrepareToRandomBlockDecoding(&me->dec); + me->dec.decodeOnlyOneBlock = False; + me->dec.numStartedStreams = coder->dec.numStartedStreams; + me->dec.streamFlags = coder->streamFlags; + + me->isBlockHeaderState_Write = True; + } + + me->dec.numTotalBlocks = coder->dec.numTotalBlocks; + XzUnpacker_UpdateIndex(&me->dec, coder->blockPackSize_for_Index, coder->outPreSize); + + if (coder->outPreSize != size) + { + if (me->props.ignoreErrors) + { + memset(data + size, 0, coder->outPreSize - size); + size = coder->outPreSize; + } + // me->numBadBlocks++; + if (me->mainErrorCode == SZ_OK) + { + if ((int)coder->status == LZMA_STATUS_NEEDS_MORE_INPUT) + me->mainErrorCode = SZ_ERROR_INPUT_EOF; + else + me->mainErrorCode = SZ_ERROR_DATA; + } + } + + if (me->writeRes != SZ_OK) 
+ return me->writeRes; + + res = SZ_OK; + { + if (me->outSize_Defined) + { + const UInt64 rem = me->outSize - me->outProcessed; + if (size > rem) + size = (SizeT)rem; + } + + for (;;) + { + size_t cur = size; + size_t written; + if (cur > XZDECMT_STREAM_WRITE_STEP) + cur = XZDECMT_STREAM_WRITE_STEP; + + written = ISeqOutStream_Write(me->outStream, data, cur); + + // PRF(printf("\nWritten ask = %d written = %d\n", (unsigned)cur, (unsigned)written)); + + me->outProcessed += written; + if (written != cur) + { + me->writeRes = SZ_ERROR_WRITE; + res = me->writeRes; + break; + } + data += cur; + size -= cur; + // PRF_STR_INT("Written size =", size); + if (size == 0) + break; + res = MtProgress_ProgressAdd(&me->mtc.mtProgress, 0, 0); + if (res != SZ_OK) + break; + } + } + + if (coder->codeRes != SZ_OK) + if (!me->props.ignoreErrors) + { + me->finishedDecoderIndex = coderIndex; + return res; + } + + RINOK(res); + + if (coder->inPreSize != coder->inCodeSize + || coder->blockPackTotal != coder->inCodeSize) + { + me->finishedDecoderIndex = coderIndex; + return SZ_OK; + } + + if (coder->parseState != MTDEC_PARSE_END) + { + *needContinue = True; + return SZ_OK; + } + } + + // (coder->state == MTDEC_PARSE_END) means that there are no other working threads + // so we can use mtc variables without lock + + PRF_STR_INT("Write MTDEC_PARSE_END", me->mtc.inProcessed); + + me->mtc.mtProgress.totalInSize = me->mtc.inProcessed; + { + CXzUnpacker *dec = &me->dec; + + PRF_STR_INT("PostSingle", srcSize); + + { + size_t srcProcessed = srcSize; + ECoderStatus status; + size_t outSizeCur = 0; + SRes res; + + // dec->decodeOnlyOneBlock = False; + dec->decodeToStreamSignature = True; + + me->mainDecoderWasCalled = True; + + if (coder->parsing_Truncated) + { + me->parsing_Truncated = True; + return SZ_OK; + } + + res = XzUnpacker_Code(dec, + NULL, &outSizeCur, + src, &srcProcessed, + me->mtc.readWasFinished, // srcFinished + CODER_FINISH_END, // CODER_FINISH_ANY, + &status); + + me->status = status; + me->codeRes = res; + + me->mtc.inProcessed += srcProcessed; + me->mtc.mtProgress.totalInSize = me->mtc.inProcessed; + + if (res != SZ_OK) + { + return S_OK; + // return res; + } + + if (dec->state == XZ_STATE_STREAM_HEADER) + { + *needContinue = True; + me->isBlockHeaderState_Parse = False; + me->isBlockHeaderState_Write = False; + { + Byte *crossBuf = MtDec_GetCrossBuff(&me->mtc); + if (!crossBuf) + return SZ_ERROR_MEM; + memcpy(crossBuf, src + srcProcessed, srcSize - srcProcessed); + } + me->mtc.crossStart = 0; + me->mtc.crossEnd = srcSize - srcProcessed; + return SZ_OK; + } + + if (status != CODER_STATUS_NEEDS_MORE_INPUT) + { + return E_FAIL; + } + + if (me->mtc.readWasFinished) + { + return SZ_OK; + } + } + + { + size_t inPos; + size_t inLim; + const Byte *inData; + UInt64 inProgressPrev = me->mtc.inProcessed; + + // XzDecMt_Prepare_InBuf_ST(p); + Byte *crossBuf = MtDec_GetCrossBuff(&me->mtc); + if (!crossBuf) + return SZ_ERROR_MEM; + + inPos = 0; + inLim = 0; + // outProcessed = 0; + + inData = crossBuf; + + for (;;) + { + SizeT inProcessed; + SizeT outProcessed; + ECoderStatus status; + SRes res; + + if (inPos == inLim) + { + if (!me->mtc.readWasFinished) + { + inPos = 0; + inLim = me->mtc.inBufSize; + me->mtc.readRes = ISeqInStream_Read(me->inStream, (void *)inData, &inLim); + me->mtc.readProcessed += inLim; + if (inLim == 0 || me->mtc.readRes != SZ_OK) + me->mtc.readWasFinished = True; + } + } + + inProcessed = inLim - inPos; + outProcessed = 0; + + res = XzUnpacker_Code(dec, + NULL, &outProcessed, + inData + inPos, 
&inProcessed, + (inProcessed == 0), // srcFinished + CODER_FINISH_END, &status); + + me->codeRes = res; + me->status = status; + inPos += inProcessed; + me->mtc.inProcessed += inProcessed; + me->mtc.mtProgress.totalInSize = me->mtc.inProcessed; + + if (res != SZ_OK) + { + return S_OK; + // return res; + } + + if (dec->state == XZ_STATE_STREAM_HEADER) + { + *needContinue = True; + me->mtc.crossStart = inPos; + me->mtc.crossEnd = inLim; + me->isBlockHeaderState_Parse = False; + me->isBlockHeaderState_Write = False; + return SZ_OK; + } + + if (status != CODER_STATUS_NEEDS_MORE_INPUT) + return E_FAIL; + + if (me->mtc.progress) + { + UInt64 inDelta = me->mtc.inProcessed - inProgressPrev; + if (inDelta >= (1 << 22)) + { + RINOK(MtProgress_Progress_ST(&me->mtc.mtProgress)); + inProgressPrev = me->mtc.inProcessed; + } + } + if (me->mtc.readWasFinished) + return SZ_OK; + } + } + } +} + + +#endif + + + +void XzStatInfo_Clear(CXzStatInfo *p) +{ + p->InSize = 0; + p->OutSize = 0; + + p->NumStreams = 0; + p->NumBlocks = 0; + + p->UnpackSize_Defined = False; + + p->NumStreams_Defined = False; + p->NumBlocks_Defined = False; + + // p->IsArc = False; + // p->UnexpectedEnd = False; + // p->Unsupported = False; + // p->HeadersError = False; + // p->DataError = False; + // p->CrcError = False; + + p->DataAfterEnd = False; + p->DecodingTruncated = False; + + p->DecodeRes = SZ_OK; + p->ReadRes = SZ_OK; + p->ProgressRes = SZ_OK; + + p->CombinedRes = SZ_OK; + p->CombinedRes_Type = SZ_OK; +} + + + + +static SRes XzDecMt_Decode_ST(CXzDecMt *p + #ifndef _7ZIP_ST + , BoolInt tMode + #endif + , CXzStatInfo *stat) +{ + size_t outPos; + size_t inPos, inLim; + const Byte *inData; + UInt64 inPrev, outPrev; + + CXzUnpacker *dec; + + #ifndef _7ZIP_ST + if (tMode) + { + XzDecMt_FreeOutBufs(p); + tMode = MtDec_PrepareRead(&p->mtc); + } + #endif + + if (!p->outBuf || p->outBufSize != p->props.outStep_ST) + { + ISzAlloc_Free(p->allocMid, p->outBuf); + p->outBufSize = 0; + p->outBuf = (Byte *)ISzAlloc_Alloc(p->allocMid, p->props.outStep_ST); + if (!p->outBuf) + return SZ_ERROR_MEM; + p->outBufSize = p->props.outStep_ST; + } + + if (!p->inBuf || p->inBufSize != p->props.inBufSize_ST) + { + ISzAlloc_Free(p->allocMid, p->inBuf); + p->inBufSize = 0; + p->inBuf = (Byte *)ISzAlloc_Alloc(p->allocMid, p->props.inBufSize_ST); + if (!p->inBuf) + return SZ_ERROR_MEM; + p->inBufSize = p->props.inBufSize_ST; + } + + dec = &p->dec; + dec->decodeToStreamSignature = False; + // dec->decodeOnlyOneBlock = False; + + XzUnpacker_SetOutBuf(dec, NULL, 0); + + inPrev = p->inProcessed; + outPrev = p->outProcessed; + + inPos = 0; + inLim = 0; + inData = NULL; + outPos = 0; + + for (;;) + { + SizeT outSize; + BoolInt finished; + ECoderFinishMode finishMode; + SizeT inProcessed; + ECoderStatus status; + SRes res; + + SizeT outProcessed; + + + + if (inPos == inLim) + { + #ifndef _7ZIP_ST + if (tMode) + { + inData = MtDec_Read(&p->mtc, &inLim); + inPos = 0; + if (inData) + continue; + tMode = False; + inLim = 0; + } + #endif + + if (!p->readWasFinished) + { + inPos = 0; + inLim = p->inBufSize; + inData = p->inBuf; + p->readRes = ISeqInStream_Read(p->inStream, (void *)inData, &inLim); + p->readProcessed += inLim; + if (inLim == 0 || p->readRes != SZ_OK) + p->readWasFinished = True; + } + } + + outSize = p->props.outStep_ST - outPos; + + finishMode = CODER_FINISH_ANY; + if (p->outSize_Defined) + { + const UInt64 rem = p->outSize - p->outProcessed; + if (outSize >= rem) + { + outSize = (SizeT)rem; + if (p->finishMode) + finishMode = CODER_FINISH_END; + } + } 
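+ /* one pass of the single-threaded loop: decode from inBuf[inPos..inLim)
+    into outBuf[outPos..), flush outBuf to outStream once it fills up or
+    decoding finishes, and report progress after roughly every 4 MiB */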
+ + inProcessed = inLim - inPos; + outProcessed = outSize; + + res = XzUnpacker_Code(dec, p->outBuf + outPos, &outProcessed, + inData + inPos, &inProcessed, + (inPos == inLim), // srcFinished + finishMode, &status); + + p->codeRes = res; + p->status = status; + + inPos += inProcessed; + outPos += outProcessed; + p->inProcessed += inProcessed; + p->outProcessed += outProcessed; + + finished = ((inProcessed == 0 && outProcessed == 0) || res != SZ_OK); + + if (finished || outProcessed >= outSize) + if (outPos != 0) + { + size_t written = ISeqOutStream_Write(p->outStream, p->outBuf, outPos); + p->outProcessed += written; + if (written != outPos) + { + stat->CombinedRes_Type = SZ_ERROR_WRITE; + return SZ_ERROR_WRITE; + } + outPos = 0; + } + + if (p->progress && res == SZ_OK) + { + UInt64 inDelta = p->inProcessed - inPrev; + UInt64 outDelta = p->outProcessed - outPrev; + if (inDelta >= (1 << 22) || outDelta >= (1 << 22)) + { + res = ICompressProgress_Progress(p->progress, p->inProcessed, p->outProcessed); + if (res != SZ_OK) + { + stat->CombinedRes_Type = SZ_ERROR_PROGRESS; + stat->ProgressRes = res; + return res; + } + inPrev = p->inProcessed; + outPrev = p->outProcessed; + } + } + + if (finished) + return res; + } +} + +static SRes XzStatInfo_SetStat(const CXzUnpacker *dec, + int finishMode, + UInt64 readProcessed, UInt64 inProcessed, + SRes res, ECoderStatus status, + BoolInt decodingTruncated, + CXzStatInfo *stat) +{ + UInt64 extraSize; + + stat->DecodingTruncated = (Byte)(decodingTruncated ? 1 : 0); + stat->InSize = inProcessed; + stat->NumStreams = dec->numStartedStreams; + stat->NumBlocks = dec->numTotalBlocks; + + stat->UnpackSize_Defined = True; + stat->NumStreams_Defined = True; + stat->NumBlocks_Defined = True; + + extraSize = XzUnpacker_GetExtraSize(dec); + + if (res == SZ_OK) + { + if (status == CODER_STATUS_NEEDS_MORE_INPUT) + { + // CODER_STATUS_NEEDS_MORE_INPUT is expected status for correct xz streams + extraSize = 0; + if (!XzUnpacker_IsStreamWasFinished(dec)) + res = SZ_ERROR_INPUT_EOF; + } + else if (!decodingTruncated || finishMode) // (status == CODER_STATUS_NOT_FINISHED) + res = SZ_ERROR_DATA; + } + else if (res == SZ_ERROR_NO_ARCHIVE) + { + /* + SZ_ERROR_NO_ARCHIVE is possible for 2 states: + XZ_STATE_STREAM_HEADER - if bad signature or bad CRC + XZ_STATE_STREAM_PADDING - if non-zero padding data + extraSize / inProcessed don't include "bad" byte + */ + if (inProcessed != extraSize) // if good streams before error + if (extraSize != 0 || readProcessed != inProcessed) + { + stat->DataAfterEnd = True; + // there is some good xz stream before. 
So we set SZ_OK + res = SZ_OK; + } + } + + stat->DecodeRes = res; + + stat->InSize -= extraSize; + return res; +} + + +SRes XzDecMt_Decode(CXzDecMtHandle pp, + const CXzDecMtProps *props, + const UInt64 *outDataSize, int finishMode, + ISeqOutStream *outStream, + // Byte *outBuf, size_t *outBufSize, + ISeqInStream *inStream, + // const Byte *inData, size_t inDataSize, + CXzStatInfo *stat, + int *isMT, + ICompressProgress *progress) +{ + CXzDecMt *p = (CXzDecMt *)pp; + #ifndef _7ZIP_ST + BoolInt tMode; + #endif + + XzStatInfo_Clear(stat); + + p->props = *props; + + p->inStream = inStream; + p->outStream = outStream; + p->progress = progress; + // p->stat = stat; + + p->outSize = 0; + p->outSize_Defined = False; + if (outDataSize) + { + p->outSize_Defined = True; + p->outSize = *outDataSize; + } + + p->finishMode = finishMode; + + // p->outSize = 457; p->outSize_Defined = True; p->finishMode = False; // for test + + p->writeRes = SZ_OK; + p->outProcessed = 0; + p->inProcessed = 0; + p->readProcessed = 0; + p->readWasFinished = False; + + p->codeRes = 0; + p->status = CODER_STATUS_NOT_SPECIFIED; + + XzUnpacker_Init(&p->dec); + + *isMT = False; + + /* + p->outBuf = NULL; + p->outBufSize = 0; + if (!outStream) + { + p->outBuf = outBuf; + p->outBufSize = *outBufSize; + *outBufSize = 0; + } + */ + + + #ifndef _7ZIP_ST + + p->isBlockHeaderState_Parse = False; + p->isBlockHeaderState_Write = False; + // p->numBadBlocks = 0; + p->mainErrorCode = SZ_OK; + p->mainDecoderWasCalled = False; + + tMode = False; + + if (p->props.numThreads > 1) + { + IMtDecCallback vt; + + // we just free ST buffers here + // but we still keep state variables, that was set in XzUnpacker_Init() + XzDecMt_FreeSt(p); + + p->outProcessed_Parse = 0; + p->parsing_Truncated = False; + + p->numStreams = 0; + p->numTotalBlocks = 0; + p->numBlocks = 0; + p->finishedDecoderIndex = -1; + + if (!p->mtc_WasConstructed) + { + p->mtc_WasConstructed = True; + MtDec_Construct(&p->mtc); + } + + p->mtc.mtCallback = &vt; + p->mtc.mtCallbackObject = p; + + p->mtc.progress = progress; + p->mtc.inStream = inStream; + p->mtc.alloc = &p->alignOffsetAlloc.vt; + // p->mtc.inData = inData; + // p->mtc.inDataSize = inDataSize; + p->mtc.inBufSize = p->props.inBufSize_MT; + // p->mtc.inBlockMax = p->props.inBlockMax; + p->mtc.numThreadsMax = p->props.numThreads; + + *isMT = True; + + vt.Parse = XzDecMt_Callback_Parse; + vt.PreCode = XzDecMt_Callback_PreCode; + vt.Code = XzDecMt_Callback_Code; + vt.Write = XzDecMt_Callback_Write; + + { + BoolInt needContinue; + + SRes res = MtDec_Code(&p->mtc); + + stat->InSize = p->mtc.inProcessed; + + p->inProcessed = p->mtc.inProcessed; + p->readRes = p->mtc.readRes; + p->readWasFinished = p->mtc.readWasFinished; + p->readProcessed = p->mtc.readProcessed; + + tMode = True; + needContinue = False; + + if (res == SZ_OK) + { + if (p->mtc.mtProgress.res != SZ_OK) + { + res = p->mtc.mtProgress.res; + stat->ProgressRes = res; + stat->CombinedRes_Type = SZ_ERROR_PROGRESS; + } + else + needContinue = p->mtc.needContinue; + } + + if (!needContinue) + { + SRes codeRes; + BoolInt truncated = False; + ECoderStatus status; + CXzUnpacker *dec; + + stat->OutSize = p->outProcessed; + + if (p->finishedDecoderIndex >= 0) + { + CXzDecMtThread *coder = &p->coders[(unsigned)p->finishedDecoderIndex]; + codeRes = coder->codeRes; + dec = &coder->dec; + status = coder->status; + } + else if (p->mainDecoderWasCalled) + { + codeRes = p->codeRes; + dec = &p->dec; + status = p->status; + truncated = p->parsing_Truncated; + } + else + return E_FAIL; 
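+ /* fill stat from whichever decoder observed the end of the stream:
+    the first coder that finished, or the main cross-block decoder.
+    when combining results below, a write error takes precedence over
+    a read error, which takes precedence over a recorded block error */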
+ + XzStatInfo_SetStat(dec, p->finishMode, + p->mtc.readProcessed, p->mtc.inProcessed, + codeRes, status, + truncated, + stat); + + if (res == SZ_OK) + { + if (p->writeRes != SZ_OK) + { + res = p->writeRes; + stat->CombinedRes_Type = SZ_ERROR_WRITE; + } + else if (p->mtc.readRes != SZ_OK && p->mtc.inProcessed == p->mtc.readProcessed) + { + res = p->mtc.readRes; + stat->ReadRes = res; + stat->CombinedRes_Type = SZ_ERROR_READ; + } + else if (p->mainErrorCode != SZ_OK) + { + res = p->mainErrorCode; + } + } + + stat->CombinedRes = res; + if (stat->CombinedRes_Type == SZ_OK) + stat->CombinedRes_Type = res; + return res; + } + + PRF_STR("----- decoding ST -----"); + } + } + + #endif + + + *isMT = False; + + { + SRes res = XzDecMt_Decode_ST(p + #ifndef _7ZIP_ST + , tMode + #endif + , stat + ); + + XzStatInfo_SetStat(&p->dec, + p->finishMode, + p->readProcessed, p->inProcessed, + p->codeRes, p->status, + False, // truncated + stat); + + if (res == SZ_OK) + { + /* + if (p->writeRes != SZ_OK) + { + res = p->writeRes; + stat->CombinedRes_Type = SZ_ERROR_WRITE; + } + else + */ + if (p->readRes != SZ_OK && p->inProcessed == p->readProcessed) + { + res = p->readRes; + stat->ReadRes = res; + stat->CombinedRes_Type = SZ_ERROR_READ; + } + #ifndef _7ZIP_ST + else if (p->mainErrorCode != SZ_OK) + res = p->mainErrorCode; + #endif + } + + stat->CombinedRes = res; + if (stat->CombinedRes_Type == SZ_OK) + stat->CombinedRes_Type = res; + return res; + } +} diff --git a/3rdparty/7z/src/XzEnc.c b/3rdparty/7z/src/XzEnc.c new file mode 100644 index 0000000000..309eca949e --- /dev/null +++ b/3rdparty/7z/src/XzEnc.c @@ -0,0 +1,1329 @@ +/* XzEnc.c -- Xz Encode +2019-02-02 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include +#include + +#include "7zCrc.h" +#include "Bra.h" +#include "CpuArch.h" + +#ifdef USE_SUBBLOCK +#include "Bcj3Enc.c" +#include "SbFind.c" +#include "SbEnc.c" +#endif + +#include "XzEnc.h" + +// #define _7ZIP_ST + +#ifndef _7ZIP_ST +#include "MtCoder.h" +#else +#define MTCODER__THREADS_MAX 1 +#define MTCODER__BLOCKS_MAX 1 +#endif + +#define XZ_GET_PAD_SIZE(dataSize) ((4 - ((unsigned)(dataSize) & 3)) & 3) + +/* max pack size for LZMA2 block + check-64bytrs: */ +#define XZ_GET_MAX_BLOCK_PACK_SIZE(unpackSize) ((unpackSize) + ((unpackSize) >> 10) + 16 + 64) + +#define XZ_GET_ESTIMATED_BLOCK_TOTAL_PACK_SIZE(unpackSize) (XZ_BLOCK_HEADER_SIZE_MAX + XZ_GET_MAX_BLOCK_PACK_SIZE(unpackSize)) + + +#define XzBlock_ClearFlags(p) (p)->flags = 0; +#define XzBlock_SetNumFilters(p, n) (p)->flags |= ((n) - 1); +#define XzBlock_SetHasPackSize(p) (p)->flags |= XZ_BF_PACK_SIZE; +#define XzBlock_SetHasUnpackSize(p) (p)->flags |= XZ_BF_UNPACK_SIZE; + + +static SRes WriteBytes(ISeqOutStream *s, const void *buf, size_t size) +{ + return (ISeqOutStream_Write(s, buf, size) == size) ? 
SZ_OK : SZ_ERROR_WRITE; +} + +static SRes WriteBytesUpdateCrc(ISeqOutStream *s, const void *buf, size_t size, UInt32 *crc) +{ + *crc = CrcUpdate(*crc, buf, size); + return WriteBytes(s, buf, size); +} + + +static SRes Xz_WriteHeader(CXzStreamFlags f, ISeqOutStream *s) +{ + UInt32 crc; + Byte header[XZ_STREAM_HEADER_SIZE]; + memcpy(header, XZ_SIG, XZ_SIG_SIZE); + header[XZ_SIG_SIZE] = (Byte)(f >> 8); + header[XZ_SIG_SIZE + 1] = (Byte)(f & 0xFF); + crc = CrcCalc(header + XZ_SIG_SIZE, XZ_STREAM_FLAGS_SIZE); + SetUi32(header + XZ_SIG_SIZE + XZ_STREAM_FLAGS_SIZE, crc); + return WriteBytes(s, header, XZ_STREAM_HEADER_SIZE); +} + + +static SRes XzBlock_WriteHeader(const CXzBlock *p, ISeqOutStream *s) +{ + Byte header[XZ_BLOCK_HEADER_SIZE_MAX]; + + unsigned pos = 1; + unsigned numFilters, i; + header[pos++] = p->flags; + + if (XzBlock_HasPackSize(p)) pos += Xz_WriteVarInt(header + pos, p->packSize); + if (XzBlock_HasUnpackSize(p)) pos += Xz_WriteVarInt(header + pos, p->unpackSize); + numFilters = XzBlock_GetNumFilters(p); + + for (i = 0; i < numFilters; i++) + { + const CXzFilter *f = &p->filters[i]; + pos += Xz_WriteVarInt(header + pos, f->id); + pos += Xz_WriteVarInt(header + pos, f->propsSize); + memcpy(header + pos, f->props, f->propsSize); + pos += f->propsSize; + } + + while ((pos & 3) != 0) + header[pos++] = 0; + + header[0] = (Byte)(pos >> 2); + SetUi32(header + pos, CrcCalc(header, pos)); + return WriteBytes(s, header, pos + 4); +} + + + + +typedef struct +{ + size_t numBlocks; + size_t size; + size_t allocated; + Byte *blocks; +} CXzEncIndex; + + +static void XzEncIndex_Construct(CXzEncIndex *p) +{ + p->numBlocks = 0; + p->size = 0; + p->allocated = 0; + p->blocks = NULL; +} + +static void XzEncIndex_Init(CXzEncIndex *p) +{ + p->numBlocks = 0; + p->size = 0; +} + +static void XzEncIndex_Free(CXzEncIndex *p, ISzAllocPtr alloc) +{ + if (p->blocks) + { + ISzAlloc_Free(alloc, p->blocks); + p->blocks = NULL; + } + p->numBlocks = 0; + p->size = 0; + p->allocated = 0; +} + + +static SRes XzEncIndex_ReAlloc(CXzEncIndex *p, size_t newSize, ISzAllocPtr alloc) +{ + Byte *blocks = (Byte *)ISzAlloc_Alloc(alloc, newSize); + if (!blocks) + return SZ_ERROR_MEM; + if (p->size != 0) + memcpy(blocks, p->blocks, p->size); + if (p->blocks) + ISzAlloc_Free(alloc, p->blocks); + p->blocks = blocks; + p->allocated = newSize; + return SZ_OK; +} + + +static SRes XzEncIndex_PreAlloc(CXzEncIndex *p, UInt64 numBlocks, UInt64 unpackSize, UInt64 totalSize, ISzAllocPtr alloc) +{ + UInt64 pos; + { + Byte buf[32]; + unsigned pos2 = Xz_WriteVarInt(buf, totalSize); + pos2 += Xz_WriteVarInt(buf + pos2, unpackSize); + pos = numBlocks * pos2; + } + + if (pos <= p->allocated - p->size) + return SZ_OK; + { + UInt64 newSize64 = p->size + pos; + size_t newSize = (size_t)newSize64; + if (newSize != newSize64) + return SZ_ERROR_MEM; + return XzEncIndex_ReAlloc(p, newSize, alloc); + } +} + + +static SRes XzEncIndex_AddIndexRecord(CXzEncIndex *p, UInt64 unpackSize, UInt64 totalSize, ISzAllocPtr alloc) +{ + Byte buf[32]; + unsigned pos = Xz_WriteVarInt(buf, totalSize); + pos += Xz_WriteVarInt(buf + pos, unpackSize); + + if (pos > p->allocated - p->size) + { + size_t newSize = p->allocated * 2 + 16 * 2; + if (newSize < p->size + pos) + return SZ_ERROR_MEM; + RINOK(XzEncIndex_ReAlloc(p, newSize, alloc)); + } + memcpy(p->blocks + p->size, buf, pos); + p->size += pos; + p->numBlocks++; + return SZ_OK; +} + + +static SRes XzEncIndex_WriteFooter(const CXzEncIndex *p, CXzStreamFlags flags, ISeqOutStream *s) +{ + Byte buf[32]; + UInt64 
globalPos; + UInt32 crc = CRC_INIT_VAL; + unsigned pos = 1 + Xz_WriteVarInt(buf + 1, p->numBlocks); + + globalPos = pos; + buf[0] = 0; + RINOK(WriteBytesUpdateCrc(s, buf, pos, &crc)); + RINOK(WriteBytesUpdateCrc(s, p->blocks, p->size, &crc)); + globalPos += p->size; + + pos = XZ_GET_PAD_SIZE(globalPos); + buf[1] = 0; + buf[2] = 0; + buf[3] = 0; + globalPos += pos; + + crc = CrcUpdate(crc, buf + 4 - pos, pos); + SetUi32(buf + 4, CRC_GET_DIGEST(crc)); + + SetUi32(buf + 8 + 4, (UInt32)(globalPos >> 2)); + buf[8 + 8] = (Byte)(flags >> 8); + buf[8 + 9] = (Byte)(flags & 0xFF); + SetUi32(buf + 8, CrcCalc(buf + 8 + 4, 6)); + buf[8 + 10] = XZ_FOOTER_SIG_0; + buf[8 + 11] = XZ_FOOTER_SIG_1; + + return WriteBytes(s, buf + 4 - pos, pos + 4 + 12); +} + + + +/* ---------- CSeqCheckInStream ---------- */ + +typedef struct +{ + ISeqInStream vt; + ISeqInStream *realStream; + const Byte *data; + UInt64 limit; + UInt64 processed; + int realStreamFinished; + CXzCheck check; +} CSeqCheckInStream; + +static void SeqCheckInStream_Init(CSeqCheckInStream *p, unsigned checkMode) +{ + p->limit = (UInt64)(Int64)-1; + p->processed = 0; + p->realStreamFinished = 0; + XzCheck_Init(&p->check, checkMode); +} + +static void SeqCheckInStream_GetDigest(CSeqCheckInStream *p, Byte *digest) +{ + XzCheck_Final(&p->check, digest); +} + +static SRes SeqCheckInStream_Read(const ISeqInStream *pp, void *data, size_t *size) +{ + CSeqCheckInStream *p = CONTAINER_FROM_VTBL(pp, CSeqCheckInStream, vt); + size_t size2 = *size; + SRes res = SZ_OK; + + if (p->limit != (UInt64)(Int64)-1) + { + UInt64 rem = p->limit - p->processed; + if (size2 > rem) + size2 = (size_t)rem; + } + if (size2 != 0) + { + if (p->realStream) + { + res = ISeqInStream_Read(p->realStream, data, &size2); + p->realStreamFinished = (size2 == 0) ? 
1 : 0; + } + else + memcpy(data, p->data + (size_t)p->processed, size2); + XzCheck_Update(&p->check, data, size2); + p->processed += size2; + } + *size = size2; + return res; +} + + +/* ---------- CSeqSizeOutStream ---------- */ + +typedef struct +{ + ISeqOutStream vt; + ISeqOutStream *realStream; + Byte *outBuf; + size_t outBufLimit; + UInt64 processed; +} CSeqSizeOutStream; + +static size_t SeqSizeOutStream_Write(const ISeqOutStream *pp, const void *data, size_t size) +{ + CSeqSizeOutStream *p = CONTAINER_FROM_VTBL(pp, CSeqSizeOutStream, vt); + if (p->realStream) + size = ISeqOutStream_Write(p->realStream, data, size); + else + { + if (size > p->outBufLimit - (size_t)p->processed) + return 0; + memcpy(p->outBuf + (size_t)p->processed, data, size); + } + p->processed += size; + return size; +} + + +/* ---------- CSeqInFilter ---------- */ + +#define FILTER_BUF_SIZE (1 << 20) + +typedef struct +{ + ISeqInStream p; + ISeqInStream *realStream; + IStateCoder StateCoder; + Byte *buf; + size_t curPos; + size_t endPos; + int srcWasFinished; +} CSeqInFilter; + + +SRes BraState_SetFromMethod(IStateCoder *p, UInt64 id, int encodeMode, ISzAllocPtr alloc); + +static SRes SeqInFilter_Init(CSeqInFilter *p, const CXzFilter *props, ISzAllocPtr alloc) +{ + if (!p->buf) + { + p->buf = (Byte *)ISzAlloc_Alloc(alloc, FILTER_BUF_SIZE); + if (!p->buf) + return SZ_ERROR_MEM; + } + p->curPos = p->endPos = 0; + p->srcWasFinished = 0; + RINOK(BraState_SetFromMethod(&p->StateCoder, props->id, 1, alloc)); + RINOK(p->StateCoder.SetProps(p->StateCoder.p, props->props, props->propsSize, alloc)); + p->StateCoder.Init(p->StateCoder.p); + return SZ_OK; +} + + +static SRes SeqInFilter_Read(const ISeqInStream *pp, void *data, size_t *size) +{ + CSeqInFilter *p = CONTAINER_FROM_VTBL(pp, CSeqInFilter, p); + size_t sizeOriginal = *size; + if (sizeOriginal == 0) + return SZ_OK; + *size = 0; + + for (;;) + { + if (!p->srcWasFinished && p->curPos == p->endPos) + { + p->curPos = 0; + p->endPos = FILTER_BUF_SIZE; + RINOK(ISeqInStream_Read(p->realStream, p->buf, &p->endPos)); + if (p->endPos == 0) + p->srcWasFinished = 1; + } + { + SizeT srcLen = p->endPos - p->curPos; + ECoderStatus status; + SRes res; + *size = sizeOriginal; + res = p->StateCoder.Code2(p->StateCoder.p, + (Byte *)data, size, + p->buf + p->curPos, &srcLen, + p->srcWasFinished, CODER_FINISH_ANY, + &status); + p->curPos += srcLen; + if (*size != 0 || srcLen == 0 || res != SZ_OK) + return res; + } + } +} + +static void SeqInFilter_Construct(CSeqInFilter *p) +{ + p->buf = NULL; + p->StateCoder.p = NULL; + p->p.Read = SeqInFilter_Read; +} + +static void SeqInFilter_Free(CSeqInFilter *p, ISzAllocPtr alloc) +{ + if (p->StateCoder.p) + { + p->StateCoder.Free(p->StateCoder.p, alloc); + p->StateCoder.p = NULL; + } + if (p->buf) + { + ISzAlloc_Free(alloc, p->buf); + p->buf = NULL; + } +} + + +/* ---------- CSbEncInStream ---------- */ + +#ifdef USE_SUBBLOCK + +typedef struct +{ + ISeqInStream vt; + ISeqInStream *inStream; + CSbEnc enc; +} CSbEncInStream; + +static SRes SbEncInStream_Read(const ISeqInStream *pp, void *data, size_t *size) +{ + CSbEncInStream *p = CONTAINER_FROM_VTBL(pp, CSbEncInStream, vt); + size_t sizeOriginal = *size; + if (sizeOriginal == 0) + return SZ_OK; + + for (;;) + { + if (p->enc.needRead && !p->enc.readWasFinished) + { + size_t processed = p->enc.needReadSizeMax; + RINOK(p->inStream->Read(p->inStream, p->enc.buf + p->enc.readPos, &processed)); + p->enc.readPos += processed; + if (processed == 0) + { + p->enc.readWasFinished = True; + 
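+ /* a 0-byte read from the source stream means the input is exhausted */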
p->enc.isFinalFinished = True; + } + p->enc.needRead = False; + } + + *size = sizeOriginal; + RINOK(SbEnc_Read(&p->enc, data, size)); + if (*size != 0 || !p->enc.needRead) + return SZ_OK; + } +} + +void SbEncInStream_Construct(CSbEncInStream *p, ISzAllocPtr alloc) +{ + SbEnc_Construct(&p->enc, alloc); + p->vt.Read = SbEncInStream_Read; +} + +SRes SbEncInStream_Init(CSbEncInStream *p) +{ + return SbEnc_Init(&p->enc); +} + +void SbEncInStream_Free(CSbEncInStream *p) +{ + SbEnc_Free(&p->enc); +} + +#endif + + + +/* ---------- CXzProps ---------- */ + + +void XzFilterProps_Init(CXzFilterProps *p) +{ + p->id = 0; + p->delta = 0; + p->ip = 0; + p->ipDefined = False; +} + +void XzProps_Init(CXzProps *p) +{ + p->checkId = XZ_CHECK_CRC32; + p->blockSize = XZ_PROPS__BLOCK_SIZE__AUTO; + p->numBlockThreads_Reduced = -1; + p->numBlockThreads_Max = -1; + p->numTotalThreads = -1; + p->reduceSize = (UInt64)(Int64)-1; + p->forceWriteSizesInHeader = 0; + // p->forceWriteSizesInHeader = 1; + + XzFilterProps_Init(&p->filterProps); + Lzma2EncProps_Init(&p->lzma2Props); +} + + +static void XzEncProps_Normalize_Fixed(CXzProps *p) +{ + UInt64 fileSize; + int t1, t1n, t2, t2r, t3; + { + CLzma2EncProps tp = p->lzma2Props; + if (tp.numTotalThreads <= 0) + tp.numTotalThreads = p->numTotalThreads; + Lzma2EncProps_Normalize(&tp); + t1n = tp.numTotalThreads; + } + + t1 = p->lzma2Props.numTotalThreads; + t2 = p->numBlockThreads_Max; + t3 = p->numTotalThreads; + + if (t2 > MTCODER__THREADS_MAX) + t2 = MTCODER__THREADS_MAX; + + if (t3 <= 0) + { + if (t2 <= 0) + t2 = 1; + t3 = t1n * t2; + } + else if (t2 <= 0) + { + t2 = t3 / t1n; + if (t2 == 0) + { + t1 = 1; + t2 = t3; + } + if (t2 > MTCODER__THREADS_MAX) + t2 = MTCODER__THREADS_MAX; + } + else if (t1 <= 0) + { + t1 = t3 / t2; + if (t1 == 0) + t1 = 1; + } + else + t3 = t1n * t2; + + p->lzma2Props.numTotalThreads = t1; + + t2r = t2; + + fileSize = p->reduceSize; + + if ((p->blockSize < fileSize || fileSize == (UInt64)(Int64)-1)) + p->lzma2Props.lzmaProps.reduceSize = p->blockSize; + + Lzma2EncProps_Normalize(&p->lzma2Props); + + t1 = p->lzma2Props.numTotalThreads; + + { + if (t2 > 1 && fileSize != (UInt64)(Int64)-1) + { + UInt64 numBlocks = fileSize / p->blockSize; + if (numBlocks * p->blockSize != fileSize) + numBlocks++; + if (numBlocks < (unsigned)t2) + { + t2r = (unsigned)numBlocks; + if (t2r == 0) + t2r = 1; + t3 = t1 * t2r; + } + } + } + + p->numBlockThreads_Max = t2; + p->numBlockThreads_Reduced = t2r; + p->numTotalThreads = t3; +} + + +static void XzProps_Normalize(CXzProps *p) +{ + /* we normalize xzProps properties, but we normalize only some of CXzProps::lzma2Props properties. + Lzma2Enc_SetProps() will normalize lzma2Props later. 
*/ + + if (p->blockSize == XZ_PROPS__BLOCK_SIZE__SOLID) + { + p->lzma2Props.lzmaProps.reduceSize = p->reduceSize; + p->numBlockThreads_Reduced = 1; + p->numBlockThreads_Max = 1; + if (p->lzma2Props.numTotalThreads <= 0) + p->lzma2Props.numTotalThreads = p->numTotalThreads; + return; + } + else + { + CLzma2EncProps *lzma2 = &p->lzma2Props; + if (p->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO) + { + // xz-auto + p->lzma2Props.lzmaProps.reduceSize = p->reduceSize; + + if (lzma2->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID) + { + // if (xz-auto && lzma2-solid) - we use solid for both + p->blockSize = XZ_PROPS__BLOCK_SIZE__SOLID; + p->numBlockThreads_Reduced = 1; + p->numBlockThreads_Max = 1; + if (p->lzma2Props.numTotalThreads <= 0) + p->lzma2Props.numTotalThreads = p->numTotalThreads; + } + else + { + // if (xz-auto && (lzma2-auto || lzma2-fixed_) + // we calculate block size for lzma2 and use that block size for xz, lzma2 uses single-chunk per block + CLzma2EncProps tp = p->lzma2Props; + if (tp.numTotalThreads <= 0) + tp.numTotalThreads = p->numTotalThreads; + + Lzma2EncProps_Normalize(&tp); + + p->blockSize = tp.blockSize; // fixed or solid + p->numBlockThreads_Reduced = tp.numBlockThreads_Reduced; + p->numBlockThreads_Max = tp.numBlockThreads_Max; + if (lzma2->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO) + lzma2->blockSize = tp.blockSize; // fixed or solid, LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID + if (lzma2->lzmaProps.reduceSize > tp.blockSize && tp.blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID) + lzma2->lzmaProps.reduceSize = tp.blockSize; + lzma2->numBlockThreads_Reduced = 1; + lzma2->numBlockThreads_Max = 1; + return; + } + } + else + { + // xz-fixed + // we can use xz::reduceSize or xz::blockSize as base for lzmaProps::reduceSize + + p->lzma2Props.lzmaProps.reduceSize = p->reduceSize; + { + UInt64 r = p->reduceSize; + if (r > p->blockSize || r == (UInt64)(Int64)-1) + r = p->blockSize; + lzma2->lzmaProps.reduceSize = r; + } + if (lzma2->blockSize == LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO) + lzma2->blockSize = LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID; + else if (lzma2->blockSize > p->blockSize && lzma2->blockSize != LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID) + lzma2->blockSize = p->blockSize; + + XzEncProps_Normalize_Fixed(p); + } + } +} + + +/* ---------- CLzma2WithFilters ---------- */ + +typedef struct +{ + CLzma2EncHandle lzma2; + CSeqInFilter filter; + + #ifdef USE_SUBBLOCK + CSbEncInStream sb; + #endif +} CLzma2WithFilters; + + +static void Lzma2WithFilters_Construct(CLzma2WithFilters *p) +{ + p->lzma2 = NULL; + SeqInFilter_Construct(&p->filter); + + #ifdef USE_SUBBLOCK + SbEncInStream_Construct(&p->sb, alloc); + #endif +} + + +static SRes Lzma2WithFilters_Create(CLzma2WithFilters *p, ISzAllocPtr alloc, ISzAllocPtr bigAlloc) +{ + if (!p->lzma2) + { + p->lzma2 = Lzma2Enc_Create(alloc, bigAlloc); + if (!p->lzma2) + return SZ_ERROR_MEM; + } + return SZ_OK; +} + + +static void Lzma2WithFilters_Free(CLzma2WithFilters *p, ISzAllocPtr alloc) +{ + #ifdef USE_SUBBLOCK + SbEncInStream_Free(&p->sb); + #endif + + SeqInFilter_Free(&p->filter, alloc); + if (p->lzma2) + { + Lzma2Enc_Destroy(p->lzma2); + p->lzma2 = NULL; + } +} + + +typedef struct +{ + UInt64 unpackSize; + UInt64 totalSize; + size_t headerSize; +} CXzEncBlockInfo; + + +static SRes Xz_CompressBlock( + CLzma2WithFilters *lzmaf, + + ISeqOutStream *outStream, + Byte *outBufHeader, + Byte *outBufData, size_t outBufDataLimit, + + ISeqInStream *inStream, + // UInt64 expectedSize, + const Byte *inBuf, // used if (!inStream) + size_t inBufSize, // 
used if (!inStream), it's block size, props->blockSize is ignored + + const CXzProps *props, + ICompressProgress *progress, + int *inStreamFinished, /* only for inStream version */ + CXzEncBlockInfo *blockSizes, + ISzAllocPtr alloc, + ISzAllocPtr allocBig) +{ + CSeqCheckInStream checkInStream; + CSeqSizeOutStream seqSizeOutStream; + CXzBlock block; + unsigned filterIndex = 0; + CXzFilter *filter = NULL; + const CXzFilterProps *fp = &props->filterProps; + if (fp->id == 0) + fp = NULL; + + *inStreamFinished = False; + + RINOK(Lzma2WithFilters_Create(lzmaf, alloc, allocBig)); + + RINOK(Lzma2Enc_SetProps(lzmaf->lzma2, &props->lzma2Props)); + + XzBlock_ClearFlags(&block); + XzBlock_SetNumFilters(&block, 1 + (fp ? 1 : 0)); + + if (fp) + { + filter = &block.filters[filterIndex++]; + filter->id = fp->id; + filter->propsSize = 0; + + if (fp->id == XZ_ID_Delta) + { + filter->props[0] = (Byte)(fp->delta - 1); + filter->propsSize = 1; + } + else if (fp->ipDefined) + { + SetUi32(filter->props, fp->ip); + filter->propsSize = 4; + } + } + + { + CXzFilter *f = &block.filters[filterIndex++]; + f->id = XZ_ID_LZMA2; + f->propsSize = 1; + f->props[0] = Lzma2Enc_WriteProperties(lzmaf->lzma2); + } + + seqSizeOutStream.vt.Write = SeqSizeOutStream_Write; + seqSizeOutStream.realStream = outStream; + seqSizeOutStream.outBuf = outBufData; + seqSizeOutStream.outBufLimit = outBufDataLimit; + seqSizeOutStream.processed = 0; + + /* + if (expectedSize != (UInt64)(Int64)-1) + { + block.unpackSize = expectedSize; + if (props->blockSize != (UInt64)(Int64)-1) + if (expectedSize > props->blockSize) + block.unpackSize = props->blockSize; + XzBlock_SetHasUnpackSize(&block); + } + */ + + if (outStream) + { + RINOK(XzBlock_WriteHeader(&block, &seqSizeOutStream.vt)); + } + + checkInStream.vt.Read = SeqCheckInStream_Read; + SeqCheckInStream_Init(&checkInStream, props->checkId); + + checkInStream.realStream = inStream; + checkInStream.data = inBuf; + checkInStream.limit = props->blockSize; + if (!inStream) + checkInStream.limit = inBufSize; + + if (fp) + { + #ifdef USE_SUBBLOCK + if (fp->id == XZ_ID_Subblock) + { + lzmaf->sb.inStream = &checkInStream.vt; + RINOK(SbEncInStream_Init(&lzmaf->sb)); + } + else + #endif + { + lzmaf->filter.realStream = &checkInStream.vt; + RINOK(SeqInFilter_Init(&lzmaf->filter, filter, alloc)); + } + } + + { + SRes res; + Byte *outBuf = NULL; + size_t outSize = 0; + BoolInt useStream = (fp || inStream); + // useStream = True; + + if (!useStream) + { + XzCheck_Update(&checkInStream.check, inBuf, inBufSize); + checkInStream.processed = inBufSize; + } + + if (!outStream) + { + outBuf = seqSizeOutStream.outBuf; // + (size_t)seqSizeOutStream.processed; + outSize = seqSizeOutStream.outBufLimit; // - (size_t)seqSizeOutStream.processed; + } + + res = Lzma2Enc_Encode2(lzmaf->lzma2, + outBuf ? NULL : &seqSizeOutStream.vt, + outBuf, + outBuf ? &outSize : NULL, + + useStream ? + (fp ? + ( + #ifdef USE_SUBBLOCK + (fp->id == XZ_ID_Subblock) ? &lzmaf->sb.vt: + #endif + &lzmaf->filter.p) : + &checkInStream.vt) : NULL, + + useStream ? NULL : inBuf, + useStream ? 
0 : inBufSize, + + progress); + + if (outBuf) + seqSizeOutStream.processed += outSize; + + RINOK(res); + blockSizes->unpackSize = checkInStream.processed; + } + { + Byte buf[4 + 64]; + unsigned padSize = XZ_GET_PAD_SIZE(seqSizeOutStream.processed); + UInt64 packSize = seqSizeOutStream.processed; + + buf[0] = 0; + buf[1] = 0; + buf[2] = 0; + buf[3] = 0; + + SeqCheckInStream_GetDigest(&checkInStream, buf + 4); + RINOK(WriteBytes(&seqSizeOutStream.vt, buf + (4 - padSize), padSize + XzFlags_GetCheckSize((CXzStreamFlags)props->checkId))); + + blockSizes->totalSize = seqSizeOutStream.processed - padSize; + + if (!outStream) + { + seqSizeOutStream.outBuf = outBufHeader; + seqSizeOutStream.outBufLimit = XZ_BLOCK_HEADER_SIZE_MAX; + seqSizeOutStream.processed = 0; + + block.unpackSize = blockSizes->unpackSize; + XzBlock_SetHasUnpackSize(&block); + + block.packSize = packSize; + XzBlock_SetHasPackSize(&block); + + RINOK(XzBlock_WriteHeader(&block, &seqSizeOutStream.vt)); + + blockSizes->headerSize = (size_t)seqSizeOutStream.processed; + blockSizes->totalSize += seqSizeOutStream.processed; + } + } + + if (inStream) + *inStreamFinished = checkInStream.realStreamFinished; + else + { + *inStreamFinished = False; + if (checkInStream.processed != inBufSize) + return SZ_ERROR_FAIL; + } + + return SZ_OK; +} + + + +typedef struct +{ + ICompressProgress vt; + ICompressProgress *progress; + UInt64 inOffset; + UInt64 outOffset; +} CCompressProgress_XzEncOffset; + + +static SRes CompressProgress_XzEncOffset_Progress(const ICompressProgress *pp, UInt64 inSize, UInt64 outSize) +{ + const CCompressProgress_XzEncOffset *p = CONTAINER_FROM_VTBL(pp, CCompressProgress_XzEncOffset, vt); + inSize += p->inOffset; + outSize += p->outOffset; + return ICompressProgress_Progress(p->progress, inSize, outSize); +} + + + + +typedef struct +{ + ISzAllocPtr alloc; + ISzAllocPtr allocBig; + + CXzProps xzProps; + UInt64 expectedDataSize; + + CXzEncIndex xzIndex; + + CLzma2WithFilters lzmaf_Items[MTCODER__THREADS_MAX]; + + size_t outBufSize; /* size of allocated outBufs[i] */ + Byte *outBufs[MTCODER__BLOCKS_MAX]; + + #ifndef _7ZIP_ST + unsigned checkType; + ISeqOutStream *outStream; + BoolInt mtCoder_WasConstructed; + CMtCoder mtCoder; + CXzEncBlockInfo EncBlocks[MTCODER__BLOCKS_MAX]; + #endif + +} CXzEnc; + + +static void XzEnc_Construct(CXzEnc *p) +{ + unsigned i; + + XzEncIndex_Construct(&p->xzIndex); + + for (i = 0; i < MTCODER__THREADS_MAX; i++) + Lzma2WithFilters_Construct(&p->lzmaf_Items[i]); + + #ifndef _7ZIP_ST + p->mtCoder_WasConstructed = False; + { + for (i = 0; i < MTCODER__BLOCKS_MAX; i++) + p->outBufs[i] = NULL; + p->outBufSize = 0; + } + #endif +} + + +static void XzEnc_FreeOutBufs(CXzEnc *p) +{ + unsigned i; + for (i = 0; i < MTCODER__BLOCKS_MAX; i++) + if (p->outBufs[i]) + { + ISzAlloc_Free(p->alloc, p->outBufs[i]); + p->outBufs[i] = NULL; + } + p->outBufSize = 0; +} + + +static void XzEnc_Free(CXzEnc *p, ISzAllocPtr alloc) +{ + unsigned i; + + XzEncIndex_Free(&p->xzIndex, alloc); + + for (i = 0; i < MTCODER__THREADS_MAX; i++) + Lzma2WithFilters_Free(&p->lzmaf_Items[i], alloc); + + #ifndef _7ZIP_ST + if (p->mtCoder_WasConstructed) + { + MtCoder_Destruct(&p->mtCoder); + p->mtCoder_WasConstructed = False; + } + XzEnc_FreeOutBufs(p); + #endif +} + + +CXzEncHandle XzEnc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig) +{ + CXzEnc *p = (CXzEnc *)ISzAlloc_Alloc(alloc, sizeof(CXzEnc)); + if (!p) + return NULL; + XzEnc_Construct(p); + XzProps_Init(&p->xzProps); + XzProps_Normalize(&p->xzProps); + p->expectedDataSize = 
(UInt64)(Int64)-1; + p->alloc = alloc; + p->allocBig = allocBig; + return p; +} + + +void XzEnc_Destroy(CXzEncHandle pp) +{ + CXzEnc *p = (CXzEnc *)pp; + XzEnc_Free(p, p->alloc); + ISzAlloc_Free(p->alloc, p); +} + + +SRes XzEnc_SetProps(CXzEncHandle pp, const CXzProps *props) +{ + CXzEnc *p = (CXzEnc *)pp; + p->xzProps = *props; + XzProps_Normalize(&p->xzProps); + return SZ_OK; +} + + +void XzEnc_SetDataSize(CXzEncHandle pp, UInt64 expectedDataSiize) +{ + CXzEnc *p = (CXzEnc *)pp; + p->expectedDataSize = expectedDataSiize; +} + + + + +#ifndef _7ZIP_ST + +static SRes XzEnc_MtCallback_Code(void *pp, unsigned coderIndex, unsigned outBufIndex, + const Byte *src, size_t srcSize, int finished) +{ + CXzEnc *me = (CXzEnc *)pp; + SRes res; + CMtProgressThunk progressThunk; + + Byte *dest = me->outBufs[outBufIndex]; + + UNUSED_VAR(finished) + + { + CXzEncBlockInfo *bInfo = &me->EncBlocks[outBufIndex]; + bInfo->totalSize = 0; + bInfo->unpackSize = 0; + bInfo->headerSize = 0; + } + + if (!dest) + { + dest = (Byte *)ISzAlloc_Alloc(me->alloc, me->outBufSize); + if (!dest) + return SZ_ERROR_MEM; + me->outBufs[outBufIndex] = dest; + } + + MtProgressThunk_CreateVTable(&progressThunk); + progressThunk.mtProgress = &me->mtCoder.mtProgress; + MtProgressThunk_Init(&progressThunk); + + { + CXzEncBlockInfo blockSizes; + int inStreamFinished; + + res = Xz_CompressBlock( + &me->lzmaf_Items[coderIndex], + + NULL, + dest, + dest + XZ_BLOCK_HEADER_SIZE_MAX, me->outBufSize - XZ_BLOCK_HEADER_SIZE_MAX, + + NULL, + // srcSize, // expectedSize + src, srcSize, + + &me->xzProps, + &progressThunk.vt, + &inStreamFinished, + &blockSizes, + me->alloc, + me->allocBig); + + if (res == SZ_OK) + me->EncBlocks[outBufIndex] = blockSizes; + + return res; + } +} + + +static SRes XzEnc_MtCallback_Write(void *pp, unsigned outBufIndex) +{ + CXzEnc *me = (CXzEnc *)pp; + + const CXzEncBlockInfo *bInfo = &me->EncBlocks[outBufIndex]; + const Byte *data = me->outBufs[outBufIndex]; + + RINOK(WriteBytes(me->outStream, data, bInfo->headerSize)); + + { + UInt64 totalPackFull = bInfo->totalSize + XZ_GET_PAD_SIZE(bInfo->totalSize); + RINOK(WriteBytes(me->outStream, data + XZ_BLOCK_HEADER_SIZE_MAX, (size_t)totalPackFull - bInfo->headerSize)); + } + + return XzEncIndex_AddIndexRecord(&me->xzIndex, bInfo->unpackSize, bInfo->totalSize, me->alloc); +} + +#endif + + + +SRes XzEnc_Encode(CXzEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress) +{ + CXzEnc *p = (CXzEnc *)pp; + + const CXzProps *props = &p->xzProps; + + XzEncIndex_Init(&p->xzIndex); + { + UInt64 numBlocks = 1; + UInt64 blockSize = props->blockSize; + + if (blockSize != XZ_PROPS__BLOCK_SIZE__SOLID + && props->reduceSize != (UInt64)(Int64)-1) + { + numBlocks = props->reduceSize / blockSize; + if (numBlocks * blockSize != props->reduceSize) + numBlocks++; + } + else + blockSize = (UInt64)1 << 62; + + RINOK(XzEncIndex_PreAlloc(&p->xzIndex, numBlocks, blockSize, XZ_GET_ESTIMATED_BLOCK_TOTAL_PACK_SIZE(blockSize), p->alloc)); + } + + RINOK(Xz_WriteHeader((CXzStreamFlags)props->checkId, outStream)); + + + #ifndef _7ZIP_ST + if (props->numBlockThreads_Reduced > 1) + { + IMtCoderCallback2 vt; + + if (!p->mtCoder_WasConstructed) + { + p->mtCoder_WasConstructed = True; + MtCoder_Construct(&p->mtCoder); + } + + vt.Code = XzEnc_MtCallback_Code; + vt.Write = XzEnc_MtCallback_Write; + + p->checkType = props->checkId; + p->xzProps = *props; + + p->outStream = outStream; + + p->mtCoder.allocBig = p->allocBig; + p->mtCoder.progress = progress; + p->mtCoder.inStream = 
inStream; + p->mtCoder.inData = NULL; + p->mtCoder.inDataSize = 0; + p->mtCoder.mtCallback = &vt; + p->mtCoder.mtCallbackObject = p; + + if ( props->blockSize == XZ_PROPS__BLOCK_SIZE__SOLID + || props->blockSize == XZ_PROPS__BLOCK_SIZE__AUTO) + return SZ_ERROR_FAIL; + + p->mtCoder.blockSize = (size_t)props->blockSize; + if (p->mtCoder.blockSize != props->blockSize) + return SZ_ERROR_PARAM; /* SZ_ERROR_MEM */ + + { + size_t destBlockSize = XZ_BLOCK_HEADER_SIZE_MAX + XZ_GET_MAX_BLOCK_PACK_SIZE(p->mtCoder.blockSize); + if (destBlockSize < p->mtCoder.blockSize) + return SZ_ERROR_PARAM; + if (p->outBufSize != destBlockSize) + XzEnc_FreeOutBufs(p); + p->outBufSize = destBlockSize; + } + + p->mtCoder.numThreadsMax = props->numBlockThreads_Max; + p->mtCoder.expectedDataSize = p->expectedDataSize; + + RINOK(MtCoder_Code(&p->mtCoder)); + } + else + #endif + { + int writeStartSizes; + CCompressProgress_XzEncOffset progress2; + Byte *bufData = NULL; + size_t bufSize = 0; + + progress2.vt.Progress = CompressProgress_XzEncOffset_Progress; + progress2.inOffset = 0; + progress2.outOffset = 0; + progress2.progress = progress; + + writeStartSizes = 0; + + if (props->blockSize != XZ_PROPS__BLOCK_SIZE__SOLID) + { + writeStartSizes = (props->forceWriteSizesInHeader > 0); + + if (writeStartSizes) + { + size_t t2; + size_t t = (size_t)props->blockSize; + if (t != props->blockSize) + return SZ_ERROR_PARAM; + t = XZ_GET_MAX_BLOCK_PACK_SIZE(t); + if (t < props->blockSize) + return SZ_ERROR_PARAM; + t2 = XZ_BLOCK_HEADER_SIZE_MAX + t; + if (!p->outBufs[0] || t2 != p->outBufSize) + { + XzEnc_FreeOutBufs(p); + p->outBufs[0] = (Byte *)ISzAlloc_Alloc(p->alloc, t2); + if (!p->outBufs[0]) + return SZ_ERROR_MEM; + p->outBufSize = t2; + } + bufData = p->outBufs[0] + XZ_BLOCK_HEADER_SIZE_MAX; + bufSize = t; + } + } + + for (;;) + { + CXzEncBlockInfo blockSizes; + int inStreamFinished; + + /* + UInt64 rem = (UInt64)(Int64)-1; + if (props->reduceSize != (UInt64)(Int64)-1 + && props->reduceSize >= progress2.inOffset) + rem = props->reduceSize - progress2.inOffset; + */ + + blockSizes.headerSize = 0; // for GCC + + RINOK(Xz_CompressBlock( + &p->lzmaf_Items[0], + + writeStartSizes ? NULL : outStream, + writeStartSizes ? p->outBufs[0] : NULL, + bufData, bufSize, + + inStream, + // rem, + NULL, 0, + + props, + progress ? 
&progress2.vt : NULL, + &inStreamFinished, + &blockSizes, + p->alloc, + p->allocBig)); + + { + UInt64 totalPackFull = blockSizes.totalSize + XZ_GET_PAD_SIZE(blockSizes.totalSize); + + if (writeStartSizes) + { + RINOK(WriteBytes(outStream, p->outBufs[0], blockSizes.headerSize)); + RINOK(WriteBytes(outStream, bufData, (size_t)totalPackFull - blockSizes.headerSize)); + } + + RINOK(XzEncIndex_AddIndexRecord(&p->xzIndex, blockSizes.unpackSize, blockSizes.totalSize, p->alloc)); + + progress2.inOffset += blockSizes.unpackSize; + progress2.outOffset += totalPackFull; + } + + if (inStreamFinished) + break; + } + } + + return XzEncIndex_WriteFooter(&p->xzIndex, (CXzStreamFlags)props->checkId, outStream); +} + + +#include "Alloc.h" + +SRes Xz_Encode(ISeqOutStream *outStream, ISeqInStream *inStream, + const CXzProps *props, ICompressProgress *progress) +{ + SRes res; + CXzEncHandle xz = XzEnc_Create(&g_Alloc, &g_BigAlloc); + if (!xz) + return SZ_ERROR_MEM; + res = XzEnc_SetProps(xz, props); + if (res == SZ_OK) + res = XzEnc_Encode(xz, outStream, inStream, progress); + XzEnc_Destroy(xz); + return res; +} + + +SRes Xz_EncodeEmpty(ISeqOutStream *outStream) +{ + SRes res; + CXzEncIndex xzIndex; + XzEncIndex_Construct(&xzIndex); + res = Xz_WriteHeader((CXzStreamFlags)0, outStream); + if (res == SZ_OK) + res = XzEncIndex_WriteFooter(&xzIndex, (CXzStreamFlags)0, outStream); + XzEncIndex_Free(&xzIndex, NULL); // g_Alloc + return res; +} diff --git a/3rdparty/7z/src/XzEnc.h b/3rdparty/7z/src/XzEnc.h new file mode 100644 index 0000000000..529ac3fd8c --- /dev/null +++ b/3rdparty/7z/src/XzEnc.h @@ -0,0 +1,60 @@ +/* XzEnc.h -- Xz Encode +2017-06-27 : Igor Pavlov : Public domain */ + +#ifndef __XZ_ENC_H +#define __XZ_ENC_H + +#include "Lzma2Enc.h" + +#include "Xz.h" + +EXTERN_C_BEGIN + + +#define XZ_PROPS__BLOCK_SIZE__AUTO LZMA2_ENC_PROPS__BLOCK_SIZE__AUTO +#define XZ_PROPS__BLOCK_SIZE__SOLID LZMA2_ENC_PROPS__BLOCK_SIZE__SOLID + + +typedef struct +{ + UInt32 id; + UInt32 delta; + UInt32 ip; + int ipDefined; +} CXzFilterProps; + +void XzFilterProps_Init(CXzFilterProps *p); + + +typedef struct +{ + CLzma2EncProps lzma2Props; + CXzFilterProps filterProps; + unsigned checkId; + UInt64 blockSize; + int numBlockThreads_Reduced; + int numBlockThreads_Max; + int numTotalThreads; + int forceWriteSizesInHeader; + UInt64 reduceSize; +} CXzProps; + +void XzProps_Init(CXzProps *p); + + +typedef void * CXzEncHandle; + +CXzEncHandle XzEnc_Create(ISzAllocPtr alloc, ISzAllocPtr allocBig); +void XzEnc_Destroy(CXzEncHandle p); +SRes XzEnc_SetProps(CXzEncHandle p, const CXzProps *props); +void XzEnc_SetDataSize(CXzEncHandle p, UInt64 expectedDataSiize); +SRes XzEnc_Encode(CXzEncHandle p, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress); + +SRes Xz_Encode(ISeqOutStream *outStream, ISeqInStream *inStream, + const CXzProps *props, ICompressProgress *progress); + +SRes Xz_EncodeEmpty(ISeqOutStream *outStream); + +EXTERN_C_END + +#endif diff --git a/3rdparty/7z/src/XzIn.c b/3rdparty/7z/src/XzIn.c new file mode 100644 index 0000000000..792a617865 --- /dev/null +++ b/3rdparty/7z/src/XzIn.c @@ -0,0 +1,319 @@ +/* XzIn.c - Xz input +2018-07-04 : Igor Pavlov : Public domain */ + +#include "Precomp.h" + +#include + +#include "7zCrc.h" +#include "CpuArch.h" +#include "Xz.h" + +/* +#define XZ_FOOTER_SIG_CHECK(p) (memcmp((p), XZ_FOOTER_SIG, XZ_FOOTER_SIG_SIZE) == 0) +*/ +#define XZ_FOOTER_SIG_CHECK(p) ((p)[0] == XZ_FOOTER_SIG_0 && (p)[1] == XZ_FOOTER_SIG_1) + + +SRes Xz_ReadHeader(CXzStreamFlags *p, ISeqInStream 
*inStream) +{ + Byte sig[XZ_STREAM_HEADER_SIZE]; + RINOK(SeqInStream_Read2(inStream, sig, XZ_STREAM_HEADER_SIZE, SZ_ERROR_NO_ARCHIVE)); + if (memcmp(sig, XZ_SIG, XZ_SIG_SIZE) != 0) + return SZ_ERROR_NO_ARCHIVE; + return Xz_ParseHeader(p, sig); +} + +#define READ_VARINT_AND_CHECK(buf, pos, size, res) \ + { unsigned s = Xz_ReadVarInt(buf + pos, size - pos, res); \ + if (s == 0) return SZ_ERROR_ARCHIVE; pos += s; } + +SRes XzBlock_ReadHeader(CXzBlock *p, ISeqInStream *inStream, BoolInt *isIndex, UInt32 *headerSizeRes) +{ + Byte header[XZ_BLOCK_HEADER_SIZE_MAX]; + unsigned headerSize; + *headerSizeRes = 0; + RINOK(SeqInStream_ReadByte(inStream, &header[0])); + headerSize = (unsigned)header[0]; + if (headerSize == 0) + { + *headerSizeRes = 1; + *isIndex = True; + return SZ_OK; + } + + *isIndex = False; + headerSize = (headerSize << 2) + 4; + *headerSizeRes = headerSize; + RINOK(SeqInStream_Read(inStream, header + 1, headerSize - 1)); + return XzBlock_Parse(p, header); +} + +#define ADD_SIZE_CHECK(size, val) \ + { UInt64 newSize = size + (val); if (newSize < size) return XZ_SIZE_OVERFLOW; size = newSize; } + +UInt64 Xz_GetUnpackSize(const CXzStream *p) +{ + UInt64 size = 0; + size_t i; + for (i = 0; i < p->numBlocks; i++) + ADD_SIZE_CHECK(size, p->blocks[i].unpackSize); + return size; +} + +UInt64 Xz_GetPackSize(const CXzStream *p) +{ + UInt64 size = 0; + size_t i; + for (i = 0; i < p->numBlocks; i++) + ADD_SIZE_CHECK(size, (p->blocks[i].totalSize + 3) & ~(UInt64)3); + return size; +} + +/* +SRes XzBlock_ReadFooter(CXzBlock *p, CXzStreamFlags f, ISeqInStream *inStream) +{ + return SeqInStream_Read(inStream, p->check, XzFlags_GetCheckSize(f)); +} +*/ + +static SRes Xz_ReadIndex2(CXzStream *p, const Byte *buf, size_t size, ISzAllocPtr alloc) +{ + size_t numBlocks, pos = 1; + UInt32 crc; + + if (size < 5 || buf[0] != 0) + return SZ_ERROR_ARCHIVE; + + size -= 4; + crc = CrcCalc(buf, size); + if (crc != GetUi32(buf + size)) + return SZ_ERROR_ARCHIVE; + + { + UInt64 numBlocks64; + READ_VARINT_AND_CHECK(buf, pos, size, &numBlocks64); + numBlocks = (size_t)numBlocks64; + if (numBlocks != numBlocks64 || numBlocks * 2 > size) + return SZ_ERROR_ARCHIVE; + } + + Xz_Free(p, alloc); + if (numBlocks != 0) + { + size_t i; + p->numBlocks = numBlocks; + p->blocks = (CXzBlockSizes *)ISzAlloc_Alloc(alloc, sizeof(CXzBlockSizes) * numBlocks); + if (!p->blocks) + return SZ_ERROR_MEM; + for (i = 0; i < numBlocks; i++) + { + CXzBlockSizes *block = &p->blocks[i]; + READ_VARINT_AND_CHECK(buf, pos, size, &block->totalSize); + READ_VARINT_AND_CHECK(buf, pos, size, &block->unpackSize); + if (block->totalSize == 0) + return SZ_ERROR_ARCHIVE; + } + } + while ((pos & 3) != 0) + if (buf[pos++] != 0) + return SZ_ERROR_ARCHIVE; + return (pos == size) ? 
SZ_OK : SZ_ERROR_ARCHIVE; +} + +static SRes Xz_ReadIndex(CXzStream *p, ILookInStream *stream, UInt64 indexSize, ISzAllocPtr alloc) +{ + SRes res; + size_t size; + Byte *buf; + if (indexSize > ((UInt32)1 << 31)) + return SZ_ERROR_UNSUPPORTED; + size = (size_t)indexSize; + if (size != indexSize) + return SZ_ERROR_UNSUPPORTED; + buf = (Byte *)ISzAlloc_Alloc(alloc, size); + if (!buf) + return SZ_ERROR_MEM; + res = LookInStream_Read2(stream, buf, size, SZ_ERROR_UNSUPPORTED); + if (res == SZ_OK) + res = Xz_ReadIndex2(p, buf, size, alloc); + ISzAlloc_Free(alloc, buf); + return res; +} + +static SRes LookInStream_SeekRead_ForArc(ILookInStream *stream, UInt64 offset, void *buf, size_t size) +{ + RINOK(LookInStream_SeekTo(stream, offset)); + return LookInStream_Read(stream, buf, size); + /* return LookInStream_Read2(stream, buf, size, SZ_ERROR_NO_ARCHIVE); */ +} + +static SRes Xz_ReadBackward(CXzStream *p, ILookInStream *stream, Int64 *startOffset, ISzAllocPtr alloc) +{ + UInt64 indexSize; + Byte buf[XZ_STREAM_FOOTER_SIZE]; + UInt64 pos = *startOffset; + + if ((pos & 3) != 0 || pos < XZ_STREAM_FOOTER_SIZE) + return SZ_ERROR_NO_ARCHIVE; + + pos -= XZ_STREAM_FOOTER_SIZE; + RINOK(LookInStream_SeekRead_ForArc(stream, pos, buf, XZ_STREAM_FOOTER_SIZE)); + + if (!XZ_FOOTER_SIG_CHECK(buf + 10)) + { + UInt32 total = 0; + pos += XZ_STREAM_FOOTER_SIZE; + + for (;;) + { + size_t i; + #define TEMP_BUF_SIZE (1 << 10) + Byte temp[TEMP_BUF_SIZE]; + + i = (pos > TEMP_BUF_SIZE) ? TEMP_BUF_SIZE : (size_t)pos; + pos -= i; + RINOK(LookInStream_SeekRead_ForArc(stream, pos, temp, i)); + total += (UInt32)i; + for (; i != 0; i--) + if (temp[i - 1] != 0) + break; + if (i != 0) + { + if ((i & 3) != 0) + return SZ_ERROR_NO_ARCHIVE; + pos += i; + break; + } + if (pos < XZ_STREAM_FOOTER_SIZE || total > (1 << 16)) + return SZ_ERROR_NO_ARCHIVE; + } + + if (pos < XZ_STREAM_FOOTER_SIZE) + return SZ_ERROR_NO_ARCHIVE; + pos -= XZ_STREAM_FOOTER_SIZE; + RINOK(LookInStream_SeekRead_ForArc(stream, pos, buf, XZ_STREAM_FOOTER_SIZE)); + if (!XZ_FOOTER_SIG_CHECK(buf + 10)) + return SZ_ERROR_NO_ARCHIVE; + } + + p->flags = (CXzStreamFlags)GetBe16(buf + 8); + + if (!XzFlags_IsSupported(p->flags)) + return SZ_ERROR_UNSUPPORTED; + + if (GetUi32(buf) != CrcCalc(buf + 4, 6)) + return SZ_ERROR_ARCHIVE; + + indexSize = ((UInt64)GetUi32(buf + 4) + 1) << 2; + + if (pos < indexSize) + return SZ_ERROR_ARCHIVE; + + pos -= indexSize; + RINOK(LookInStream_SeekTo(stream, pos)); + RINOK(Xz_ReadIndex(p, stream, indexSize, alloc)); + + { + UInt64 totalSize = Xz_GetPackSize(p); + if (totalSize == XZ_SIZE_OVERFLOW + || totalSize >= ((UInt64)1 << 63) + || pos < totalSize + XZ_STREAM_HEADER_SIZE) + return SZ_ERROR_ARCHIVE; + pos -= (totalSize + XZ_STREAM_HEADER_SIZE); + RINOK(LookInStream_SeekTo(stream, pos)); + *startOffset = pos; + } + { + CXzStreamFlags headerFlags; + CSecToRead secToRead; + SecToRead_CreateVTable(&secToRead); + secToRead.realStream = stream; + + RINOK(Xz_ReadHeader(&headerFlags, &secToRead.vt)); + return (p->flags == headerFlags) ? 
SZ_OK : SZ_ERROR_ARCHIVE; + } +} + + +/* ---------- Xz Streams ---------- */ + +void Xzs_Construct(CXzs *p) +{ + p->num = p->numAllocated = 0; + p->streams = 0; +} + +void Xzs_Free(CXzs *p, ISzAllocPtr alloc) +{ + size_t i; + for (i = 0; i < p->num; i++) + Xz_Free(&p->streams[i], alloc); + ISzAlloc_Free(alloc, p->streams); + p->num = p->numAllocated = 0; + p->streams = 0; +} + +UInt64 Xzs_GetNumBlocks(const CXzs *p) +{ + UInt64 num = 0; + size_t i; + for (i = 0; i < p->num; i++) + num += p->streams[i].numBlocks; + return num; +} + +UInt64 Xzs_GetUnpackSize(const CXzs *p) +{ + UInt64 size = 0; + size_t i; + for (i = 0; i < p->num; i++) + ADD_SIZE_CHECK(size, Xz_GetUnpackSize(&p->streams[i])); + return size; +} + +/* +UInt64 Xzs_GetPackSize(const CXzs *p) +{ + UInt64 size = 0; + size_t i; + for (i = 0; i < p->num; i++) + ADD_SIZE_CHECK(size, Xz_GetTotalSize(&p->streams[i])); + return size; +} +*/ + +SRes Xzs_ReadBackward(CXzs *p, ILookInStream *stream, Int64 *startOffset, ICompressProgress *progress, ISzAllocPtr alloc) +{ + Int64 endOffset = 0; + RINOK(ILookInStream_Seek(stream, &endOffset, SZ_SEEK_END)); + *startOffset = endOffset; + for (;;) + { + CXzStream st; + SRes res; + Xz_Construct(&st); + res = Xz_ReadBackward(&st, stream, startOffset, alloc); + st.startOffset = *startOffset; + RINOK(res); + if (p->num == p->numAllocated) + { + size_t newNum = p->num + p->num / 4 + 1; + Byte *data = (Byte *)ISzAlloc_Alloc(alloc, newNum * sizeof(CXzStream)); + if (!data) + return SZ_ERROR_MEM; + p->numAllocated = newNum; + if (p->num != 0) + memcpy(data, p->streams, p->num * sizeof(CXzStream)); + ISzAlloc_Free(alloc, p->streams); + p->streams = (CXzStream *)data; + } + p->streams[p->num++] = st; + if (*startOffset == 0) + break; + RINOK(LookInStream_SeekTo(stream, *startOffset)); + if (progress && ICompressProgress_Progress(progress, endOffset - *startOffset, (UInt64)(Int64)-1) != SZ_OK) + return SZ_ERROR_PROGRESS; + } + return SZ_OK; +} diff --git a/3rdparty/7zip/7zip b/3rdparty/7zip/7zip deleted file mode 160000 index e5431fa6f5..0000000000 --- a/3rdparty/7zip/7zip +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e5431fa6f5505e385c6f9367260717e9c47dc2ee diff --git a/3rdparty/7zip/7zip.filters b/3rdparty/7zip/7zip.filters deleted file mode 100644 index 1ba9da711a..0000000000 --- a/3rdparty/7zip/7zip.filters +++ /dev/null @@ -1,111 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/3rdparty/7zip/CMakeLists.txt b/3rdparty/7zip/CMakeLists.txt deleted file mode 100644 index 706d869472..0000000000 --- a/3rdparty/7zip/CMakeLists.txt +++ /dev/null @@ -1,72 +0,0 @@ -# 7zip sdk -if(WIN32 OR APPLE) - add_library(3rdparty_7zip STATIC EXCLUDE_FROM_ALL - 7zip/C/7zAlloc.c - 7zip/C/7zArcIn.c - 7zip/C/7zBuf.c - 7zip/C/7zBuf2.c - 7zip/C/7zCrc.c - 7zip/C/7zCrcOpt.c - 7zip/C/7zDec.c - 7zip/C/7zFile.c - 7zip/C/7zStream.c - 7zip/C/Aes.c - 7zip/C/AesOpt.c - 7zip/C/Alloc.c - 7zip/C/Bcj2.c - 7zip/C/Bcj2Enc.c - 7zip/C/Blake2s.c - 7zip/C/Bra.c - 7zip/C/Bra86.c - 7zip/C/BraIA64.c - 7zip/C/BwtSort.c - 7zip/C/CpuArch.c - 7zip/C/Delta.c - 7zip/C/DllSecur.c - 7zip/C/HuffEnc.c - 7zip/C/LzFind.c - 7zip/C/LzFindMt.c - 7zip/C/LzFindOpt.c - 7zip/C/Lzma2Dec.c - 7zip/C/Lzma2DecMt.c - 7zip/C/Lzma2Enc.c - 7zip/C/Lzma86Dec.c - 7zip/C/Lzma86Enc.c - 7zip/C/LzmaDec.c - 7zip/C/LzmaEnc.c - 7zip/C/LzmaLib.c - 7zip/C/MtCoder.c - 
7zip/C/MtDec.c - 7zip/C/Ppmd7.c - 7zip/C/Ppmd7aDec.c - 7zip/C/Ppmd7Dec.c - 7zip/C/Ppmd7Enc.c - 7zip/C/Ppmd8.c - 7zip/C/Ppmd8Dec.c - 7zip/C/Ppmd8Enc.c - 7zip/C/Sha1.c - 7zip/C/Sha1Opt.c - 7zip/C/Sha256.c - 7zip/C/Sha256Opt.c - 7zip/C/Sort.c - 7zip/C/SwapBytes.c - 7zip/C/Threads.c - 7zip/C/Xxh64.c - 7zip/C/Xz.c - 7zip/C/XzCrc64.c - 7zip/C/XzCrc64Opt.c - 7zip/C/XzDec.c - 7zip/C/XzEnc.c - 7zip/C/XzIn.c - 7zip/C/ZstdDec.c) - target_include_directories(3rdparty_7zip INTERFACE - $ - $) - - target_include_directories(3rdparty_7zip INTERFACE 7zip) - - set_property(TARGET 3rdparty_7zip PROPERTY FOLDER "3rdparty/") - -else() - add_library(3rdparty_7zip INTERFACE) -endif() diff --git a/3rdparty/CMakeLists.txt b/3rdparty/CMakeLists.txt index 6c49a889ba..d5bea616c9 100644 --- a/3rdparty/CMakeLists.txt +++ b/3rdparty/CMakeLists.txt @@ -4,14 +4,6 @@ include(CMakeDependentOption) set(CMAKE_CXX_STANDARD 20) -# Defines the ARCHITECTURE variable -include("DetectArchitecture.cmake") - -# Warnings are silenced for 3rdparty code -if(NOT MSVC) - add_compile_options("$<$:-w>") -endif() - # Dummy target to use when lib isn't available add_library(3rdparty_dummy_lib INTERFACE) @@ -19,21 +11,13 @@ add_library(3rdparty_dummy_lib INTERFACE) # ZLib add_subdirectory(zlib EXCLUDE_FROM_ALL) -# ZSTD -add_subdirectory(zstd EXCLUDE_FROM_ALL) - -# 7zip sdk -add_subdirectory(7zip EXCLUDE_FROM_ALL) +# 7z sdk +add_subdirectory(7z EXCLUDE_FROM_ALL) add_library(3rdparty_flatbuffers INTERFACE) if (USE_SYSTEM_FLATBUFFERS) pkg_check_modules(FLATBUFFERS REQUIRED IMPORTED_TARGET flatbuffers>=2.0.0) target_link_libraries(3rdparty_flatbuffers INTERFACE PkgConfig::FLATBUFFERS) - set(FBS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../rpcs3/Emu/NP/generated/") - execute_process(COMMAND flatc --cpp -o "${FBS_DIR}" "${FBS_DIR}/np2_structs.fbs" RESULT_VARIABLE FBS_CMD_ERROR) - if(FBS_CMD_ERROR AND NOT FBS_CMD_ERROR EQUAL 0) - message(FATAL_ERROR "flatc failed to regenerate flatbuffers headers.") - endif() else() target_include_directories(3rdparty_flatbuffers INTERFACE flatbuffers/include) endif() @@ -44,7 +28,7 @@ add_subdirectory(libpng EXCLUDE_FROM_ALL) # pugixml if (USE_SYSTEM_PUGIXML) - pkg_check_modules(PUGIXML REQUIRED IMPORTED_TARGET pugixml>=1.15) + pkg_check_modules(PUGIXML REQUIRED IMPORTED_TARGET pugixml>=1.11) add_library(pugixml INTERFACE) target_link_libraries(pugixml INTERFACE PkgConfig::PUGIXML) else() @@ -53,16 +37,16 @@ endif() # libusb -if(CMAKE_SYSTEM_NAME MATCHES "DragonFly|FreeBSD") +if(CMAKE_SYSTEM MATCHES "DragonFly|FreeBSD") pkg_check_modules(LIBUSB REQUIRED IMPORTED_TARGET libusb-1.0>=1.0 ) - cmake_dependent_option(USE_SYSTEM_LIBUSB "Use system libusb-1.0 as shared library" ON + CMAKE_DEPENDENT_OPTION( USE_SYSTEM_LIBUSB "Use system libusb-1.0 as shared library" ON "LIBUSB_FOUND" OFF) else() pkg_check_modules(LIBUSB IMPORTED_TARGET libusb-1.0>=1.0 ) - cmake_dependent_option(USE_SYSTEM_LIBUSB "Use system libusb-1.0 as shared library" OFF + CMAKE_DEPENDENT_OPTION( USE_SYSTEM_LIBUSB "Use system libusb-1.0 as shared library" OFF "LIBUSB_FOUND" OFF) endif() -if(CMAKE_SYSTEM_NAME MATCHES "DragonFly|FreeBSD") +if(CMAKE_SYSTEM MATCHES "DragonFly|FreeBSD") # Always use system libusb as reference implementation isn't supported add_library(usb-1.0-shared INTERFACE) target_link_libraries(usb-1.0-shared INTERFACE PkgConfig::LIBUSB) @@ -79,10 +63,6 @@ else() # we don't have the system libusb, so we compile from submodule unset(LIBUSB_LIBRARIES CACHE) add_subdirectory(libusb EXCLUDE_FROM_ALL) - - if (NOT TARGET usb-1.0 AND TARGET 
usb-1.0-static) - add_library(usb-1.0 ALIAS usb-1.0-static) - endif() endif() endif() @@ -90,63 +70,101 @@ endif() # hidapi add_subdirectory(hidapi) -# glslang + +# Vulkan add_subdirectory(glslang EXCLUDE_FROM_ALL) -add_library(3rdparty_glslang INTERFACE) -target_link_libraries(3rdparty_glslang INTERFACE SPIRV) +add_subdirectory(SPIRV EXCLUDE_FROM_ALL) # yaml-cpp add_subdirectory(yaml-cpp) -# OpenGL - -if (NOT ANDROID) - find_package(OpenGL REQUIRED OPTIONAL_COMPONENTS EGL) - - add_library(3rdparty_opengl INTERFACE) - target_include_directories(3rdparty_opengl INTERFACE GL) - - if (WIN32) - if(NOT MSVC) - target_link_libraries(3rdparty_opengl INTERFACE OpenGL::GL OpenGL::GLU) - else() - target_link_libraries(3rdparty_opengl INTERFACE dxgi.lib d2d1.lib dwrite.lib) - endif() - elseif(APPLE) - target_link_libraries(3rdparty_opengl INTERFACE OpenGL::GL OpenGL::GLU) - else() - target_link_libraries(3rdparty_opengl INTERFACE OpenGL::GL OpenGL::GLU OpenGL::GLX) - endif() +# xxHash +if (USE_SYSTEM_XXHASH) + pkg_check_modules(XXHASH REQUIRED IMPORTED_TARGET libxxhash) + add_library(xxhash INTERFACE) + target_link_libraries(xxhash INTERFACE PkgConfig::XXHASH) else() - add_library(3rdparty_opengl INTERFACE) - target_compile_definitions(3rdparty_opengl INTERFACE WITHOUT_OPENGL=1) + set(XXHASH_BUNDLED_MODE ON) + set(XXHASH_BUILD_XXHSUM OFF) + set(BUILD_SHARED_LIBS OFF CACHE BOOL "Make xxHash build static libs") + add_subdirectory(xxHash/cmake_unofficial EXCLUDE_FROM_ALL) + target_include_directories(xxhash INTERFACE xxHash) endif() +# OpenGL + +# Prefer GLVND for OpenGL rather than legacy, unless it's been defined elsewhere, in the case of AppImage builds +if(NOT DEFINED OpenGL_GL_PREFERENCE) + set(OpenGL_GL_PREFERENCE GLVND) +endif() +find_package(OpenGL REQUIRED) + +add_library(3rdparty_opengl INTERFACE) +target_include_directories(3rdparty_opengl INTERFACE GL) + +if (WIN32) + if(NOT MSVC) + target_link_libraries(3rdparty_opengl INTERFACE ${OPENGL_LIBRARIES} opengl32.lib glu32.lib) + else() + target_link_libraries(3rdparty_opengl INTERFACE dxgi.lib d2d1.lib dwrite.lib) + endif() +else() + target_link_libraries(3rdparty_opengl INTERFACE ${OPENGL_LIBRARIES}) + + target_compile_definitions(3rdparty_opengl + INTERFACE + -DGL_GLEXT_PROTOTYPES + -DGLX_GLXEXT_PROTOTYPES) +endif() + + # stblib -add_subdirectory(stblib) +add_library(3rdparty_stblib INTERFACE) +target_include_directories(3rdparty_stblib INTERFACE stblib/include) + # DiscordRPC add_subdirectory(discord-rpc) -# Cubeb -if(USE_SYSTEM_CUBEB) - find_package(cubeb REQUIRED GLOBAL) - message(STATUS "Using system cubeb version '${cubeb_VERSION}'") - add_library(3rdparty::cubeb ALIAS cubeb::cubeb) -else() - message(STATUS "Using static cubeb from 3rdparty") - add_subdirectory(cubeb EXCLUDE_FROM_ALL) + +# ALSA +set(ALSA_TARGET 3rdparty_dummy_lib) + +if(USE_ALSA) + find_package(ALSA) + if(ALSA_FOUND) + add_library(3rdparty_alsa INTERFACE) + target_compile_definitions(3rdparty_alsa INTERFACE -DHAVE_ALSA) + target_include_directories(3rdparty_alsa SYSTEM INTERFACE ${ALSA_INCLUDE_DIRS}) + target_link_libraries(3rdparty_alsa INTERFACE ${ALSA_LIBRARIES}) + + set(ALSA_TARGET 3rdparty_alsa) + endif() endif() -# SoundTouch -add_subdirectory(SoundTouch EXCLUDE_FROM_ALL) + +# Pulse +set(PULSE_TARGET 3rdparty_dummy_lib) +if(USE_PULSE) + pkg_check_modules(PULSE libpulse-simple) + + if(PULSE_FOUND) + add_library(3rdparty_pulse INTERFACE) + target_compile_definitions(3rdparty_pulse INTERFACE -DHAVE_PULSE) + target_include_directories(3rdparty_pulse SYSTEM + INTERFACE 
${PULSE_INCLUDE_DIRS}) + target_link_libraries(3rdparty_pulse INTERFACE ${PULSE_LDFLAGS}) + + set(PULSE_TARGET 3rdparty_pulse) + endif() +endif() # libevdev set(LIBEVDEV_TARGET 3rdparty_dummy_lib) if(USE_LIBEVDEV) - pkg_check_modules(LIBEVDEV libevdev libudev) + pkg_check_modules(LIBEVDEV libevdev) if(LIBEVDEV_FOUND) add_library(3rdparty_libevdev INTERFACE) target_compile_definitions(3rdparty_libevdev INTERFACE -DHAVE_LIBEVDEV) @@ -162,97 +180,49 @@ endif() # Vulkan set(VULKAN_TARGET 3rdparty_dummy_lib) if(USE_VULKAN) - if(APPLE) - if(USE_SYSTEM_MVK) - message(STATUS "RPCS3: Using system MoltenVK") - else() - message(STATUS "RPCS3: MoltenVK submodule") - - execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK" - ) - execute_process(COMMAND "${CMAKE_COMMAND}" --build . - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK" - ) - - add_library(moltenvk_lib SHARED IMPORTED) - add_dependencies(moltenvk_lib moltenvk) - set_target_properties(moltenvk_lib - PROPERTIES IMPORTED_LOCATION "{Vulkan_LIBRARY}" - ) - - set(VULKAN_SDK "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK/MoltenVK/MoltenVK") - set(VK_ICD_FILENAMES "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK/MoltenVK/MoltenVK/icd/MoltenVK_icd.json") - set(Vulkan_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK/MoltenVK/MoltenVK/include") - set(Vulkan_LIBRARY "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK/MoltenVK/Build/Products/Release/dynamic/libMoltenVK.dylib") - set(Vulkan_TOOLS "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK/MoltenVK/Build/Products/Release") - endif() - endif() - find_package(Vulkan) if(VULKAN_FOUND) add_library(3rdparty_vulkan INTERFACE) target_compile_definitions(3rdparty_vulkan INTERFACE -DHAVE_VULKAN) - target_link_libraries(3rdparty_vulkan INTERFACE Vulkan::Vulkan) + target_link_libraries(3rdparty_vulkan INTERFACE SPIRV SPIRV-Tools-opt Vulkan::Vulkan) - if(UNIX AND NOT APPLE AND NOT ANDROID) + if(UNIX AND NOT APPLE) find_package(Wayland) if (WAYLAND_FOUND) target_include_directories(3rdparty_vulkan INTERFACE ${WAYLAND_INCLUDE_DIR}) + + target_compile_definitions(3rdparty_vulkan + INTERFACE -DVK_USE_PLATFORM_WAYLAND_KHR) endif() endif() set(VULKAN_TARGET 3rdparty_vulkan) else() - message(WARNING "USE_VULKAN was enabled, but libvulkan was not found. RPCS3 will be compiled without Vulkan support.") - if(APPLE) - message(FATAL_ERROR "To build without Vulkan support on macOS, please disable USE_VULKAN.") - endif() + message("WARNING! USE_VULKAN was enabled, but libvulkan was not found. 
RPCS3 will be compiled without Vulkan support.") endif() endif() # AsmJit add_subdirectory(asmjit EXCLUDE_FROM_ALL) -# SDL3 -set(SDL3_TARGET 3rdparty_dummy_lib) -if(USE_SDL) - if(USE_SYSTEM_SDL) - find_package(SDL3) - if(SDL3_FOUND AND SDL3_VERSION VERSION_GREATER_EQUAL 3.2.0) - message(STATUS "Using system SDL3 version '${SDL3_VERSION}'") - add_library(3rdparty_sdl3 INTERFACE) - target_compile_definitions(3rdparty_sdl3 INTERFACE -DHAVE_SDL3=1) - target_link_libraries(3rdparty_sdl3 INTERFACE SDL3::SDL3) - set(SDL3_TARGET 3rdparty_sdl3) - else() - message(FATAL_ERROR "SDL3 is not available on this system") - endif() - else() - message(STATUS "Using static SDL3 from 3rdparty") - add_subdirectory(libsdl-org EXCLUDE_FROM_ALL) - target_compile_definitions(SDL3-static INTERFACE -DHAVE_SDL3=1) - set(SDL3_TARGET SDL3-static) - set(SDL3_DIR "${CMAKE_CURRENT_BINARY_DIR}/libsdl-org/SDL" CACHE STRING "") - endif() -endif() - # OpenAL -if (NOT ANDROID) - add_subdirectory(OpenAL EXCLUDE_FROM_ALL) -else() - add_library(3rdparty_openal INTERFACE) - target_compile_definitions(3rdparty_openal INTERFACE WITHOUT_OPENAL=1) -endif() +add_subdirectory(OpenAL EXCLUDE_FROM_ALL) # FAudio set(FAUDIO_TARGET 3rdparty_dummy_lib) if(USE_FAUDIO) - # FAudio depends on SDL3 - find_package(SDL3) - if (USE_SYSTEM_FAUDIO) - if (SDL3_FOUND AND SDL3_VERSION VERSION_GREATER_EQUAL 3.2.0) + # FAudio depends on SDL2 + find_package(SDL2) + if (NOT SDL2_FOUND OR SDL2_VERSION VERSION_LESS 2.0.12) + message(WARNING + "-- RPCS3: FAudio requires SDL 2.0.9 or newer. Please note, this warning" + "can also be displayed with SDL2 versions between 2.0.9-2.0.12, as the" + "CMake config files are not correctly installed. Since a valid SDL2" + ">=2.0.9 version cannot be found, building with FAudio will be skipped.") + set(USE_FAUDIO False) + else() + if (USE_SYSTEM_FAUDIO) message(STATUS "RPCS3: Using system FAudio") find_package(FAudio REQUIRED CONFIGS FAudioConfig.cmake FAudio-config.cmake) add_library(3rdparty_FAudio INTERFACE) @@ -260,103 +230,135 @@ if(USE_FAUDIO) target_compile_definitions(3rdparty_FAudio INTERFACE -DHAVE_FAUDIO) set(FAUDIO_TARGET 3rdparty_FAudio) else() - message(WARNING - "RPCS3: System FAudio requires SDL 3.2.0 or newer. Since a valid SDL3" - ">=3.2.0 version cannot be found, building with FAudio will be skipped.") - set(USE_FAUDIO OFF CACHE BOOL "Disabled using system FAudio with SDL < 3.2.0" FORCE) - endif() - else() - if (SDL3_FOUND AND SDL3_VERSION VERSION_GREATER_EQUAL 3.2.0) message(STATUS "RPCS3: Using builtin FAudio") set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared library") add_subdirectory(FAudio EXCLUDE_FROM_ALL) - target_compile_definitions(FAudio-static INTERFACE -DHAVE_FAUDIO) - set(FAUDIO_TARGET FAudio-static) - else() - message(FATAL_ERROR - "-- RPCS3: 3rdparty FAudio requires SDL 3.2.0 or newer. 
Since a valid SDL3" - ">=3.2.0 version cannot be found, building with FAudio will be skipped.") - set(USE_FAUDIO OFF CACHE BOOL "Disabled FAudio with SDL < 3.2.0" FORCE) + target_compile_definitions(FAudio INTERFACE -DHAVE_FAUDIO) + set(FAUDIO_TARGET FAudio) endif() endif() endif() -set_property(TARGET ${FAUDIO_TARGET} PROPERTY FOLDER "3rdparty/") - - # FFMPEG -if(NOT ANDROID) - add_library(3rdparty_ffmpeg INTERFACE) +add_library(3rdparty_ffmpeg INTERFACE) - # Select the version of ffmpeg to use, default is builtin - if(USE_SYSTEM_FFMPEG) - message(STATUS "RPCS3: using shared ffmpeg") - find_package(FFMPEG REQUIRED) +# Select the version of ffmpeg to use, default is builtin +if(USE_SYSTEM_FFMPEG) + message("-- RPCS3: using shared ffmpeg") + find_package(FFMPEG REQUIRED) - target_include_directories(3rdparty_ffmpeg INTERFACE ${FFMPEG_INCLUDE_DIR}) - target_link_libraries(3rdparty_ffmpeg INTERFACE ${FFMPEG_LIBRARIES}) + target_include_directories(3rdparty_ffmpeg INTERFACE ${FFMPEG_INCLUDE_DIR}) + target_link_libraries(3rdparty_ffmpeg INTERFACE ${FFMPEG_LIBRARIES}) +else() + if (NOT MSVC AND WIN32) + message("-- RPCS3: building ffmpeg submodule") + + ExternalProject_Add(ffmpeg-mingw + DOWNLOAD_COMMAND "" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/ffmpeg + BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/ffmpeg + CONFIGURE_COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/ffmpeg/configure --prefix=./windows/x86_64 --arch=x86_64 --disable-avdevice --disable-programs --disable-avfilter --disable-postproc --disable-doc --disable-pthreads --enable-w32threads --disable-network --disable-everything --disable-encoders --disable-muxers --disable-hwaccels --disable-parsers --disable-protocols --enable-dxva2 --enable-static --disable-shared --enable-decoder=aac --enable-decoder=aac_latm --enable-decoder=atrac3 --enable-decoder=atrac3p --enable-decoder=mp3 --enable-decoder=pcm_s16le --enable-decoder=pcm_s8 --enable-decoder=h264 --enable-decoder=mpeg4 --enable-decoder=mpeg2video --enable-decoder=mjpeg --enable-decoder=mjpegb --enable-encoder=pcm_s16le --enable-encoder=ffv1 --enable-encoder=mpeg4 --enable-parser=h264 --enable-parser=mpeg4video --enable-parser=mpegaudio --enable-parser=mpegvideo --enable-parser=mjpeg --enable-parser=aac --enable-parser=aac_latm --enable-muxer=avi --enable-demuxer=h264 --enable-demuxer=m4v --enable-demuxer=mp3 --enable-demuxer=mpegvideo --enable-demuxer=mpegps --enable-demuxer=mjpeg --enable-demuxer=avi --enable-demuxer=aac --enable-demuxer=pmp --enable-demuxer=oma --enable-demuxer=pcm_s16le --enable-demuxer=pcm_s8 --enable-demuxer=wav --enable-hwaccel=h264_dxva2 --enable-indev=dshow --enable-protocol=file + BUILD_COMMAND make -j 4 + INSTALL_COMMAND make install + ) + + set(FFMPEG_LIB_AVFORMAT "${CMAKE_CURRENT_BINARY_DIR}/ffmpeg/windows/x86_64/lib/libavformat.a") + set(FFMPEG_LIB_AVCODEC "${CMAKE_CURRENT_BINARY_DIR}/ffmpeg/windows/x86_64/lib/libavcodec.a") + set(FFMPEG_LIB_AVUTIL "${CMAKE_CURRENT_BINARY_DIR}/ffmpeg/windows/x86_64/lib/libavutil.a") + set(FFMPEG_LIB_SWSCALE "${CMAKE_CURRENT_BINARY_DIR}/ffmpeg/windows/x86_64/lib/libswscale.a") else() - message(STATUS "RPCS3: using builtin ffmpeg") - add_subdirectory(ffmpeg EXCLUDE_FROM_ALL) - # ffmpeg-core libraries are extracted to CMAKE_BINARY_DIR - set(FFMPEG_LIB_DIR "${CMAKE_BINARY_DIR}/3rdparty/ffmpeg/lib") + message("-- RPCS3: using builtin ffmpeg") if (WIN32) + set(FFMPEG_LIB_DIR "ffmpeg/windows/x86_64") target_link_libraries(3rdparty_ffmpeg INTERFACE "Bcrypt.lib") + elseif(CMAKE_SYSTEM MATCHES "Linux") + set(FFMPEG_LIB_DIR "ffmpeg/linux/x86_64") 
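+        # Each branch picks the directory holding the prebuilt static ffmpeg libraries
+        # for this platform; find_library() below searches only there (NO_DEFAULT_PATH).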
+ elseif(APPLE) + set(FFMPEG_LIB_DIR "ffmpeg/macos/x86_64") + else() + message(FATAL_ERROR "Prebuilt ffmpeg is not available on this platform! Try USE_SYSTEM_FFMPEG=ON.") endif() find_library(FFMPEG_LIB_AVFORMAT avformat PATHS ${FFMPEG_LIB_DIR} NO_DEFAULT_PATH) find_library(FFMPEG_LIB_AVCODEC avcodec PATHS ${FFMPEG_LIB_DIR} NO_DEFAULT_PATH) find_library(FFMPEG_LIB_AVUTIL avutil PATHS ${FFMPEG_LIB_DIR} NO_DEFAULT_PATH) find_library(FFMPEG_LIB_SWSCALE swscale PATHS ${FFMPEG_LIB_DIR} NO_DEFAULT_PATH) - find_library(FFMPEG_LIB_SWRESAMPLE swresample PATHS ${FFMPEG_LIB_DIR} NO_DEFAULT_PATH) + endif() - if (FFMPEG_LIB_AVFORMAT MATCHES "FFMPEG_LIB_AVFORMAT-NOTFOUND") - message(FATAL_ERROR "@#$%! FFMPEG NOT FOUND! ${FFMPEG_LIB_DIR}") - endif() + target_include_directories(3rdparty_ffmpeg INTERFACE "ffmpeg/include") - target_link_libraries(3rdparty_ffmpeg + target_link_libraries(3rdparty_ffmpeg INTERFACE ${FFMPEG_LIB_AVFORMAT} ${FFMPEG_LIB_AVCODEC} ${FFMPEG_LIB_AVUTIL} - ${FFMPEG_LIB_SWSCALE} - ${FFMPEG_LIB_SWRESAMPLE} - ) - target_include_directories(3rdparty_ffmpeg INTERFACE "ffmpeg/include") - endif() + ${FFMPEG_LIB_SWSCALE}) endif() # GLEW add_library(3rdparty_glew INTERFACE) -if(NOT MSVC AND NOT ANDROID) - find_package(GLEW REQUIRED) +if(NOT MSVC) + find_package(GLEW 1.13.0 REQUIRED) target_link_libraries(3rdparty_glew INTERFACE GLEW::GLEW) endif() # LLVM -add_subdirectory(llvm EXCLUDE_FROM_ALL) +include(llvm.cmake) # WOLFSSL -add_subdirectory(wolfssl EXCLUDE_FROM_ALL) +if(USE_SYSTEM_WOLFSSL) + message("-- RPCS3: using shared wolfssl") + pkg_check_modules(WolfSSL REQUIRED IMPORTED_TARGET wolfssl>=4.7.0) + add_library(wolfssl INTERFACE) + target_link_libraries(wolfssl INTERFACE PkgConfig::WolfSSL) +else() + # TODO(cjj19970505@live.cn) + # OPENSSL_EXTRA, WOLFSSL_DES_ECB and HAVE_SNI are unconfigurable from CMake cache. + # but they do have it in a TODO list (wolfssl/CMakeList, 1021) + add_compile_definitions(OPENSSL_EXTRA WOLFSSL_DES_ECB HAVE_SNI) + + set(WOLFSSL_TLS13 "no" CACHE INTERNAL "") + set(WOLFSSL_SHA224 "yes" CACHE INTERNAL "") + set(WOLFSSL_SHA3 "yes" CACHE INTERNAL "") + set(WOLFSSL_SHAKE256 "yes" CACHE INTERNAL "") + set(WOLFSSL_BASE64_ENCODE "no" CACHE INTERNAL "") + set(WOLFSSL_DES3 "yes" CACHE INTERNAL "") + set(WOLFSSL_POLY1305 "yes" CACHE INTERNAL "") + set(WOLFSSL_CHACHA "yes" CACHE INTERNAL "") + set(WOLFSSL_FILESYSTEM "yes" CACHE INTERNAL "") + set(WOLFSSL_PWDBASED "yes" CACHE INTERNAL "") + set(WOLFSSL_FAST_MATH "no" CACHE INTERNAL "") + set(WOLFSSL_EXAMPLES "no" CACHE INTERNAL "") + set(WOLFSSL_CRYPT_TESTS "no" CACHE INTERNAL "") + set(WOLFSSL_ASYNC_THREADS "no" CACHE INTERNAL "") + set(WOLFSSL_CONFIG_H "no" CACHE INTERNAL "") + + add_subdirectory(wolfssl EXCLUDE_FROM_ALL) + + # TODO(cjj19970505@live.cn) + # This only works in single-config generator + # For a multi-config generator, we need to provide different wolfssl binaries for different config + if (GENERATOR_IS_MULTI_CONFIG) + message( FATAL_ERROR "RPCS3 can only be configured using single-config generator." 
) + endif() + + if(MSVC) + set(WolfSSL_LIBRARY "${CMAKE_CURRENT_BINARY_DIR}/wolfssl/wolfssl.lib") + else() + set(WolfSSL_LIBRARY "${CMAKE_CURRENT_BINARY_DIR}/wolfssl/libwolfssl.a") + endif() + + # "${CMAKE_CURRENT_SOURCE_DIR}/wolfssl/wolfssl/" provides openssl headers + # So that curl can be built on an environment where openssl headers are not provided + set(WolfSSL_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/wolfssl/" "${CMAKE_CURRENT_SOURCE_DIR}/wolfssl/wolfssl/" "${CMAKE_CURRENT_BINARY_DIR}/wolfssl/") +endif() # CURL add_subdirectory(curl EXCLUDE_FROM_ALL) -# MINIUPNP -add_subdirectory(miniupnp EXCLUDE_FROM_ALL) - -# RTMIDI -add_subdirectory(rtmidi EXCLUDE_FROM_ALL) - -# OPENCV -add_subdirectory(opencv EXCLUDE_FROM_ALL) - -# FUSION -add_subdirectory(fusion EXCLUDE_FROM_ALL) - # add nice ALIAS targets for ease of use if(USE_SYSTEM_LIBUSB) add_library(3rdparty::libusb ALIAS usb-1.0-shared) @@ -364,17 +366,18 @@ else() add_library(3rdparty::libusb ALIAS usb-1.0-static) endif() add_library(3rdparty::zlib ALIAS 3rdparty_zlib) -add_library(3rdparty::zstd ALIAS 3rdparty_zstd) -add_library(3rdparty::7zip ALIAS 3rdparty_7zip) +add_library(3rdparty::7z ALIAS 3rdparty_7z) add_library(3rdparty::flatbuffers ALIAS 3rdparty_flatbuffers) add_library(3rdparty::pugixml ALIAS pugixml) -add_library(3rdparty::glslang ALIAS 3rdparty_glslang) add_library(3rdparty::yaml-cpp ALIAS yaml-cpp) +add_library(3rdparty::xxhash ALIAS xxhash) add_library(3rdparty::hidapi ALIAS 3rdparty_hidapi) add_library(3rdparty::libpng ALIAS ${LIBPNG_TARGET}) add_library(3rdparty::opengl ALIAS 3rdparty_opengl) add_library(3rdparty::stblib ALIAS 3rdparty_stblib) add_library(3rdparty::discordRPC ALIAS 3rdparty_discordRPC) +add_library(3rdparty::alsa ALIAS ${ALSA_TARGET}) +add_library(3rdparty::pulse ALIAS ${PULSE_TARGET}) add_library(3rdparty::faudio ALIAS ${FAUDIO_TARGET}) add_library(3rdparty::libevdev ALIAS ${LIBEVDEV_TARGET}) add_library(3rdparty::vulkan ALIAS ${VULKAN_TARGET}) @@ -382,10 +385,4 @@ add_library(3rdparty::openal ALIAS 3rdparty_openal) add_library(3rdparty::ffmpeg ALIAS 3rdparty_ffmpeg) add_library(3rdparty::glew ALIAS 3rdparty_glew) add_library(3rdparty::wolfssl ALIAS wolfssl) -add_library(3rdparty::libcurl ALIAS 3rdparty_libcurl) -add_library(3rdparty::soundtouch ALIAS soundtouch) -add_library(3rdparty::sdl3 ALIAS ${SDL3_TARGET}) -add_library(3rdparty::miniupnpc ALIAS libminiupnpc-static) -add_library(3rdparty::rtmidi ALIAS rtmidi) -add_library(3rdparty::opencv ALIAS ${OPENCV_TARGET}) -add_library(3rdparty::fusion ALIAS Fusion) +add_library(3rdparty::libcurl ALIAS libcurl) diff --git a/3rdparty/DetectArchitecture.cmake b/3rdparty/DetectArchitecture.cmake deleted file mode 100644 index dcdb0e2a70..0000000000 --- a/3rdparty/DetectArchitecture.cmake +++ /dev/null @@ -1,63 +0,0 @@ -# From https://github.com/merryhime/dynarmic -include(CheckSymbolExists) - -if (CMAKE_OSX_ARCHITECTURES) - set(DYNARMIC_MULTIARCH_BUILD 1) - set(ARCHITECTURE "${CMAKE_OSX_ARCHITECTURES}") - return() -endif() - -function(detect_architecture symbol arch) - if (NOT DEFINED ARCHITECTURE) - set(CMAKE_REQUIRED_QUIET YES) - check_symbol_exists("${symbol}" "" DETECT_ARCHITECTURE_${arch}) - unset(CMAKE_REQUIRED_QUIET) - - if (DETECT_ARCHITECTURE_${arch}) - set(ARCHITECTURE "${arch}" PARENT_SCOPE) - endif() - - unset(DETECT_ARCHITECTURE_${arch} CACHE) - endif() -endfunction() - -detect_architecture("__ARM64__" arm64) -detect_architecture("__aarch64__" arm64) -detect_architecture("_M_ARM64" arm64) - -detect_architecture("__arm__" arm) 
-detect_architecture("__TARGET_ARCH_ARM" arm) -detect_architecture("_M_ARM" arm) - -detect_architecture("__x86_64" x86_64) -detect_architecture("__x86_64__" x86_64) -detect_architecture("__amd64" x86_64) -detect_architecture("_M_X64" x86_64) - -detect_architecture("__i386" x86) -detect_architecture("__i386__" x86) -detect_architecture("_M_IX86" x86) - -detect_architecture("__ia64" ia64) -detect_architecture("__ia64__" ia64) -detect_architecture("_M_IA64" ia64) - -detect_architecture("__mips" mips) -detect_architecture("__mips__" mips) -detect_architecture("_M_MRX000" mips) - -detect_architecture("__ppc64__" ppc64) -detect_architecture("__powerpc64__" ppc64) - -detect_architecture("__ppc__" ppc) -detect_architecture("__ppc" ppc) -detect_architecture("__powerpc__" ppc) -detect_architecture("_ARCH_COM" ppc) -detect_architecture("_ARCH_PWR" ppc) -detect_architecture("_ARCH_PPC" ppc) -detect_architecture("_M_MPPC" ppc) -detect_architecture("_M_PPC" ppc) - -detect_architecture("__riscv" riscv) - -detect_architecture("__EMSCRIPTEN__" wasm) diff --git a/3rdparty/FAudio b/3rdparty/FAudio index e6ddfabab2..1f6daa2382 160000 --- a/3rdparty/FAudio +++ b/3rdparty/FAudio @@ -1 +1 @@ -Subproject commit e6ddfabab2efbc8765750039634fe5e24ac31205 +Subproject commit 1f6daa2382d5ea8217e626ef4908879bdef7c2c6 diff --git a/3rdparty/GL/glext.h b/3rdparty/GL/glext.h index 276a962a96..a28de762ef 100644 --- a/3rdparty/GL/glext.h +++ b/3rdparty/GL/glext.h @@ -32,9 +32,9 @@ extern "C" { #define GLAPI extern #endif -#define GL_GLEXT_VERSION 20250203 +#define GL_GLEXT_VERSION 20210420 -#include +#include "khrplatform.h" /* Generated C header for: * API: gl @@ -5397,12 +5397,12 @@ typedef void (APIENTRY *GLDEBUGPROCAMD)(GLuint id,GLenum category,GLenum severi typedef void (APIENTRYP PFNGLDEBUGMESSAGEENABLEAMDPROC) (GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); typedef void (APIENTRYP PFNGLDEBUGMESSAGEINSERTAMDPROC) (GLenum category, GLenum severity, GLuint id, GLsizei length, const GLchar *buf); typedef void (APIENTRYP PFNGLDEBUGMESSAGECALLBACKAMDPROC) (GLDEBUGPROCAMD callback, void *userParam); -typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGAMDPROC) (GLuint count, GLsizei bufSize, GLenum *categories, GLenum *severities, GLuint *ids, GLsizei *lengths, GLchar *message); +typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGAMDPROC) (GLuint count, GLsizei bufSize, GLenum *categories, GLuint *severities, GLuint *ids, GLsizei *lengths, GLchar *message); #ifdef GL_GLEXT_PROTOTYPES GLAPI void APIENTRY glDebugMessageEnableAMD (GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); GLAPI void APIENTRY glDebugMessageInsertAMD (GLenum category, GLenum severity, GLuint id, GLsizei length, const GLchar *buf); GLAPI void APIENTRY glDebugMessageCallbackAMD (GLDEBUGPROCAMD callback, void *userParam); -GLAPI GLuint APIENTRY glGetDebugMessageLogAMD (GLuint count, GLsizei bufSize, GLenum *categories, GLenum *severities, GLuint *ids, GLsizei *lengths, GLchar *message); +GLAPI GLuint APIENTRY glGetDebugMessageLogAMD (GLuint count, GLsizei bufSize, GLenum *categories, GLuint *severities, GLuint *ids, GLsizei *lengths, GLchar *message); #endif #endif /* GL_AMD_debug_output */ @@ -7370,16 +7370,6 @@ GLAPI void APIENTRY glBlitFramebufferEXT (GLint srcX0, GLint srcY0, GLint srcX1, #endif #endif /* GL_EXT_framebuffer_blit */ -#ifndef GL_EXT_framebuffer_blit_layers -#define GL_EXT_framebuffer_blit_layers 1 -typedef void (APIENTRYP PFNGLBLITFRAMEBUFFERLAYERSEXTPROC) (GLint 
srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); -typedef void (APIENTRYP PFNGLBLITFRAMEBUFFERLAYEREXTPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint srcLayer, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLint dstLayer, GLbitfield mask, GLenum filter); -#ifdef GL_GLEXT_PROTOTYPES -GLAPI void APIENTRY glBlitFramebufferLayersEXT (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); -GLAPI void APIENTRY glBlitFramebufferLayerEXT (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint srcLayer, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLint dstLayer, GLbitfield mask, GLenum filter); -#endif -#endif /* GL_EXT_framebuffer_blit_layers */ - #ifndef GL_EXT_framebuffer_multisample #define GL_EXT_framebuffer_multisample 1 #define GL_RENDERBUFFER_SAMPLES_EXT 0x8CAB @@ -8224,10 +8214,6 @@ GLAPI void APIENTRY glMemoryBarrierEXT (GLbitfield barriers); #define GL_EXT_shader_integer_mix 1 #endif /* GL_EXT_shader_integer_mix */ -#ifndef GL_EXT_shader_samples_identical -#define GL_EXT_shader_samples_identical 1 -#endif /* GL_EXT_shader_samples_identical */ - #ifndef GL_EXT_shadow_funcs #define GL_EXT_shadow_funcs 1 #endif /* GL_EXT_shadow_funcs */ @@ -8639,36 +8625,6 @@ GLAPI void APIENTRY glTextureNormalEXT (GLenum mode); #define GL_RGBA_SNORM 0x8F93 #endif /* GL_EXT_texture_snorm */ -#ifndef GL_EXT_texture_storage -#define GL_EXT_texture_storage 1 -#define GL_TEXTURE_IMMUTABLE_FORMAT_EXT 0x912F -#define GL_RGBA32F_EXT 0x8814 -#define GL_RGB32F_EXT 0x8815 -#define GL_ALPHA32F_EXT 0x8816 -#define GL_LUMINANCE32F_EXT 0x8818 -#define GL_LUMINANCE_ALPHA32F_EXT 0x8819 -#define GL_RGBA16F_EXT 0x881A -#define GL_RGB16F_EXT 0x881B -#define GL_ALPHA16F_EXT 0x881C -#define GL_LUMINANCE16F_EXT 0x881E -#define GL_LUMINANCE_ALPHA16F_EXT 0x881F -#define GL_BGRA8_EXT 0x93A1 -#define GL_R8_EXT 0x8229 -#define GL_RG8_EXT 0x822B -#define GL_R32F_EXT 0x822E -#define GL_RG32F_EXT 0x8230 -#define GL_R16F_EXT 0x822D -#define GL_RG16F_EXT 0x822F -typedef void (APIENTRYP PFNGLTEXSTORAGE1DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); -typedef void (APIENTRYP PFNGLTEXSTORAGE2DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); -typedef void (APIENTRYP PFNGLTEXSTORAGE3DEXTPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); -#ifdef GL_GLEXT_PROTOTYPES -GLAPI void APIENTRY glTexStorage1DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width); -GLAPI void APIENTRY glTexStorage2DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height); -GLAPI void APIENTRY glTexStorage3DEXT (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth); -#endif -#endif /* GL_EXT_texture_storage */ - #ifndef GL_EXT_texture_swizzle #define GL_EXT_texture_swizzle 1 #define GL_TEXTURE_SWIZZLE_R_EXT 0x8E42 @@ -9404,11 +9360,6 @@ GLAPI void APIENTRY glResizeBuffersMESA (void); #define GL_MESA_shader_integer_functions 1 #endif /* GL_MESA_shader_integer_functions */ -#ifndef GL_MESA_texture_const_bandwidth -#define GL_MESA_texture_const_bandwidth 1 -#define GL_CONST_BW_TILING_MESA 0x8BBE -#endif /* GL_MESA_texture_const_bandwidth */ - #ifndef GL_MESA_tile_raster_order #define GL_MESA_tile_raster_order 1 #define GL_TILE_RASTER_ORDER_FIXED_MESA 
0x8BB8 @@ -10263,6 +10214,12 @@ typedef void (APIENTRYP PFNGLMULTITEXCOORD3HNVPROC) (GLenum target, GLhalfNV s, typedef void (APIENTRYP PFNGLMULTITEXCOORD3HVNVPROC) (GLenum target, const GLhalfNV *v); typedef void (APIENTRYP PFNGLMULTITEXCOORD4HNVPROC) (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q); typedef void (APIENTRYP PFNGLMULTITEXCOORD4HVNVPROC) (GLenum target, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLFOGCOORDHNVPROC) (GLhalfNV fog); +typedef void (APIENTRYP PFNGLFOGCOORDHVNVPROC) (const GLhalfNV *fog); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLVERTEXWEIGHTHNVPROC) (GLhalfNV weight); +typedef void (APIENTRYP PFNGLVERTEXWEIGHTHVNVPROC) (const GLhalfNV *weight); typedef void (APIENTRYP PFNGLVERTEXATTRIB1HNVPROC) (GLuint index, GLhalfNV x); typedef void (APIENTRYP PFNGLVERTEXATTRIB1HVNVPROC) (GLuint index, const GLhalfNV *v); typedef void (APIENTRYP PFNGLVERTEXATTRIB2HNVPROC) (GLuint index, GLhalfNV x, GLhalfNV y); @@ -10275,12 +10232,6 @@ typedef void (APIENTRYP PFNGLVERTEXATTRIBS1HVNVPROC) (GLuint index, GLsizei n, c typedef void (APIENTRYP PFNGLVERTEXATTRIBS2HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v); typedef void (APIENTRYP PFNGLVERTEXATTRIBS3HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v); typedef void (APIENTRYP PFNGLVERTEXATTRIBS4HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v); -typedef void (APIENTRYP PFNGLFOGCOORDHNVPROC) (GLhalfNV fog); -typedef void (APIENTRYP PFNGLFOGCOORDHVNVPROC) (const GLhalfNV *fog); -typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue); -typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HVNVPROC) (const GLhalfNV *v); -typedef void (APIENTRYP PFNGLVERTEXWEIGHTHNVPROC) (GLhalfNV weight); -typedef void (APIENTRYP PFNGLVERTEXWEIGHTHVNVPROC) (const GLhalfNV *weight); #ifdef GL_GLEXT_PROTOTYPES GLAPI void APIENTRY glVertex2hNV (GLhalfNV x, GLhalfNV y); GLAPI void APIENTRY glVertex2hvNV (const GLhalfNV *v); @@ -10310,6 +10261,12 @@ GLAPI void APIENTRY glMultiTexCoord3hNV (GLenum target, GLhalfNV s, GLhalfNV t, GLAPI void APIENTRY glMultiTexCoord3hvNV (GLenum target, const GLhalfNV *v); GLAPI void APIENTRY glMultiTexCoord4hNV (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q); GLAPI void APIENTRY glMultiTexCoord4hvNV (GLenum target, const GLhalfNV *v); +GLAPI void APIENTRY glFogCoordhNV (GLhalfNV fog); +GLAPI void APIENTRY glFogCoordhvNV (const GLhalfNV *fog); +GLAPI void APIENTRY glSecondaryColor3hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue); +GLAPI void APIENTRY glSecondaryColor3hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glVertexWeighthNV (GLhalfNV weight); +GLAPI void APIENTRY glVertexWeighthvNV (const GLhalfNV *weight); GLAPI void APIENTRY glVertexAttrib1hNV (GLuint index, GLhalfNV x); GLAPI void APIENTRY glVertexAttrib1hvNV (GLuint index, const GLhalfNV *v); GLAPI void APIENTRY glVertexAttrib2hNV (GLuint index, GLhalfNV x, GLhalfNV y); @@ -10322,12 +10279,6 @@ GLAPI void APIENTRY glVertexAttribs1hvNV (GLuint index, GLsizei n, const GLhalfN GLAPI void APIENTRY glVertexAttribs2hvNV (GLuint index, GLsizei n, const GLhalfNV *v); GLAPI void APIENTRY glVertexAttribs3hvNV (GLuint index, GLsizei n, const GLhalfNV *v); GLAPI void APIENTRY glVertexAttribs4hvNV (GLuint index, GLsizei n, const GLhalfNV *v); -GLAPI void APIENTRY glFogCoordhNV (GLhalfNV fog); -GLAPI void APIENTRY glFogCoordhvNV 
(const GLhalfNV *fog); -GLAPI void APIENTRY glSecondaryColor3hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue); -GLAPI void APIENTRY glSecondaryColor3hvNV (const GLhalfNV *v); -GLAPI void APIENTRY glVertexWeighthNV (GLhalfNV weight); -GLAPI void APIENTRY glVertexWeighthvNV (const GLhalfNV *weight); #endif #endif /* GL_NV_half_float */ @@ -11464,10 +11415,6 @@ GLAPI void APIENTRY glDrawTransformFeedbackNV (GLenum mode, GLuint id); #endif #endif /* GL_NV_transform_feedback2 */ -#ifndef GL_NV_uniform_buffer_std430_layout -#define GL_NV_uniform_buffer_std430_layout 1 -#endif /* GL_NV_uniform_buffer_std430_layout */ - #ifndef GL_NV_uniform_buffer_unified_memory #define GL_NV_uniform_buffer_unified_memory 1 #define GL_UNIFORM_BUFFER_UNIFIED_NV 0x936E @@ -11983,10 +11930,8 @@ GLAPI void APIENTRY glViewportSwizzleNV (GLuint index, GLenum swizzlex, GLenum s #define GL_MAX_VIEWS_OVR 0x9631 #define GL_FRAMEBUFFER_INCOMPLETE_VIEW_TARGETS_OVR 0x9633 typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews); -typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC) (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews); #ifdef GL_GLEXT_PROTOTYPES GLAPI void APIENTRY glFramebufferTextureMultiviewOVR (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews); -GLAPI void APIENTRY glNamedFramebufferTextureMultiviewOVR (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews); #endif #endif /* GL_OVR_multiview */ diff --git a/3rdparty/GL/KHR/khrplatform.h b/3rdparty/GL/khrplatform.h similarity index 92% rename from 3rdparty/GL/KHR/khrplatform.h rename to 3rdparty/GL/khrplatform.h index 01646449ca..dd22d92701 100644 --- a/3rdparty/GL/KHR/khrplatform.h +++ b/3rdparty/GL/khrplatform.h @@ -153,20 +153,6 @@ typedef int64_t khronos_int64_t; typedef uint64_t khronos_uint64_t; #define KHRONOS_SUPPORT_INT64 1 #define KHRONOS_SUPPORT_FLOAT 1 -/* - * To support platform where unsigned long cannot be used interchangeably with - * inptr_t (e.g. CHERI-extended ISAs), we can use the stdint.h intptr_t. - * Ideally, we could just use (u)intptr_t everywhere, but this could result in - * ABI breakage if khronos_uintptr_t is changed from unsigned long to - * unsigned long long or similar (this results in different C++ name mangling). - * To avoid changes for existing platforms, we restrict usage of intptr_t to - * platforms where the size of a pointer is larger than the size of long. - */ -#if defined(__SIZEOF_LONG__) && defined(__SIZEOF_POINTER__) -#if __SIZEOF_POINTER__ > __SIZEOF_LONG__ -#define KHRONOS_USE_INTPTR_T -#endif -#endif #elif defined(__VMS ) || defined(__sgi) @@ -249,21 +235,14 @@ typedef unsigned short int khronos_uint16_t; * pointers are 64 bits, but 'long' is still 32 bits. Win64 appears * to be the only LLP64 architecture in current use. 
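+ * (Hence the _WIN64 check below: on LLP64 Windows the pointer-sized integer
+ * types must be 'long long', while LP64 platforms keep plain 'long'.)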
*/ -#ifdef KHRONOS_USE_INTPTR_T -typedef intptr_t khronos_intptr_t; -typedef uintptr_t khronos_uintptr_t; -#elif defined(_WIN64) +#ifdef _WIN64 typedef signed long long int khronos_intptr_t; typedef unsigned long long int khronos_uintptr_t; -#else -typedef signed long int khronos_intptr_t; -typedef unsigned long int khronos_uintptr_t; -#endif - -#if defined(_WIN64) typedef signed long long int khronos_ssize_t; typedef unsigned long long int khronos_usize_t; #else +typedef signed long int khronos_intptr_t; +typedef unsigned long int khronos_uintptr_t; typedef signed long int khronos_ssize_t; typedef unsigned long int khronos_usize_t; #endif diff --git a/3rdparty/GPUOpen/VulkanMemoryAllocator b/3rdparty/GPUOpen/VulkanMemoryAllocator deleted file mode 160000 index 1d8f600fd4..0000000000 --- a/3rdparty/GPUOpen/VulkanMemoryAllocator +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1d8f600fd424278486eade7ed3e877c99f0846b1 diff --git a/3rdparty/GPUOpen/include/ffx_fsr1.h b/3rdparty/GPUOpen/include/ffx_fsr1.h index 4e0b3d5485..15ecfde5c2 100644 --- a/3rdparty/GPUOpen/include/ffx_fsr1.h +++ b/3rdparty/GPUOpen/include/ffx_fsr1.h @@ -747,12 +747,12 @@ AF1 sharpness){ // Immediate constants for peak range. AF2 peakC=AF2(1.0,-1.0*4.0); // Limiters, these need to be high precision RCPs. - AF1 hitMinR=min(mn4R,eR)*ARcpF1(AF1_(4.0)*mx4R); - AF1 hitMinG=min(mn4G,eG)*ARcpF1(AF1_(4.0)*mx4G); - AF1 hitMinB=min(mn4B,eB)*ARcpF1(AF1_(4.0)*mx4B); - AF1 hitMaxR=(peakC.x-max(mx4R,eR))*ARcpF1(AF1_(4.0)*mn4R+peakC.y); - AF1 hitMaxG=(peakC.x-max(mx4G,eG))*ARcpF1(AF1_(4.0)*mn4G+peakC.y); - AF1 hitMaxB=(peakC.x-max(mx4B,eB))*ARcpF1(AF1_(4.0)*mn4B+peakC.y); + AF1 hitMinR=mn4R*ARcpF1(AF1_(4.0)*mx4R); + AF1 hitMinG=mn4G*ARcpF1(AF1_(4.0)*mx4G); + AF1 hitMinB=mn4B*ARcpF1(AF1_(4.0)*mx4B); + AF1 hitMaxR=(peakC.x-mx4R)*ARcpF1(AF1_(4.0)*mn4R+peakC.y); + AF1 hitMaxG=(peakC.x-mx4G)*ARcpF1(AF1_(4.0)*mn4G+peakC.y); + AF1 hitMaxB=(peakC.x-mx4B)*ARcpF1(AF1_(4.0)*mn4B+peakC.y); AF1 lobeR=max(-hitMinR,hitMaxR); AF1 lobeG=max(-hitMinG,hitMaxG); AF1 lobeB=max(-hitMinB,hitMaxB); @@ -845,12 +845,12 @@ AF1 sharpness){ // Immediate constants for peak range. AH2 peakC=AH2(1.0,-1.0*4.0); // Limiters, these need to be high precision RCPs. - AH1 hitMinR=min(mn4R,eR)*ARcpH1(AH1_(4.0)*mx4R); - AH1 hitMinG=min(mn4G,eG)*ARcpH1(AH1_(4.0)*mx4G); - AH1 hitMinB=min(mn4B,eB)*ARcpH1(AH1_(4.0)*mx4B); - AH1 hitMaxR=(peakC.x-max(mx4R,eR))*ARcpH1(AH1_(4.0)*mn4R+peakC.y); - AH1 hitMaxG=(peakC.x-max(mx4G,eG))*ARcpH1(AH1_(4.0)*mn4G+peakC.y); - AH1 hitMaxB=(peakC.x-max(mx4B,eB))*ARcpH1(AH1_(4.0)*mn4B+peakC.y); + AH1 hitMinR=mn4R*ARcpH1(AH1_(4.0)*mx4R); + AH1 hitMinG=mn4G*ARcpH1(AH1_(4.0)*mx4G); + AH1 hitMinB=mn4B*ARcpH1(AH1_(4.0)*mx4B); + AH1 hitMaxR=(peakC.x-mx4R)*ARcpH1(AH1_(4.0)*mn4R+peakC.y); + AH1 hitMaxG=(peakC.x-mx4G)*ARcpH1(AH1_(4.0)*mn4G+peakC.y); + AH1 hitMaxB=(peakC.x-mx4B)*ARcpH1(AH1_(4.0)*mn4B+peakC.y); AH1 lobeR=max(-hitMinR,hitMaxR); AH1 lobeG=max(-hitMinG,hitMaxG); AH1 lobeB=max(-hitMinB,hitMaxB); @@ -963,12 +963,12 @@ AF1 sharpness){ // Immediate constants for peak range. AH2 peakC=AH2(1.0,-1.0*4.0); // Limiters, these need to be high precision RCPs. 
- AH2 hitMinR=min(mn4R,eR)*ARcpH2(AH2_(4.0)*mx4R); - AH2 hitMinG=min(mn4G,eG)*ARcpH2(AH2_(4.0)*mx4G); - AH2 hitMinB=min(mn4B,eB)*ARcpH2(AH2_(4.0)*mx4B); - AH2 hitMaxR=(peakC.x-max(mx4R,eR))*ARcpH2(AH2_(4.0)*mn4R+peakC.y); - AH2 hitMaxG=(peakC.x-max(mx4G,eG))*ARcpH2(AH2_(4.0)*mn4G+peakC.y); - AH2 hitMaxB=(peakC.x-max(mx4B,eB))*ARcpH2(AH2_(4.0)*mn4B+peakC.y); + AH2 hitMinR=mn4R*ARcpH2(AH2_(4.0)*mx4R); + AH2 hitMinG=mn4G*ARcpH2(AH2_(4.0)*mx4G); + AH2 hitMinB=mn4B*ARcpH2(AH2_(4.0)*mx4B); + AH2 hitMaxR=(peakC.x-mx4R)*ARcpH2(AH2_(4.0)*mn4R+peakC.y); + AH2 hitMaxG=(peakC.x-mx4G)*ARcpH2(AH2_(4.0)*mn4G+peakC.y); + AH2 hitMaxB=(peakC.x-mx4B)*ARcpH2(AH2_(4.0)*mn4B+peakC.y); AH2 lobeR=max(-hitMinR,hitMaxR); AH2 lobeG=max(-hitMinG,hitMaxG); AH2 lobeB=max(-hitMinB,hitMaxB); diff --git a/3rdparty/GPUOpen/include/vk_mem_alloc.h b/3rdparty/GPUOpen/include/vk_mem_alloc.h new file mode 100644 index 0000000000..bb72d53f57 --- /dev/null +++ b/3rdparty/GPUOpen/include/vk_mem_alloc.h @@ -0,0 +1,18120 @@ +// +// Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +// + +#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H +#define AMD_VULKAN_MEMORY_ALLOCATOR_H + +#ifdef __cplusplus +extern "C" { +#endif + +/** \mainpage Vulkan Memory Allocator + +Version 2.3.0 (2019-12-04) + +Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved. 
\n +License: MIT + +Documentation of all members: vk_mem_alloc.h + +\section main_table_of_contents Table of contents + +- User guide + - \subpage quick_start + - [Project setup](@ref quick_start_project_setup) + - [Initialization](@ref quick_start_initialization) + - [Resource allocation](@ref quick_start_resource_allocation) + - \subpage choosing_memory_type + - [Usage](@ref choosing_memory_type_usage) + - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags) + - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types) + - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools) + - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations) + - \subpage memory_mapping + - [Mapping functions](@ref memory_mapping_mapping_functions) + - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) + - [Cache flush and invalidate](@ref memory_mapping_cache_control) + - [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable) + - \subpage staying_within_budget + - [Querying for budget](@ref staying_within_budget_querying_for_budget) + - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage) + - \subpage custom_memory_pools + - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex) + - [Linear allocation algorithm](@ref linear_algorithm) + - [Free-at-once](@ref linear_algorithm_free_at_once) + - [Stack](@ref linear_algorithm_stack) + - [Double stack](@ref linear_algorithm_double_stack) + - [Ring buffer](@ref linear_algorithm_ring_buffer) + - [Buddy allocation algorithm](@ref buddy_algorithm) + - \subpage defragmentation + - [Defragmenting CPU memory](@ref defragmentation_cpu) + - [Defragmenting GPU memory](@ref defragmentation_gpu) + - [Additional notes](@ref defragmentation_additional_notes) + - [Writing custom allocation algorithm](@ref defragmentation_custom_algorithm) + - \subpage lost_allocations + - \subpage statistics + - [Numeric statistics](@ref statistics_numeric_statistics) + - [JSON dump](@ref statistics_json_dump) + - \subpage allocation_annotation + - [Allocation user data](@ref allocation_user_data) + - [Allocation names](@ref allocation_names) + - \subpage debugging_memory_usage + - [Memory initialization](@ref debugging_memory_usage_initialization) + - [Margins](@ref debugging_memory_usage_margins) + - [Corruption detection](@ref debugging_memory_usage_corruption_detection) + - \subpage record_and_replay +- \subpage usage_patterns + - [Common mistakes](@ref usage_patterns_common_mistakes) + - [Simple patterns](@ref usage_patterns_simple) + - [Advanced patterns](@ref usage_patterns_advanced) +- \subpage configuration + - [Pointers to Vulkan functions](@ref config_Vulkan_functions) + - [Custom host memory allocator](@ref custom_memory_allocator) + - [Device memory allocation callbacks](@ref allocation_callbacks) + - [Device heap memory limit](@ref heap_memory_limit) + - \subpage vk_khr_dedicated_allocation +- \subpage general_considerations + - [Thread safety](@ref general_considerations_thread_safety) + - [Validation layer warnings](@ref general_considerations_validation_layer_warnings) + - [Allocation algorithm](@ref general_considerations_allocation_algorithm) + - [Features not supported](@ref general_considerations_features_not_supported) + +\section main_see_also See also + +- [Product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/) +- [Source repository on 
GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
+
+
+
+
+\page quick_start Quick start
+
+\section quick_start_project_setup Project setup
+
+Vulkan Memory Allocator comes in the form of a "stb-style" single header file.
+You don't need to build it as a separate library project.
+You can add this file directly to your project and submit it to your code repository next to your other source files.
+
+"Single header" doesn't mean that everything is contained in C/C++ declarations,
+as it tends to be in the case of inline functions or C++ templates.
+It means that the implementation is bundled with the interface in a single file and needs to be extracted using a preprocessor macro.
+If you don't do it properly, you will get linker errors.
+
+To do it properly:
+
+-# Include the "vk_mem_alloc.h" file in each CPP file where you want to use the library.
+   This includes declarations of all members of the library.
+-# In exactly one CPP file define the following macro before this include.
+   It also enables internal definitions.
+
+\code
+#define VMA_IMPLEMENTATION
+#include "vk_mem_alloc.h"
+\endcode
+
+It may be a good idea to create a dedicated CPP file just for this purpose.
+
+Note on language: This library is written in C++, but has a C-compatible interface.
+Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full
+implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
+
+Please note that this library includes the header `<vulkan/vulkan.h>`, which in turn
+includes `<windows.h>` on Windows. If you need some specific macros defined
+before including these headers (like `WIN32_LEAN_AND_MEAN` or
+`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
+them before every `#include` of this library.
+
+
+\section quick_start_initialization Initialization
+
+At program startup:
+
+-# Initialize Vulkan to have `VkPhysicalDevice` and `VkDevice` objects.
+-# Fill the VmaAllocatorCreateInfo structure and create a #VmaAllocator object by
+   calling vmaCreateAllocator().
+
+\code
+VmaAllocatorCreateInfo allocatorInfo = {};
+allocatorInfo.physicalDevice = physicalDevice;
+allocatorInfo.device = device;
+
+VmaAllocator allocator;
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode
+
+\section quick_start_resource_allocation Resource allocation
+
+When you want to create a buffer or image:
+
+-# Fill the `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
+-# Fill the VmaAllocationCreateInfo structure.
+-# Call vmaCreateBuffer() / vmaCreateImage() to get a `VkBuffer`/`VkImage` with memory
+   already allocated and bound to it.
+
+\code
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufferInfo.size = 65536;
+bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+Don't forget to destroy your objects when no longer needed:
+
+\code
+vmaDestroyBuffer(allocator, buffer, allocation);
+vmaDestroyAllocator(allocator);
+\endcode
+
+
+\page choosing_memory_type Choosing memory type
+
+Physical devices in Vulkan support various combinations of memory heaps and
+types. Help with choosing the correct and optimal memory type for your specific
+resource is one of the key features of this library. You can use it by filling
+appropriate members of the VmaAllocationCreateInfo structure, as described below.
+You can also combine multiple methods.
+
+-# If you just want to find a memory type index that meets your requirements, you
+   can use one of the functions: vmaFindMemoryTypeIndex(), vmaFindMemoryTypeIndexForBufferInfo(),
+   vmaFindMemoryTypeIndexForImageInfo().
+-# If you want to allocate a region of device memory without association with any
+   specific image or buffer, you can use the function vmaAllocateMemory(). Usage of
+   this function is not recommended and usually not needed.
+   The vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
+   which may be useful for sparse binding.
+-# If you already have a buffer or an image created and want to allocate memory
+   for it that you will then bind yourself, you can use the functions
+   vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage().
+   For binding you should use the functions vmaBindBufferMemory(), vmaBindImageMemory()
+   or their extended versions vmaBindBufferMemory2(), vmaBindImageMemory2().
+-# If you want to create a buffer or an image, allocate memory for it and bind
+   them together, all in one call, you can use the functions vmaCreateBuffer(),
+   vmaCreateImage(). This is the easiest and recommended way to use this library.
+
+When using 3. or 4., the library internally queries Vulkan for the memory types
+supported for that buffer or image (function `vkGetBufferMemoryRequirements()`)
+and uses only one of these types.
+
+If no memory type can be found that meets all the requirements, these functions
+return `VK_ERROR_FEATURE_NOT_PRESENT`.
+
+You can leave the VmaAllocationCreateInfo structure completely filled with zeros.
+It means no requirements are specified for the memory type.
+This is valid, although not very useful.
+
+\section choosing_memory_type_usage Usage
+
+The easiest way to specify memory requirements is to fill the member
+VmaAllocationCreateInfo::usage using one of the values of the enum #VmaMemoryUsage.
+It defines high-level, common usage types.
+For more details, see the description of this enum.
+
+For example, if you want to create a uniform buffer that will be filled using
+transfer only once or infrequently and used for rendering every frame, you can
+do it using the following code:
+
+\code
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufferInfo.size = 65536;
+bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+\section choosing_memory_type_required_preferred_flags Required and preferred flags
+
+You can specify more detailed requirements by filling the members
+VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
+with a combination of bits from the enum `VkMemoryPropertyFlags`. For example,
+if you want to create a buffer that will be persistently mapped on the host (so it
+must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
+use the following code:
+
+\code
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+allocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+A memory type is chosen that has all the required flags and as many preferred
+flags set as possible.
+
+If you use VmaAllocationCreateInfo::usage, it is just internally converted to
+a set of required and preferred flags.
+
+\section choosing_memory_type_explicit_memory_types Explicit memory types
+
+If you inspected the memory types available on the physical device and you have
+a preference for memory types that you want to use, you can fill the member
+VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set
+means that a memory type with that index is allowed to be used for the
+allocation. The special value 0, just like `UINT32_MAX`, means there are no
+restrictions on the memory type index.
+
+Please note that this member is NOT just a memory type index.
+Still, you can use it to choose just one specific memory type.
+For example, if you already determined that your buffer should be created in
+memory type 2, use the following code:
+
+\code
+uint32_t memoryTypeIndex = 2;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+\section choosing_memory_type_custom_memory_pools Custom memory pools
+
+If you allocate from a custom memory pool, all the ways of specifying memory
+requirements described above are not applicable and the aforementioned members
+of the VmaAllocationCreateInfo structure are ignored. The memory type is selected
+explicitly when creating the pool and then used to make all the allocations from
+that pool. For further details, see \ref custom_memory_pools.
+
+\section choosing_memory_type_dedicated_allocations Dedicated allocations
+
+Memory for allocations is reserved out of a larger block of `VkDeviceMemory`
+allocated from Vulkan internally. That's the main feature of this whole library.
+You can still request a separate memory block to be created for an allocation,
+just like you would do in a trivial solution without using any allocator.
+In that case, a buffer or image is always bound to that memory at offset 0.
+This is called a "dedicated allocation".
+You can explicitly request it by using the flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+The library can also internally decide to use dedicated allocation in some cases, e.g.:
+
+- When the size of the allocation is large.
+- When the [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
+  and it reports that dedicated allocation is required or recommended for the resource.
+- When allocation of the next big memory block fails due to not enough device memory,
+  but an allocation with the exact requested size succeeds.
+
+
+\page memory_mapping Memory mapping
+
+To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
+to be able to read from it or write to it in CPU code.
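+For reference, a minimal hedged sketch of the raw calls involved (`device`, a
+`HOST_VISIBLE` `memory` block, its `size`, and a `srcData` pointer are assumed
+names for this illustration, not objects defined above):
+
+\code
+// Raw Vulkan mapping, shown only for orientation - prefer the VMA functions below.
+void* pData = nullptr;
+if(vkMapMemory(device, memory, 0, size, 0, &pData) == VK_SUCCESS)
+{
+    memcpy(pData, srcData, (size_t)size);
+    vkUnmapMemory(device, memory);
+}
+\endcode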
+Mapping is possible only for memory allocated from a memory type that has
+the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
+Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
+You can use them directly with memory allocated by this library,
+but it is not recommended because of the following issue:
+Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
+This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
+Because of this, Vulkan Memory Allocator provides the following facilities:
+
+\section memory_mapping_mapping_functions Mapping functions
+
+The library provides the following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
+They are safer and more convenient to use than the standard Vulkan functions.
+You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
+You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
+The way it's implemented is that the library always maps the entire memory block, not just the region of the allocation.
+For further details, see the description of the vmaMapMemory() function.
+Example:
+
+\code
+// Having these objects initialized:
+
+struct ConstantBuffer
+{
+    ...
+};
+ConstantBuffer constantBufferData;
+
+VmaAllocator allocator;
+VkBuffer constantBuffer;
+VmaAllocation constantBufferAllocation;
+
+// You can map and fill your buffer using the following code:
+
+void* mappedData;
+vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
+memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
+vmaUnmapMemory(allocator, constantBufferAllocation);
+\endcode
+
+When mapping, you may see a warning from the Vulkan validation layer similar to this one:
+
+Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.
+
+It happens because the library maps the entire `VkDeviceMemory` block, where different
+types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
+You can safely ignore it if you are sure you access only memory of the intended
+object that you wanted to map.
+
+
+\section memory_mapping_persistently_mapped_memory Persistently mapped memory
+
+Keeping your memory persistently mapped is generally OK in Vulkan.
+You don't need to unmap it before using its data on the GPU.
+The library provides a special feature designed for that:
+Allocations made with the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
+VmaAllocationCreateInfo::flags stay mapped all the time,
+so you can just access the CPU pointer to it any time
+without a need to call any "map" or "unmap" function.
+Example:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+// Buffer is already mapped. You can access its memory.
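+// (allocInfo.pMappedData is that pointer; with VMA_ALLOCATION_CREATE_MAPPED_BIT
+// it stays valid for the allocation's entire lifetime, so no map/unmap calls.)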
+memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
+\endcode
+
+There are some exceptions though, when you should consider mapping memory only for a short period of time:
+
+- When the operating system is Windows 7 or 8.x (Windows 10 is not affected because it uses WDDM2),
+  the device is a discrete AMD GPU,
+  and the memory type is the special 256 MiB pool of `DEVICE_LOCAL + HOST_VISIBLE` memory
+  (selected when you use #VMA_MEMORY_USAGE_CPU_TO_GPU),
+  then whenever a memory block allocated from this memory type stays mapped
+  for the time of any call to `vkQueueSubmit()` or `vkQueuePresentKHR()`, this
+  block is migrated by WDDM to system RAM, which degrades performance. It doesn't
+  matter if that particular memory block is actually used by the command buffer
+  being submitted.
+- On Mac/MoltenVK there is a known bug - [Issue #175](https://github.com/KhronosGroup/MoltenVK/issues/175)
+  which requires unmapping before the GPU can see the updated texture.
+- Keeping many large memory blocks mapped may impact performance or stability of some debugging tools.
+
+\section memory_mapping_cache_control Cache flush and invalidate
+
+Memory in Vulkan doesn't need to be unmapped before using it on the GPU,
+but unless a memory type has `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
+you need to manually **invalidate** the cache before reading from a mapped pointer
+and **flush** the cache after writing to a mapped pointer.
+Map/unmap operations don't do that automatically.
+Vulkan provides the following functions for this purpose: `vkFlushMappedMemoryRanges()`,
+`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
+functions that refer to a given allocation object: vmaFlushAllocation(),
+vmaInvalidateAllocation().
+
+Regions of memory specified for flush/invalidate must be aligned to
+`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
+In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
+within blocks are aligned to this value, so their offsets are always a multiple of
+`nonCoherentAtomSize` and two different allocations never share the same "line" of this size.
+
+Please note that memory allocated with #VMA_MEMORY_USAGE_CPU_ONLY is guaranteed to be `HOST_COHERENT`.
+
+Also, Windows drivers from all 3 **PC** GPU vendors (AMD, Intel, NVIDIA)
+currently provide `HOST_COHERENT` flag on all memory types that are
+`HOST_VISIBLE`, so on this platform you may not need to bother.
+
+\section memory_mapping_finding_if_memory_mappable Finding out if memory is mappable
+
+It may happen that your allocation ends up in memory that is `HOST_VISIBLE` (available for mapping)
+even though it wasn't explicitly requested.
+For example, the application may work on integrated graphics with unified memory (like Intel), or
+allocation from video memory might have failed, so the library chose system memory as fallback.
+
+You can detect this case and map such allocation to access its memory on the CPU directly,
+instead of launching a transfer operation.
+In order to do that: inspect `allocInfo.memoryType`, call vmaGetMemoryTypeProperties(),
+and look for `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag in properties of that memory type.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+VkMemoryPropertyFlags memFlags;
+vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
+if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+{
+    // Allocation ended up in mappable memory. You can map it and access it directly.
+    void* mappedData;
+    vmaMapMemory(allocator, alloc, &mappedData);
+    memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
+    vmaUnmapMemory(allocator, alloc);
+}
+else
+{
+    // Allocation ended up in non-mappable memory.
+    // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer.
+}
+\endcode
+
+You can even use #VMA_ALLOCATION_CREATE_MAPPED_BIT flag while creating allocations
+that are not necessarily `HOST_VISIBLE` (e.g. using #VMA_MEMORY_USAGE_GPU_ONLY).
+If the allocation ends up in a memory type that is `HOST_VISIBLE`, it will be persistently mapped and you can use it directly.
+If not, the flag is just ignored.
+Example:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+if(allocInfo.pMappedData != nullptr)
+{
+    // Allocation ended up in mappable memory.
+    // It's persistently mapped. You can access it directly.
+    memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
+}
+else
+{
+    // Allocation ended up in non-mappable memory.
+    // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer.
+}
+\endcode
+
+
+\page staying_within_budget Staying within budget
+
+When developing a graphics-intensive game or program, it is important to avoid allocating
+more GPU memory than is physically available. When the memory is over-committed,
+various bad things can happen, depending on the specific GPU, graphics driver, and
+operating system:
+
+- It may just work without any problems.
+- The application may slow down because some memory blocks are moved to system RAM
+  and the GPU has to access them through the PCI Express bus.
+- A new allocation may take a very long time to complete, even a few seconds, and possibly
+  freeze the entire system.
+- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+- It may even result in a GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
+  returned somewhere later.
+
+\section staying_within_budget_querying_for_budget Querying for budget
+
+To query for current memory usage and available budget, use function vmaGetBudget().
+Returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
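+
+For example, a minimal sketch of such a query might look like this (assuming
+`allocator` is an initialized #VmaAllocator and `memProps` holds the
+`VkPhysicalDeviceMemoryProperties` of its physical device):
+
+\code
+VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
+vmaGetBudget(allocator, budgets);
+
+for(uint32_t heapIndex = 0; heapIndex < memProps.memoryHeapCount; ++heapIndex)
+{
+    // usage and budget are expressed in bytes, per memory heap.
+    printf("Heap %u: %llu B used out of %llu B budget\n", heapIndex,
+        (unsigned long long)budgets[heapIndex].usage,
+        (unsigned long long)budgets[heapIndex].budget);
+}
+\endcode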
+
+Please note that this function returns different information and works faster than
+vmaCalculateStats(). vmaGetBudget() can be called every frame or even before every
+allocation, while vmaCalculateStats() is intended to be used rarely,
+only to obtain statistical information, e.g. for debugging purposes.
+
+It is recommended to use the VK_EXT_memory_budget device extension to obtain information
+about the budget from the Vulkan device. VMA is able to use this extension automatically.
+When not enabled, the allocator behaves the same way, but then it estimates current usage
+and available budget based on its internal information and Vulkan memory heap sizes,
+which may be less precise. In order to use this extension:
+
+1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
+   required by it are available and enable them. Please note that the first is a device
+   extension and the second is an instance extension!
+2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object.
+3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from
+   Vulkan inside of it to avoid the overhead of querying it with every allocation.
+
+\section staying_within_budget_controlling_memory_usage Controlling memory usage
+
+There are many ways in which you can try to stay within the budget.
+
+First, when making a new allocation requires allocating a new memory block, the library
+tries not to exceed the budget automatically. If a block with the default recommended size
+(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
+dedicated memory for just this resource.
+
+If the size of the requested resource plus current memory usage is more than the
+budget, by default the library still tries to create it, leaving it to the Vulkan
+implementation whether the allocation succeeds or fails. You can change this behavior
+by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
+not made if it would exceed the budget or if the budget is already exceeded.
+Some other allocations become lost instead to make room for it, if the mechanism of
+[lost allocations](@ref lost_allocations) is used.
+If that is not possible, the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+An example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
+when creating resources that are not essential for the application (e.g. the texture
+of a specific object) and not to pass it when creating critically important resources
+(e.g. render targets).
+
+Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
+a new allocation is created only when it fits inside one of the existing memory blocks.
+If it would require allocating a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+This also ensures that the function call is very fast because it never goes to Vulkan
+to obtain a new block.
+
+Please note that creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
+set to more than 0 will try to allocate memory blocks without checking whether they
+fit within budget.
+
+
+\page custom_memory_pools Custom memory pools
+
+A memory pool contains a number of `VkDeviceMemory` blocks.
+The library automatically creates and manages a default pool for each memory type available on the device.
+The default memory pool automatically grows in size.
+The size of allocated blocks is also variable and managed automatically.
+
+You can create a custom pool and allocate memory out of it.
+It can be useful if you want to:
+
+- Keep certain kinds of allocations separate from others.
+- Enforce a particular, fixed size of Vulkan memory blocks.
+- Limit the maximum amount of Vulkan memory allocated for that pool.
+- Reserve a minimum or fixed amount of Vulkan memory always preallocated for that pool.
+
+To use custom memory pools:
+
+-# Fill VmaPoolCreateInfo structure.
+-# Call vmaCreatePool() to obtain #VmaPool handle.
+-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
+   You don't need to specify any other parameters of this structure, like `usage`.
+
+Example:
+
+\code
+// Create a pool that can have at most 2 blocks, 128 MiB each.
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = ...
+poolCreateInfo.blockSize = 128ull * 1024 * 1024;
+poolCreateInfo.maxBlockCount = 2;
+
+VmaPool pool;
+vmaCreatePool(allocator, &poolCreateInfo, &pool);
+
+// Allocate a buffer out of it.
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 1024;
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.pool = pool;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+\endcode
+
+You have to free all allocations made from this pool before destroying it.
+
+\code
+vmaDestroyBuffer(allocator, buf, alloc);
+vmaDestroyPool(allocator, pool);
+\endcode
+
+\section custom_memory_pools_MemTypeIndex Choosing memory type index
+
+When creating a pool, you must explicitly specify the memory type index.
+To find the one suitable for your buffers or images, you can use helper functions
+vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
+You need to provide structures with example parameters of buffers or images
+that you are going to create in that pool.
+
+\code
+VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+exampleBufCreateInfo.size = 1024; // Whatever.
+exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; // Change if needed.
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // Change if needed.
+
+uint32_t memTypeIndex;
+vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
+
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+// ...
+\endcode
+
+When creating buffers/images allocated in that pool, provide the following parameters:
+
+- `VkBufferCreateInfo`: Prefer to pass the same parameters as above.
+  Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
+  Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
+  or the other way around.
+- VmaAllocationCreateInfo: You don't need to pass the same parameters. Fill only the `pool` member.
+  Other members are ignored anyway.
+
+\section linear_algorithm Linear allocation algorithm
+
+Each Vulkan memory block managed by this library has accompanying metadata that
+keeps track of used and unused regions. By default, the metadata structure and
+algorithm tries to find the best place for new allocations among free regions to
+optimize memory usage.
This way you can allocate and free objects in any order.
+
+![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
+
+Sometimes there is a need to use a simpler, linear allocation algorithm. You can
+create a custom pool that uses such algorithm by adding flag
+#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
+#VmaPool object. Then an alternative metadata management is used. It always
+creates new allocations after the last one and doesn't reuse free regions after
+allocations freed in the middle. It results in better allocation performance and
+less memory consumed by metadata.
+
+![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
+
+With this one flag, you can create a custom pool that can be used in many ways:
+free-at-once, stack, double stack, and ring buffer. See below for details.
+
+\subsection linear_algorithm_free_at_once Free-at-once
+
+In a pool that uses linear algorithm, you still need to free all the allocations
+individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
+them in any order. New allocations are always made after the last one - free space
+in the middle is not reused. However, when you release all the allocations and
+the pool becomes empty, allocation starts from the beginning again. This way you
+can use the linear algorithm to speed up creation of allocations that you are going
+to release all at once.
+
+![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
+
+This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
+value that allows multiple memory blocks.
+
+\subsection linear_algorithm_stack Stack
+
+When you free an allocation that was created last, its space can be reused.
+Thanks to this, if you always release allocations in the order opposite to their
+creation (LIFO - Last In First Out), you can achieve the behavior of a stack.
+
+![Stack](../gfx/Linear_allocator_4_stack.png)
+
+This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
+value that allows multiple memory blocks.
+
+\subsection linear_algorithm_double_stack Double stack
+
+The space reserved by a custom pool with linear algorithm may be used by two
+stacks:
+
+- First, the default one, growing up from offset 0.
+- Second, the "upper" one, growing down from the end towards lower offsets.
+
+To make an allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
+to VmaAllocationCreateInfo::flags.
+
+![Double stack](../gfx/Linear_allocator_7_double_stack.png)
+
+Double stack is available only in pools with one memory block -
+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
+
+When the two stacks' ends meet so there is not enough space between them for a
+new allocation, such allocation fails with the usual
+`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
+
+\subsection linear_algorithm_ring_buffer Ring buffer
+
+When you free some allocations from the beginning and there is not enough free space
+for a new one at the end of a pool, the allocator's "cursor" wraps around to the
+beginning and starts allocating there. Thanks to this, if you always release
+allocations in the same order as you created them (FIFO - First In First Out),
+you can achieve the behavior of a ring buffer / queue.
+
+![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
+
+Pools with linear algorithm support [lost allocations](@ref lost_allocations) when used as a ring buffer.
+If there is not enough free space for a new allocation, but existing allocations
+from the front of the queue can become lost, they become lost and the allocation
+succeeds.
+
+![Ring buffer with lost allocations](../gfx/Linear_allocator_6_ring_buffer_lost.png)
+
+Ring buffer is available only in pools with one memory block -
+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
+
+\section buddy_algorithm Buddy allocation algorithm
+
+There is another allocation algorithm that can be used with custom pools, called
+"buddy". Its internal data structure is based on a tree of blocks, each having
+a size that is a power of two and half of its parent's size. When you want to
+allocate memory of a certain size, a free node in the tree is located. If it's too
+large, it is recursively split into two halves (called "buddies"). However, if the
+requested allocation size is not a power of two, the size of a tree node is
+aligned up to the nearest power of two and the remaining space is wasted. When
+two buddy nodes become free, they are merged back into one larger node.
+
+![Buddy allocator](../gfx/Buddy_allocator.png)
+
+The advantage of the buddy allocation algorithm over the default algorithm is faster
+allocation and deallocation, as well as smaller external fragmentation. The
+disadvantage is more wasted space (internal fragmentation).
+
+For more information, please read ["Buddy memory allocation" on Wikipedia](https://en.wikipedia.org/wiki/Buddy_memory_allocation)
+or other sources that describe this concept in general.
+
+To use buddy allocation algorithm with a custom pool, add flag
+#VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
+#VmaPool object.
+
+Several limitations apply to pools that use buddy algorithm:
+
+- It is recommended to use VmaPoolCreateInfo::blockSize that is a power of two.
+  Otherwise, only the largest power of two smaller than the size is used for
+  allocations. The remaining space always stays unused.
+- [Margins](@ref debugging_memory_usage_margins) and
+  [corruption detection](@ref debugging_memory_usage_corruption_detection)
+  don't work in such pools.
+- [Lost allocations](@ref lost_allocations) don't work in such pools. You can
+  use them, but they never become lost. Support may be added in the future.
+- [Defragmentation](@ref defragmentation) doesn't work with allocations made from
+  such pool.
+
+\page defragmentation Defragmentation
+
+Interleaved allocations and deallocations of many objects of varying size can
+cause fragmentation over time, which can lead to a situation where the library is unable
+to find a continuous range of free memory for a new allocation even though there is
+enough free space, just scattered across many small free ranges between existing
+allocations.
+
+To mitigate this problem, you can use the defragmentation feature:
+structure #VmaDefragmentationInfo2 and functions vmaDefragmentationBegin(), vmaDefragmentationEnd().
+Given a set of allocations,
+this function can move them to compact used memory, ensure more continuous free
+space and possibly also free some `VkDeviceMemory` blocks.
+
+What the defragmentation does is:
+
+- Updates #VmaAllocation objects to point to new `VkDeviceMemory` and offset.
+  After an allocation has been moved, its VmaAllocationInfo::deviceMemory and/or
+  VmaAllocationInfo::offset changes. You must query them again using
+  vmaGetAllocationInfo() if you need them.
+- Moves actual data in memory.
+
+What it doesn't do, so you need to do it yourself:
+
+- Recreate buffers and images that were bound to allocations that were defragmented and
+  bind them to their new places in memory.
+  You must use `vkDestroyBuffer()`, `vkDestroyImage()`,
+  `vkCreateBuffer()`, `vkCreateImage()`, vmaBindBufferMemory(), vmaBindImageMemory()
+  for that purpose and NOT vmaDestroyBuffer(),
+  vmaDestroyImage(), vmaCreateBuffer(), vmaCreateImage(), because you don't need to
+  destroy or create allocation objects!
+- Recreate views and update descriptors that point to these buffers and images.
+
+\section defragmentation_cpu Defragmenting CPU memory
+
+The following example demonstrates how you can run defragmentation on CPU.
+Only allocations created in memory types that are `HOST_VISIBLE` can be defragmented.
+Others are ignored.
+
+The way it works is:
+
+- It temporarily maps entire memory blocks when necessary.
+- It moves data using `memmove()` function.
+
+\code
+// Given the following variables already initialized:
+VkDevice device;
+VmaAllocator allocator;
+std::vector<VkBuffer> buffers;
+std::vector<VmaAllocation> allocations;
+
+
+const uint32_t allocCount = (uint32_t)allocations.size();
+std::vector<VkBool32> allocationsChanged(allocCount);
+
+VmaDefragmentationInfo2 defragInfo = {};
+defragInfo.allocationCount = allocCount;
+defragInfo.pAllocations = allocations.data();
+defragInfo.pAllocationsChanged = allocationsChanged.data();
+defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // No limit.
+defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No limit.
+
+VmaDefragmentationContext defragCtx;
+vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
+vmaDefragmentationEnd(allocator, defragCtx);
+
+for(uint32_t i = 0; i < allocCount; ++i)
+{
+    if(allocationsChanged[i])
+    {
+        // Destroy buffer that is immutably bound to memory region which is no longer valid.
+        vkDestroyBuffer(device, buffers[i], nullptr);
+
+        // Create new buffer with same parameters.
+        VkBufferCreateInfo bufferInfo = ...;
+        vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
+
+        // You can make a dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
+
+        // Bind new buffer to new memory region. Data contained in it is already moved.
+        VmaAllocationInfo allocInfo;
+        vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
+        vmaBindBufferMemory(allocator, allocations[i], buffers[i]);
+    }
+}
+\endcode
+
+Setting VmaDefragmentationInfo2::pAllocationsChanged is optional.
+This output array tells whether a particular allocation in VmaDefragmentationInfo2::pAllocations at the same index
+has been modified during defragmentation.
+You can pass null, but you then need to query every allocation passed to defragmentation
+for new parameters using vmaGetAllocationInfo() if you might need to recreate and rebind a buffer or image associated with it.
+
+If you use [Custom memory pools](@ref choosing_memory_type_custom_memory_pools),
+you can fill VmaDefragmentationInfo2::poolCount and VmaDefragmentationInfo2::pPools
+instead of VmaDefragmentationInfo2::allocationCount and VmaDefragmentationInfo2::pAllocations
+to defragment all allocations in the given pools.
+You cannot use VmaDefragmentationInfo2::pAllocationsChanged in that case.
+You can also combine both methods.
+
+\section defragmentation_gpu Defragmenting GPU memory
+
+It is also possible to defragment allocations created in memory types that are not `HOST_VISIBLE`.
+To do that, you need to pass a command buffer that meets requirements as described in
+VmaDefragmentationInfo2::commandBuffer. The way it works is:
+
+- It creates temporary buffers and binds them to entire memory blocks when necessary.
+- It issues `vkCmdCopyBuffer()` to the passed command buffer.
+
+Example:
+
+\code
+// Given the following variables already initialized:
+VkDevice device;
+VmaAllocator allocator;
+VkCommandBuffer commandBuffer;
+std::vector<VkBuffer> buffers;
+std::vector<VmaAllocation> allocations;
+
+
+const uint32_t allocCount = (uint32_t)allocations.size();
+std::vector<VkBool32> allocationsChanged(allocCount);
+
+VkCommandBufferBeginInfo cmdBufBeginInfo = ...;
+vkBeginCommandBuffer(commandBuffer, &cmdBufBeginInfo);
+
+VmaDefragmentationInfo2 defragInfo = {};
+defragInfo.allocationCount = allocCount;
+defragInfo.pAllocations = allocations.data();
+defragInfo.pAllocationsChanged = allocationsChanged.data();
+defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE; // Notice it's "GPU" this time.
+defragInfo.maxGpuAllocationsToMove = UINT32_MAX; // Notice it's "GPU" this time.
+defragInfo.commandBuffer = commandBuffer;
+
+VmaDefragmentationContext defragCtx;
+vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
+
+vkEndCommandBuffer(commandBuffer);
+
+// Submit commandBuffer.
+// Wait for a fence that ensures commandBuffer execution finished.
+
+vmaDefragmentationEnd(allocator, defragCtx);
+
+for(uint32_t i = 0; i < allocCount; ++i)
+{
+    if(allocationsChanged[i])
+    {
+        // Destroy buffer that is immutably bound to memory region which is no longer valid.
+        vkDestroyBuffer(device, buffers[i], nullptr);
+
+        // Create new buffer with same parameters.
+        VkBufferCreateInfo bufferInfo = ...;
+        vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
+
+        // You can make a dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
+
+        // Bind new buffer to new memory region. Data contained in it is already moved.
+        VmaAllocationInfo allocInfo;
+        vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
+        vmaBindBufferMemory(allocator, allocations[i], buffers[i]);
+    }
+}
+\endcode
+
+You can combine these two methods by specifying non-zero `maxGpu*` as well as `maxCpu*` parameters.
+The library automatically chooses the best method to defragment each memory pool.
+
+You don't have to block your entire program waiting until defragmentation finishes -
+you can run it in the background, as long as you carefully fulfill the requirements
+described in function vmaDefragmentationBegin().
+
+\section defragmentation_additional_notes Additional notes
+
+It is only legal to defragment allocations bound to:
+
+- buffers
+- images created with `VK_IMAGE_CREATE_ALIAS_BIT`, `VK_IMAGE_TILING_LINEAR`, and
+  being currently in `VK_IMAGE_LAYOUT_GENERAL` or `VK_IMAGE_LAYOUT_PREINITIALIZED`.
+
+Defragmentation of images created with `VK_IMAGE_TILING_OPTIMAL` or in any other
+layout may give undefined results.
+
+If you defragment allocations bound to images, new images to be bound to the new
+memory region after defragmentation should be created with `VK_IMAGE_LAYOUT_PREINITIALIZED`
+and then transitioned to their original layout from before defragmentation if
+needed using an image memory barrier.
+
+While using defragmentation, you may experience validation layer warnings, which you just need to ignore.
+See [Validation layer warnings](@ref general_considerations_validation_layer_warnings).
+
+Please don't expect memory to be fully compacted after defragmentation.
+The algorithms inside are based on heuristics that try to maximize the number of Vulkan
+memory blocks that become totally empty so they can be released, as well as to maximize
+continuous empty space inside remaining blocks, while minimizing the number and size of
+allocations that need to be moved. Some fragmentation may still remain - this is normal.
+
+\section defragmentation_custom_algorithm Writing custom defragmentation algorithm
+
+If you want to implement your own, custom defragmentation algorithm,
+there is infrastructure prepared for that,
+but it is not exposed through the library API - you need to hack its source code.
+Here are the steps needed to do this:
+
+-# The main thing you need to do is define your own class derived from the base abstract
+   class `VmaDefragmentationAlgorithm` and implement your version of its pure virtual methods.
+   See definition and comments of this class for details.
+-# Your code needs to interact with device memory block metadata.
+   If you need more access to its data than is provided by its public interface,
+   declare your new class as a friend class e.g. in class `VmaBlockMetadata_Generic`.
+-# If you want to create a flag that would enable your algorithm or pass some additional
+   flags to configure it, add them to `VmaDefragmentationFlagBits` and use them in
+   VmaDefragmentationInfo2::flags.
+-# Modify function `VmaBlockVectorDefragmentationContext::Begin` to create object
+   of your new class whenever needed.
+
+
+\page lost_allocations Lost allocations
+
+If your game oversubscribes video memory, it may work OK in previous-generation
+graphics APIs (DirectX 9, 10, 11, OpenGL) because resources are automatically
+paged to system RAM. In Vulkan you can't do that because when you run out of
+memory, an allocation just fails. If you have more data (e.g. textures) than can
+fit into VRAM and you don't need it all at once, you may want to upload them to
+GPU on demand and "push out" ones that are not used for a long time to make room
+for the new ones, effectively using VRAM (or a certain memory pool) as a form of
+cache. Vulkan Memory Allocator can help you with that by supporting a concept of
+"lost allocations".
+
+To create an allocation that can become lost, include #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
+flag in VmaAllocationCreateInfo::flags. Before using a buffer or image bound to
+such allocation in every new frame, you need to check that it is not lost.
+To check it, call vmaTouchAllocation().
+If the allocation is lost, you should not use it or the buffer/image bound to it.
+You mustn't forget to destroy this allocation and this buffer/image.
+vmaGetAllocationInfo() can also be used for checking status of the allocation.
+An allocation is lost when the returned VmaAllocationInfo::deviceMemory == `VK_NULL_HANDLE`.
+
+To create an allocation that can make some other allocations lost to make room
+for it, use #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag. You will
+usually use both flags #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT and
+#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT at the same time.
+
+Warning! The current implementation uses a quite naive, brute-force algorithm,
+which can make allocation calls that use #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
+flag quite slow. A new, more optimal algorithm and data structure to speed this
+up is planned for the future.
+
+Q: When interleaving creation of new allocations with usage of existing ones,
+how do you make sure that an allocation won't become lost while it's used in the
+current frame?
+
+It is ensured because vmaTouchAllocation() / vmaGetAllocationInfo() not only returns allocation
+status/parameters and checks whether it's not lost, but when it's not, it also
+atomically marks it as used in the current frame, which makes it impossible to
+become lost in that frame. It uses a lockless algorithm, so it works fast and
+doesn't involve locking any internal mutex.
+
+Q: What if my allocation may still be in use by the GPU when it's rendering a
+previous frame while I'm already submitting a new frame on the CPU?
+
+You can make sure that allocations "touched" by vmaTouchAllocation() / vmaGetAllocationInfo() will not
+become lost for a number of additional frames back from the current one by
+specifying this number as VmaAllocatorCreateInfo::frameInUseCount (for default
+memory pool) and VmaPoolCreateInfo::frameInUseCount (for custom pool).
+
+Q: How do you inform the library when a new frame starts?
+
+You need to call function vmaSetCurrentFrameIndex().
+
+Example code:
+
+\code
+struct MyBuffer
+{
+    VkBuffer m_Buf = VK_NULL_HANDLE;
+    VmaAllocation m_Alloc = VK_NULL_HANDLE;
+
+    // Called when the buffer is really needed in the current frame.
+    void EnsureBuffer();
+};
+
+void MyBuffer::EnsureBuffer()
+{
+    // Buffer has been created.
+    if(m_Buf != VK_NULL_HANDLE)
+    {
+        // Check if its allocation is not lost + mark it as used in current frame.
+        if(vmaTouchAllocation(allocator, m_Alloc))
+        {
+            // It's all OK - safe to use m_Buf.
+            return;
+        }
+    }
+
+    // Buffer doesn't exist yet or is lost - destroy and recreate it.
+
+    vmaDestroyBuffer(allocator, m_Buf, m_Alloc);
+
+    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    bufCreateInfo.size = 1024;
+    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+    VmaAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
+        VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
+
+    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &m_Buf, &m_Alloc, nullptr);
+}
+\endcode
+
+When using lost allocations, you may see some Vulkan validation layer warnings
+about overlapping regions of memory bound to different kinds of buffers and
+images. This is still valid as long as you implement proper handling of lost
+allocations (like in the example above) and don't use them.
+
+You can create an allocation that is already in the lost state from the beginning using function
+vmaCreateLostAllocation(). It may be useful if you need a "dummy" allocation that is not null.
+
+You can call function vmaMakePoolAllocationsLost() to set all eligible allocations
+in a specified custom pool to the lost state.
+Allocations that have been "touched" in the current frame or VmaPoolCreateInfo::frameInUseCount frames back
+cannot become lost.
+
+Q: Can I touch an allocation that cannot become lost?
+
+Yes, although it has no visible effect.
+Calls to vmaGetAllocationInfo() and vmaTouchAllocation() update the last use frame index
+also for allocations that cannot become lost, but the only way to observe it is to dump
+internal allocator state using vmaBuildStatsString().
+You can use this feature for debugging purposes to explicitly mark allocations that you use
+in the current frame and then analyze the JSON dump to see for how long each allocation stays unused.
+
+
+\page statistics Statistics
+
+This library contains functions that return information about its internal state,
+especially the amount of memory allocated from Vulkan.
+Please keep in mind that these functions need to traverse all internal data structures
+to gather this information, so they may be quite time-consuming.
+Don't call them too often.
+
+\section statistics_numeric_statistics Numeric statistics
+
+You can query for overall statistics of the allocator using function vmaCalculateStats().
+Information is returned in structure #VmaStats.
+It contains #VmaStatInfo - number of allocated blocks, number of allocations
+(occupied ranges in these blocks), number of unused (free) ranges in these blocks,
+number of bytes used and unused (but still allocated from Vulkan) and other information.
+They are summed across memory heaps, memory types, and in total for the whole allocator.
+
+You can query for statistics of a custom pool using function vmaGetPoolStats().
+Information is returned in structure #VmaPoolStats.
+
+You can query for information about a specific allocation using function vmaGetAllocationInfo().
+It fills structure #VmaAllocationInfo.
+
+\section statistics_json_dump JSON dump
+
+You can dump the internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
+The result is guaranteed to be correct JSON.
+It uses ANSI encoding.
+Any strings provided by the user (see [Allocation names](@ref allocation_names))
+are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
+this JSON string can be treated as using that encoding.
+It must be freed using function vmaFreeStatsString().
+
+The format of this JSON string is not part of official documentation of the library,
+but it will not change in a backward-incompatible way without increasing the library's major version number
+and an appropriate mention in the changelog.
+
+The JSON string contains all the data that can be obtained using vmaCalculateStats().
+It can also contain a detailed map of allocated memory blocks and their regions -
+free and occupied by allocations.
+This allows e.g. to visualize the memory or assess fragmentation.
+
+
+\page allocation_annotation Allocation names and user data
+
+\section allocation_user_data Allocation user data
+
+You can annotate allocations with your own information, e.g. for debugging purposes.
+To do that, fill VmaAllocationCreateInfo::pUserData field when creating
+an allocation. It's an opaque `void*` pointer. You can use it e.g. as a pointer,
+some handle, index, key, ordinal number or any other value that would associate
+the allocation with your custom metadata.
+
+\code
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+// Fill bufferInfo...
+
+MyBufferMetadata* pMetadata = CreateBufferMetadata();
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.pUserData = pMetadata;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
+\endcode
+
+The pointer may be later retrieved as VmaAllocationInfo::pUserData:
+
+\code
+VmaAllocationInfo allocInfo;
+vmaGetAllocationInfo(allocator, allocation, &allocInfo);
+MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
+\endcode
+
+It can also be changed using function vmaSetAllocationUserData().
+
+Values of (non-zero) allocations' `pUserData` are printed in JSON report created by
+vmaBuildStatsString(), in hexadecimal form.
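+
+For example, to replace the metadata pointer associated with an existing allocation
+later (a sketch - `pNewMetadata` stands for any pointer of your own type):
+
+\code
+vmaSetAllocationUserData(allocator, allocation, pNewMetadata);
+\endcode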
+
+\section allocation_names Allocation names
+
+There is an alternative mode available where `pUserData` pointer is used to point to
+a null-terminated string, giving a name to the allocation. To use this mode,
+set #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT flag in VmaAllocationCreateInfo::flags.
+Then `pUserData` passed as VmaAllocationCreateInfo::pUserData or argument to
+vmaSetAllocationUserData() must be either null or a pointer to a null-terminated string.
+The library creates an internal copy of the string, so the pointer you pass doesn't need
+to be valid for the whole lifetime of the allocation. You can free it after the call.
+
+\code
+VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+// Fill imageInfo...
+
+std::string imageName = "Texture: ";
+imageName += fileName;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
+allocCreateInfo.pUserData = imageName.c_str();
+
+VkImage image;
+VmaAllocation allocation;
+vmaCreateImage(allocator, &imageInfo, &allocCreateInfo, &image, &allocation, nullptr);
+\endcode
+
+The value of the `pUserData` pointer of the allocation will be different from the one
+you passed when setting the allocation's name - it points to a buffer managed
+internally that holds a copy of the string.
+
+\code
+VmaAllocationInfo allocInfo;
+vmaGetAllocationInfo(allocator, allocation, &allocInfo);
+const char* imageName = (const char*)allocInfo.pUserData;
+printf("Image name: %s\n", imageName);
+\endcode
+
+That string is also printed in JSON report created by vmaBuildStatsString().
+
+\note Passing a string name to a VMA allocation doesn't automatically set it on the Vulkan buffer or image created with it.
+You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.
+
+
+\page debugging_memory_usage Debugging incorrect memory usage
+
+If you suspect a bug with memory usage, like usage of uninitialized memory or
+memory being overwritten out of bounds of an allocation,
+you can use debug features of this library to verify this.
+
+\section debugging_memory_usage_initialization Memory initialization
+
+If you experience a bug with incorrect and nondeterministic data in your program and you suspect that uninitialized memory is being used,
+you can enable automatic memory initialization to verify this.
+To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
+
+\code
+#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
+#include "vk_mem_alloc.h"
+\endcode
+
+It makes memory of all new allocations initialized to bit pattern `0xDCDCDCDC`.
+Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.
+Memory is automatically mapped and unmapped if necessary.
+
+If you find these values while debugging your program, chances are good that you incorrectly
+read Vulkan memory that is allocated but not initialized, or already freed, respectively.
+
+Memory initialization works only with memory types that are `HOST_VISIBLE`.
+It also works with dedicated allocations.
+It doesn't work with allocations created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
+as they cannot be mapped.
+
+\section debugging_memory_usage_margins Margins
+
+By default, allocations are laid out in memory blocks next to each other if possible
+(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
+
+![Allocations without margin](../gfx/Margins_1.png)
+
+Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce the specified
+number of bytes as a margin before and after every allocation.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#include "vk_mem_alloc.h"
+\endcode
+
+![Allocations with margin](../gfx/Margins_2.png)
+
+If your bug goes away after enabling margins, it means it may be caused by memory
+being overwritten outside of allocation boundaries. It is not 100% certain though.
+A change in application behavior may also be caused by a different order and distribution
+of allocations across memory blocks after margins are applied.
+
+The margin is applied also before the first and after the last allocation in a block.
+It may occur only once between two adjacent allocations.
+
+Margins work with all types of memory.
+
+Margin is applied only to allocations made out of memory blocks and not to dedicated
+allocations, which have their own memory block of specific size.
+It is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
+or those automatically decided to put into dedicated allocations, e.g. due to their
+large size or recommended by VK_KHR_dedicated_allocation extension.
+Margins are also not active in custom pools created with #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag.
+
+Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
+
+Note that enabling margins increases memory usage and fragmentation.
+
+\section debugging_memory_usage_corruption_detection Corruption detection
+
+You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
+of contents of the margins.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#define VMA_DEBUG_DETECT_CORRUPTION 1
+#include "vk_mem_alloc.h"
+\endcode
+
+When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
+(it must be a multiple of 4) before and after every allocation is filled with a magic number.
+This idea is also known as a "canary".
+Memory is automatically mapped and unmapped if necessary.
+
+This number is validated automatically when the allocation is destroyed.
+If it's not equal to the expected value, `VMA_ASSERT()` is executed.
+It clearly means that either the CPU or GPU overwrote the memory outside of boundaries of the allocation,
+which indicates a serious bug.
+
+You can also explicitly request checking margins of all allocations in all memory blocks
+that belong to specified memory types by using function vmaCheckCorruption(),
+or in memory blocks that belong to a specified custom pool, by using function
+vmaCheckPoolCorruption().
+
+Margin validation (corruption detection) works only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`.
+
+
+\page record_and_replay Record and replay
+
+\section record_and_replay_introduction Introduction
+
+While using the library, a sequence of calls to its functions together with their
+parameters can be recorded to a file and later replayed using a standalone player
+application. It can be useful to:
+
+- Test correctness - check if the same sequence of calls will not cause a crash or
+  failures on a target platform.
+- Gather statistics - see the number of allocations, peak memory usage, number of
+  calls etc.
+- Benchmark performance - see how much time it takes to replay the whole
+  sequence.
+
+\section record_and_replay_usage Usage
+
+Recording functionality is disabled by default.
+To enable it, define the following macro before every include of this library:
+
+\code
+#define VMA_RECORDING_ENABLED 1
+\endcode
+
+To record a sequence of calls to a file: Fill in
+VmaAllocatorCreateInfo::pRecordSettings member while creating #VmaAllocator
+object. The file is opened and written during the whole lifetime of the allocator.
+
+To replay a file: Use VmaReplay - a standalone command-line program.
+A precompiled binary can be found in "bin" directory.
+Its source can be found in "src/VmaReplay" directory.
+Its project is generated by Premake.
+Command line syntax is printed when the program is launched without parameters.
+Basic usage:
+
+    VmaReplay.exe MyRecording.csv
+
+Documentation of the file format can be found in file: "docs/Recording file format.md".
+It's a human-readable text file in CSV format (Comma Separated Values).
+
+\section record_and_replay_additional_considerations Additional considerations
+
+- Replaying a file that was recorded on a different GPU (with different parameters
+  like `bufferImageGranularity`, `nonCoherentAtomSize`, and especially different
+  set of memory heaps and types) may give different performance and memory usage
+  results, as well as issue some warnings and errors.
+- Current implementation of recording in VMA, as well as VmaReplay application, is
+  coded and tested only on Windows. Inclusion of recording code is driven by the
+  `VMA_RECORDING_ENABLED` macro. Support for other platforms should be easy to
+  add. Contributions are welcome.
+
+
+\page usage_patterns Recommended usage patterns
+
+See also slides from talk:
+[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
+
+
+\section usage_patterns_common_mistakes Common mistakes
+
+**Use of CPU_TO_GPU instead of CPU_ONLY memory**
+
+#VMA_MEMORY_USAGE_CPU_TO_GPU is recommended only for resources that will be
+mapped and written by the CPU, as well as read directly by the GPU - like some
+buffers or textures updated every frame (dynamic). If you create a staging copy
+of a resource to be written by CPU and then used as a source of transfer to
+another resource placed in the GPU memory, that staging resource should be
+created with #VMA_MEMORY_USAGE_CPU_ONLY. Please read the descriptions of these
+enums carefully for details.
+
+**Unnecessary use of custom pools**
+
+\ref custom_memory_pools may be useful for special purposes - when you want to
+keep certain type of resources separate e.g. to reserve a minimum amount of memory
+for them, limit the maximum amount of memory they can occupy, or make some of them
+push out the others through the mechanism of \ref lost_allocations. For most
+resources this is not needed and so it is not recommended to create #VmaPool
+objects and allocations out of them. Allocating from the default pool is sufficient.
+
+\section usage_patterns_simple Simple patterns
+
+\subsection usage_patterns_simple_render_targets Render targets
+
+When:
+Any resources that you frequently write and read on GPU,
+e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
+images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
+
+What to do:
+Create them in video memory that is fastest to access from GPU using
+#VMA_MEMORY_USAGE_GPU_ONLY.
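+
+A sketch of creating such an attachment could look like this (the `imageInfo`
+fields shown are illustrative - fill them to match your actual render target):
+
+\code
+VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+// Fill imageType, format, extent etc. The usage is what makes it a render target:
+imageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VkImage image;
+VmaAllocation allocation;
+vmaCreateImage(allocator, &imageInfo, &allocCreateInfo, &image, &allocation, nullptr);
+\endcode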
+
+Consider using [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension
+and/or manually creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
+especially if they are large or if you plan to destroy and recreate them e.g. when
+the display resolution changes.
+Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
+
+\subsection usage_patterns_simple_immutable_resources Immutable resources
+
+When:
+Any resources that you fill on CPU only once (aka "immutable") or infrequently
+and then read frequently on GPU,
+e.g. textures, vertex and index buffers, constant buffers that don't change often.
+
+What to do:
+Create them in video memory that is fastest to access from GPU using
+#VMA_MEMORY_USAGE_GPU_ONLY.
+
+To initialize the content of such a resource, create a CPU-side (aka "staging") copy of it
+in system memory - #VMA_MEMORY_USAGE_CPU_ONLY, map it, fill it,
+and submit a transfer from it to the GPU resource.
+You can keep the staging copy if you need it for another upload transfer in the future.
+If you don't, you can destroy it or reuse this buffer for uploading a different resource
+after the transfer finishes.
+
+Prefer to create just buffers in system memory rather than images, even for uploading textures.
+Use `vkCmdCopyBufferToImage()`.
+Don't use images with `VK_IMAGE_TILING_LINEAR`.
+
+\subsection usage_patterns_dynamic_resources Dynamic resources
+
+When:
+Any resources that change frequently (aka "dynamic"), e.g. every frame or every draw call,
+written on CPU, read on GPU.
+
+What to do:
+Create them using #VMA_MEMORY_USAGE_CPU_TO_GPU.
+You can map them and write to them directly on CPU, as well as read from them on GPU.
+
+This is a more complex situation. Different solutions are possible,
+and the best one depends on the specific GPU type, but you can use this simple approach for a start.
+Prefer to write to such resources sequentially (e.g. using `memcpy`).
+Don't perform random access or any reads from them on CPU, as it may be very slow.
+
+\subsection usage_patterns_readback Readback
+
+When:
+Resources that contain data written by GPU that you want to read back on CPU,
+e.g. results of some computations.
+
+What to do:
+Create them using #VMA_MEMORY_USAGE_GPU_TO_CPU.
+You can write to them directly on GPU, as well as map and read them on CPU.
+
+\section usage_patterns_advanced Advanced patterns
+
+\subsection usage_patterns_integrated_graphics Detecting integrated graphics
+
+You can support integrated graphics (like Intel HD Graphics, AMD APU) better
+by detecting it in Vulkan.
+To do it, call `vkGetPhysicalDeviceProperties()`, inspect
+`VkPhysicalDeviceProperties::deviceType` and look for `VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU`.
+When you find it, you can assume that memory is unified and all memory types are comparably fast
+to access from GPU, regardless of `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+
+You can then sum up sizes of all available memory heaps and treat them as useful for
+your GPU resources, instead of only `DEVICE_LOCAL` ones.
+You can also prefer to create your resources in memory types that are `HOST_VISIBLE` to map them
+directly instead of submitting an explicit transfer (see below).
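+
+A minimal sketch of the detection itself (assuming `physicalDevice` is the
+`VkPhysicalDevice` that the allocator is created from):
+
+\code
+VkPhysicalDeviceProperties props;
+vkGetPhysicalDeviceProperties(physicalDevice, &props);
+const bool isIntegratedGpu = (props.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU);
+\endcode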
+
+\subsection usage_patterns_direct_vs_transfer Direct access versus transfer
+
+For resources that you frequently write on CPU and read on GPU, many solutions are possible:
+
+-# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
+   a second copy in system memory using #VMA_MEMORY_USAGE_CPU_ONLY and submit an explicit transfer each time.
+-# Create just a single copy using #VMA_MEMORY_USAGE_CPU_TO_GPU, map it and fill it on CPU,
+   read it directly on GPU.
+-# Create just a single copy using #VMA_MEMORY_USAGE_CPU_ONLY, map it and fill it on CPU,
+   read it directly on GPU.
+
+Which solution is the most efficient depends on your resource and especially on the GPU.
+It is best to measure it and then make the decision.
+Some general recommendations:
+
+- On integrated graphics use (2) or (3) to avoid unnecessary time and memory overhead
+  related to using a second copy and making a transfer.
+- For small resources (e.g. constant buffers) use (2).
+  Discrete AMD cards have a special 256 MiB pool of video memory that is directly mappable.
+  Even if the resource ends up in system memory, its data may be cached on GPU after first
+  fetch over PCIe bus.
+- For larger resources (e.g. textures), decide between (1) and (2).
+  You may want to differentiate NVIDIA and AMD, e.g. by looking for a memory type that is
+  both `DEVICE_LOCAL` and `HOST_VISIBLE`. When you find it, use (2), otherwise use (1).
+
+Similarly, for resources that you frequently write on GPU and read on CPU, multiple
+solutions are possible:
+
+-# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
+   a second copy in system memory using #VMA_MEMORY_USAGE_GPU_TO_CPU and submit an explicit transfer each time.
+-# Create just a single copy using #VMA_MEMORY_USAGE_GPU_TO_CPU, write to it directly on GPU,
+   map it and read it on CPU.
+
+You should take some measurements to decide which option is faster for your specific
+resource.
+
+If you don't want to specialize your code for specific types of GPUs, you can still make
+a simple optimization for cases when your resource ends up in mappable memory, to use it
+directly in that case instead of creating a CPU-side staging copy.
+For details see [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable).
+
+
+\page configuration Configuration
+
+Please check "CONFIGURATION SECTION" in the code to find macros that you can define
+before each include of this file or change directly in this file to provide
+your own implementation of basic facilities like assert, `min()` and `max()` functions,
+mutex, atomic etc.
+The library uses its own implementation of containers by default, but you can switch to using
+STL containers instead.
+
+For example, define `VMA_ASSERT(expr)` before including the library to provide
+a custom implementation of the assertion, compatible with your project.
+By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration
+and empty otherwise.
+
+\section config_Vulkan_functions Pointers to Vulkan functions
+
+The library uses Vulkan functions straight from the `vulkan.h` header by default.
+If you want to provide your own pointers to these functions, e.g. fetched using
+`vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`:
+
+-# Define `VMA_STATIC_VULKAN_FUNCTIONS 0`.
+-# Provide valid pointers through VmaAllocatorCreateInfo::pVulkanFunctions (see the sketch below).
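+
+A sketch of the second step (assuming `device` and `physicalDevice` are your
+initialized Vulkan handles; only two members are shown - fill every member of
+#VmaVulkanFunctions the same way):
+
+\code
+VmaVulkanFunctions vulkanFunctions = {};
+vulkanFunctions.vkAllocateMemory =
+    (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
+vulkanFunctions.vkFreeMemory =
+    (PFN_vkFreeMemory)vkGetDeviceProcAddr(device, "vkFreeMemory");
+// ... and so on for all remaining members ...
+
+VmaAllocatorCreateInfo allocatorInfo = {};
+allocatorInfo.physicalDevice = physicalDevice;
+allocatorInfo.device = device;
+allocatorInfo.pVulkanFunctions = &vulkanFunctions;
+\endcode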
+
+\section custom_memory_allocator Custom host memory allocator
+
+If you use a custom allocator for CPU memory rather than default operator `new`
+and `delete` from C++, you can make this library use your allocator as well
+by filling the optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
+functions will be passed to Vulkan, as well as used by the library itself to
+make any CPU-side allocations.
+
+\section allocation_callbacks Device memory allocation callbacks
+
+The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
+You can set up callbacks to be informed about these calls, e.g. for the purpose
+of gathering some statistics. To do it, fill the optional member
+VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
+
+\section heap_memory_limit Device heap memory limit
+
+When device memory of a certain heap runs out of free space, new allocations may
+fail (returning an error code) or they may succeed, silently pushing some existing
+memory blocks from GPU VRAM to system RAM (which degrades performance). This
+behavior is implementation-dependent - it depends on the GPU vendor and graphics
+driver.
+
+On AMD cards it can be controlled while creating Vulkan device object by using
+VK_AMD_memory_overallocation_behavior extension, if available.
+
+Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
+memory available without switching your graphics card to one that really has
+smaller VRAM, you can use a feature of this library intended for this purpose.
+To do it, fill the optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
+
+
+
+\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
+
+VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
+performance on some GPUs. It augments the Vulkan API with the possibility to query
+the driver whether it prefers a particular buffer or image to have its own, dedicated
+allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
+to do some internal optimizations.
+
+The extension is supported by this library. It will be used automatically when
+enabled. To enable it:
+
+1 . When creating Vulkan device, check if the following 2 device extensions are
+supported (call `vkEnumerateDeviceExtensionProperties()`).
+If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
+
+- VK_KHR_get_memory_requirements2
+- VK_KHR_dedicated_allocation
+
+If you enabled these extensions:
+
+2 . Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
+your #VmaAllocator to inform the library that you enabled required extensions
+and you want the library to use them.
+
+\code
+allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
+
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode
+
+That's all. The extension will be automatically used whenever you create a
+buffer using vmaCreateBuffer() or image using vmaCreateImage().
+
+When using the extension together with the Vulkan validation layer, you will receive
+warnings like this:
+
+    vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer.
+
+It is OK, you should just ignore it. It happens because you use function
+`vkGetBufferMemoryRequirements2KHR()` instead of standard
+`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
+unaware of it.
+
+To learn more about this extension, see:
+
+- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#VK_KHR_dedicated_allocation)
+- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
+
+
+
+\page general_considerations General considerations
+
+\section general_considerations_thread_safety Thread safety
+
+- The library has no global state, so separate #VmaAllocator objects can be used
+  independently.
+  There should be no need to create multiple such objects though - one per `VkDevice` is enough.
+- By default, all calls to functions that take #VmaAllocator as first parameter
+  are safe to call from multiple threads simultaneously because they are
+  synchronized internally when needed.
+- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
+  flag, calls to functions that take such #VmaAllocator object must be
+  synchronized externally.
+- Access to a #VmaAllocation object must be externally synchronized. For example,
+  you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
+  threads at the same time if you pass the same #VmaAllocation object to these
+  functions.
+
+\section general_considerations_validation_layer_warnings Validation layer warnings
+
+When using this library, you may meet the following types of warnings issued by
+Vulkan validation layer. They don't necessarily indicate a bug, so you may need
+to just ignore them.
+
+- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
+  - It happens when VK_KHR_dedicated_allocation extension is enabled.
+    `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it.
+- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
+  - It happens when you map a buffer or image, because the library maps the entire
+    `VkDeviceMemory` block, where different types of images and buffers may end
+    up together, especially on GPUs with unified memory like Intel.
+- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
+  - It happens when you use lost allocations, and a new image or buffer is
+    created in place of an existing object that became lost.
+  - It may happen also when you use [defragmentation](@ref defragmentation).
+
+\section general_considerations_allocation_algorithm Allocation algorithm
+
+The library uses the following algorithm for allocation, in order:
+
+-# Try to find free range of memory in existing blocks.
+-# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size.
+-# If failed, try to create such a block with size/2, size/4, size/8.
+-# If failed and #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag was
+   specified, try to find space in existing blocks, possibly making some other
+   allocations lost.
+-# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
+   just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+-# If failed, choose another memory type that meets the requirements specified in
+   VmaAllocationCreateInfo and go to point 1.
+-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
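+
+For illustration, a typical allocation that goes through this algorithm may look like
+the following sketch (the buffer parameters below are arbitrary):
+
+\code
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufferInfo.size = 65536;
+bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+VkResult result = vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
+    &buffer, &allocation, NULL);
+if(result == VK_ERROR_OUT_OF_DEVICE_MEMORY)
+{
+    // All the steps above have failed.
+}
+\endcode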
+
+\section general_considerations_features_not_supported Features not supported
+
+Features deliberately excluded from the scope of this library:
+
+- Data transfer. Uploading (streaming) and downloading data of buffers and images
+  between CPU and GPU memory and related synchronization is the responsibility of the user.
+  Defining some "texture" object that would automatically stream its data from a
+  staging copy in CPU memory to GPU memory would rather be a feature of another,
+  higher-level library implemented on top of VMA.
+- Allocations for imported/exported external memory. They tend to require
+  explicit memory type index and dedicated allocation anyway, so they don't
+  interact with main features of this library. Such special purpose allocations
+  should be made manually, using `vkCreateBuffer()` and `vkAllocateMemory()`.
+- Recreation of buffers and images. Although the library has functions for
+  buffer and image creation (vmaCreateBuffer(), vmaCreateImage()), you need to
+  recreate these objects yourself after defragmentation. That's because the big
+  structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in
+  #VmaAllocation object.
+- Handling CPU memory allocation failures. When dynamically creating small C++
+  objects in CPU memory (not Vulkan memory), allocation failures are not checked
+  and handled gracefully, because that would complicate code significantly and
+  is usually not needed in desktop PC applications anyway.
+- Code free of any compiler warnings. Maintaining the library to compile and
+  work correctly on so many different platforms is hard enough. Being free of
+  any warnings, on any version of any compiler, is simply not feasible.
+- This is a C++ library with C interface.
+  Bindings or ports to any other programming languages are welcome as external projects and
+  are not going to be included in this repository.
+
+*/
+
+/*
+Define this macro to 0/1 to disable/enable support for recording functionality,
+available through VmaAllocatorCreateInfo::pRecordSettings.
+*/
+#ifndef VMA_RECORDING_ENABLED
+    #define VMA_RECORDING_ENABLED 0
+#endif
+
+#ifndef NOMINMAX
+    #define NOMINMAX // For windows.h
+#endif
+
+#ifndef VULKAN_H_
+    #include <vulkan/vulkan.h>
+#endif
+
+#if VMA_RECORDING_ENABLED
+    #include <windows.h>
+#endif
+
+// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
+// where AAA = major, BBB = minor, CCC = patch.
+// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
+#if !defined(VMA_VULKAN_VERSION)
+    #if defined(VK_VERSION_1_1)
+        #define VMA_VULKAN_VERSION 1001000
+    #else
+        #define VMA_VULKAN_VERSION 1000000
+    #endif
+#endif
+
+#if !defined(VMA_DEDICATED_ALLOCATION)
+    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
+        #define VMA_DEDICATED_ALLOCATION 1
+    #else
+        #define VMA_DEDICATED_ALLOCATION 0
+    #endif
+#endif
+
+#if !defined(VMA_BIND_MEMORY2)
+    #if VK_KHR_bind_memory2
+        #define VMA_BIND_MEMORY2 1
+    #else
+        #define VMA_BIND_MEMORY2 0
+    #endif
+#endif
+
+#if !defined(VMA_MEMORY_BUDGET)
+    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
+        #define VMA_MEMORY_BUDGET 1
+    #else
+        #define VMA_MEMORY_BUDGET 0
+    #endif
+#endif
+
+// Define these macros to decorate all public functions with additional code,
+// before and after returned type, appropriately. This may be useful for
+// exporting the functions when compiling VMA as a separate library.
Example:
+// #define VMA_CALL_PRE  __declspec(dllexport)
+// #define VMA_CALL_POST __cdecl
+#ifndef VMA_CALL_PRE
+    #define VMA_CALL_PRE
+#endif
+#ifndef VMA_CALL_POST
+    #define VMA_CALL_POST
+#endif
+
+/** \struct VmaAllocator
+\brief Represents the main object of this library, once initialized.
+
+Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
+Call function vmaDestroyAllocator() to destroy it.
+
+It is recommended to create just one object of this type per `VkDevice` object,
+right after Vulkan is initialized, and keep it alive until just before the Vulkan device is destroyed.
+*/
+VK_DEFINE_HANDLE(VmaAllocator)
+
+/// Callback function called after successful vkAllocateMemory.
+typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
+    VmaAllocator allocator,
+    uint32_t memoryType,
+    VkDeviceMemory memory,
+    VkDeviceSize size);
+/// Callback function called before vkFreeMemory.
+typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
+    VmaAllocator allocator,
+    uint32_t memoryType,
+    VkDeviceMemory memory,
+    VkDeviceSize size);
+
+/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
+
+Provided for informative purpose, e.g. to gather statistics about number of
+allocations or total amount of memory allocated in Vulkan.
+
+Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
+*/
+typedef struct VmaDeviceMemoryCallbacks {
+    /// Optional, can be null.
+    PFN_vmaAllocateDeviceMemoryFunction pfnAllocate;
+    /// Optional, can be null.
+    PFN_vmaFreeDeviceMemoryFunction pfnFree;
+} VmaDeviceMemoryCallbacks;
+
+/// Flags for created #VmaAllocator.
+typedef enum VmaAllocatorCreateFlagBits {
+    /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
+
+    Using this flag may increase performance because internal mutexes are not used.
+    */
+    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
+    /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
+
+    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
+    When it's `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
+
+    Using this extension will automatically allocate dedicated blocks of memory for
+    some buffers and images instead of suballocating place for them out of bigger
+    memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
+    flag) when it is recommended by the driver. It may improve performance on some
+    GPUs.
+
+    You may set this flag only if you found out that the following device extensions are
+    supported, you enabled them while creating Vulkan device passed as
+    VmaAllocatorCreateInfo::device, and you want them to be used internally by this
+    library:
+
+    - VK_KHR_get_memory_requirements2 (device extension)
+    - VK_KHR_dedicated_allocation (device extension)
+
+    When this flag is set, you can experience the following warnings reported by Vulkan
+    validation layer. You can ignore them.
+
+    > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
+    */
+    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
+    /**
+    Enables usage of VK_KHR_bind_memory2 extension.
+
+    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
+    When it's `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
+
+    You may set this flag only if you found out that this device extension is supported,
+    you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
+    and you want it to be used internally by this library.
+
+    The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
+    which allow passing a chain of `pNext` structures while binding.
+    This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
+    */
+    VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
+    /**
+    Enables usage of VK_EXT_memory_budget extension.
+
+    You may set this flag only if you found out that this device extension is supported,
+    you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
+    and you want it to be used internally by this library, along with another instance extension
+    VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).
+
+    The extension provides a query for current memory usage and budget, which will probably
+    be more accurate than an estimation used by the library otherwise.
+    */
+    VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
+
+    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaAllocatorCreateFlagBits;
+typedef VkFlags VmaAllocatorCreateFlags;
+
+/** \brief Pointers to some Vulkan functions - a subset used by the library.
+
+Used in VmaAllocatorCreateInfo::pVulkanFunctions.
+*/
+typedef struct VmaVulkanFunctions {
+    PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
+    PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
+    PFN_vkAllocateMemory vkAllocateMemory;
+    PFN_vkFreeMemory vkFreeMemory;
+    PFN_vkMapMemory vkMapMemory;
+    PFN_vkUnmapMemory vkUnmapMemory;
+    PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
+    PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
+    PFN_vkBindBufferMemory vkBindBufferMemory;
+    PFN_vkBindImageMemory vkBindImageMemory;
+    PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
+    PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
+    PFN_vkCreateBuffer vkCreateBuffer;
+    PFN_vkDestroyBuffer vkDestroyBuffer;
+    PFN_vkCreateImage vkCreateImage;
+    PFN_vkDestroyImage vkDestroyImage;
+    PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
+    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
+#endif
+#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+    PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
+    PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
+#endif
+#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+    PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR;
+#endif
+} VmaVulkanFunctions;
+
+/// Flags to be used in VmaRecordSettings::flags.
+typedef enum VmaRecordFlagBits {
+    /** \brief Enables flush after recording every function call.
+
+    Enable it if you expect your application to crash, which may leave recording file truncated.
+    It may degrade performance though.
+    */
+    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
+
+    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaRecordFlagBits;
+typedef VkFlags VmaRecordFlags;
+
+/// Parameters for recording calls to VMA functions.
To be used in VmaAllocatorCreateInfo::pRecordSettings.
+typedef struct VmaRecordSettings
+{
+    /// Flags for recording. Use #VmaRecordFlagBits enum.
+    VmaRecordFlags flags;
+    /** \brief Path to the file that should be written by the recording.
+
+    Suggested extension: "csv".
+    If the file already exists, it will be overwritten.
+    It will be opened for the whole time #VmaAllocator object is alive.
+    If opening this file fails, creation of the whole allocator object fails.
+    */
+    const char* pFilePath;
+} VmaRecordSettings;
+
+/// Description of an Allocator to be created.
+typedef struct VmaAllocatorCreateInfo
+{
+    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
+    VmaAllocatorCreateFlags flags;
+    /// Vulkan physical device.
+    /** It must be valid throughout the whole lifetime of the created allocator. */
+    VkPhysicalDevice physicalDevice;
+    /// Vulkan device.
+    /** It must be valid throughout the whole lifetime of the created allocator. */
+    VkDevice device;
+    /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
+    /** Set to 0 to use default, which is currently 256 MiB. */
+    VkDeviceSize preferredLargeHeapBlockSize;
+    /// Custom CPU memory allocation callbacks. Optional.
+    /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
+    const VkAllocationCallbacks* pAllocationCallbacks;
+    /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
+    /** Optional, can be null. */
+    const VmaDeviceMemoryCallbacks* pDeviceMemoryCallbacks;
+    /** \brief Maximum number of additional frames that are in use at the same time as current frame.
+
+    This value is used only when you make allocations with
+    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become
+    lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount.
+
+    For example, if you double-buffer your command buffers, so resources used for
+    rendering in previous frame may still be in use by the GPU at the moment you
+    allocate resources needed for the current frame, set this value to 1.
+
+    If you want to allow any allocations other than used in the current frame to
+    become lost, set this value to 0.
+    */
+    uint32_t frameInUseCount;
+    /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
+
+    If not NULL, it must be a pointer to an array of
+    `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining the limit on
+    the maximum number of bytes that can be allocated out of a particular Vulkan memory
+    heap.
+
+    Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
+    heap. This is also the default in case of `pHeapSizeLimit` = NULL.
+
+    If there is a limit defined for a heap:
+
+    - If the user tries to allocate more memory from that heap using this allocator,
+      the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+    - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
+      value of this limit will be reported instead when using vmaGetMemoryProperties().
+
+    Warning! Using this feature may not be equivalent to installing a GPU with
+    smaller amount of memory, because graphics driver doesn't necessarily fail new
+    allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
+    exceeded. It may return success and just silently migrate some device memory
+    blocks to system RAM.
This driver behavior can also be controlled using
+    VK_AMD_memory_overallocation_behavior extension.
+    */
+    const VkDeviceSize* pHeapSizeLimit;
+    /** \brief Pointers to Vulkan functions. Can be null if you leave `VMA_STATIC_VULKAN_FUNCTIONS` defined to 1.
+
+    If you leave `VMA_STATIC_VULKAN_FUNCTIONS` defined to 1 in the configuration section,
+    you can pass null as this member, because the library will fetch pointers to
+    Vulkan functions internally in a static way, like:
+
+        vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
+
+    Fill this member if you want to provide your own pointers to Vulkan functions,
+    e.g. fetched using `vkGetInstanceProcAddr()` and `vkGetDeviceProcAddr()`.
+    */
+    const VmaVulkanFunctions* pVulkanFunctions;
+    /** \brief Parameters for recording of VMA calls. Can be null.
+
+    If not null, it enables recording of calls to VMA functions to a file.
+    If support for recording is not enabled using `VMA_RECORDING_ENABLED` macro,
+    creation of the allocator object fails with `VK_ERROR_FEATURE_NOT_PRESENT`.
+    */
+    const VmaRecordSettings* pRecordSettings;
+    /** \brief Optional handle to Vulkan instance object.
+
+    Optional, can be null. Must be set if #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT flag is used
+    or if `vulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)`.
+    */
+    VkInstance instance;
+    /** \brief Optional. The highest version of Vulkan that the application is designed to use.
+
+    It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
+    The patch version number specified is ignored. Only the major and minor versions are considered.
+    It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
+    Only versions 1.0 and 1.1 are supported by the current implementation.
+    Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
+    */
+    uint32_t vulkanApiVersion;
+} VmaAllocatorCreateInfo;
+
+/// Creates Allocator object.
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
+    const VmaAllocatorCreateInfo* pCreateInfo,
+    VmaAllocator* pAllocator);
+
+/// Destroys allocator object.
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
+    VmaAllocator allocator);
+
+/**
+PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
+You can access it here, without fetching it again on your own.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
+    VmaAllocator allocator,
+    const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
+
+/**
+PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
+You can access it here, without fetching it again on your own.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
+    VmaAllocator allocator,
+    const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
+
+/**
+\brief Given Memory Type Index, returns Property Flags of this memory type.
+
+This is just a convenience function. Same information can be obtained using
+vmaGetMemoryProperties().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
+    VmaAllocator allocator,
+    uint32_t memoryTypeIndex,
+    VkMemoryPropertyFlags* pFlags);
+
+/** \brief Sets index of the current frame.
+
+This function must be used if you make allocations with
+#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and
+#VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flags to inform the allocator
+when a new frame begins.
Allocations queried using vmaGetAllocationInfo() cannot
+become lost in the current frame.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
+    VmaAllocator allocator,
+    uint32_t frameIndex);
+
+/** \brief Calculated statistics of memory usage in entire allocator.
+*/
+typedef struct VmaStatInfo
+{
+    /// Number of `VkDeviceMemory` Vulkan memory blocks allocated.
+    uint32_t blockCount;
+    /// Number of #VmaAllocation allocation objects allocated.
+    uint32_t allocationCount;
+    /// Number of free ranges of memory between allocations.
+    uint32_t unusedRangeCount;
+    /// Total number of bytes occupied by all allocations.
+    VkDeviceSize usedBytes;
+    /// Total number of bytes occupied by unused ranges.
+    VkDeviceSize unusedBytes;
+    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
+    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
+} VmaStatInfo;
+
+/// General statistics from current state of Allocator.
+typedef struct VmaStats
+{
+    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
+    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
+    VmaStatInfo total;
+} VmaStats;
+
+/** \brief Retrieves statistics from current state of the Allocator.
+
+This function is called "calculate" not "get" because it has to traverse all
+internal data structures, so it may be quite slow. For faster but more brief statistics
+suitable to be called every frame or every allocation, use vmaGetBudget().
+
+Note that when using allocator from multiple threads, returned information may immediately
+become outdated.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
+    VmaAllocator allocator,
+    VmaStats* pStats);
+
+/** \brief Statistics of current memory usage and available budget, in bytes, for specific memory heap.
+*/
+typedef struct VmaBudget
+{
+    /** \brief Sum size of all `VkDeviceMemory` blocks allocated from particular heap, in bytes.
+    */
+    VkDeviceSize blockBytes;
+
+    /** \brief Sum size of all allocations created in particular heap, in bytes.
+
+    Usually less than or equal to `blockBytes`.
+    Difference `blockBytes - allocationBytes` is the amount of memory allocated but unused -
+    available for new allocations or wasted due to fragmentation.
+
+    It might be greater than `blockBytes` if there are some allocations in lost state, as they count
+    toward this value as well.
+    */
+    VkDeviceSize allocationBytes;
+
+    /** \brief Estimated current memory usage of the program, in bytes.
+
+    Fetched from system using `VK_EXT_memory_budget` extension if enabled.
+
+    It might be different from `blockBytes` (usually higher) due to additional implicit objects
+    also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or
+    `VkDeviceMemory` blocks allocated outside of this library, if any.
+    */
+    VkDeviceSize usage;
+
+    /** \brief Estimated amount of memory available to the program, in bytes.
+
+    Fetched from system using `VK_EXT_memory_budget` extension if enabled.
+
+    It might be different (most probably smaller) from `VkMemoryHeap::size[heapIndex]` due to factors
+    external to the program, like other programs also consuming system resources.
+    Difference `budget - usage` is the amount of additional memory that can probably
+    be allocated without problems. Exceeding the budget may result in various problems.
+    */
+    VkDeviceSize budget;
+} VmaBudget;
+
+/** \brief Retrieves information about current memory budget for all memory heaps.
+
+\param[out] pBudget Must point to an array with the number of elements at least equal to the number of memory heaps in the physical device used.
+
+This function is called "get" not "calculate" because it is very fast, suitable to be called
+every frame or every allocation. For more detailed statistics use vmaCalculateStats().
+
+Note that when using allocator from multiple threads, returned information may immediately
+become outdated.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
+    VmaAllocator allocator,
+    VmaBudget* pBudget);
+
+#ifndef VMA_STATS_STRING_ENABLED
+#define VMA_STATS_STRING_ENABLED 1
+#endif
+
+#if VMA_STATS_STRING_ENABLED
+
+/// Builds and returns statistics as string in JSON format.
+/** @param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
+    VmaAllocator allocator,
+    char** ppStatsString,
+    VkBool32 detailedMap);
+
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
+    VmaAllocator allocator,
+    char* pStatsString);
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+/** \struct VmaPool
+\brief Represents a custom memory pool.
+
+Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
+Call function vmaDestroyPool() to destroy it.
+
+For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
+*/
+VK_DEFINE_HANDLE(VmaPool)
+
+typedef enum VmaMemoryUsage
+{
+    /** No intended memory usage specified.
+    Use other members of VmaAllocationCreateInfo to specify your requirements.
+    */
+    VMA_MEMORY_USAGE_UNKNOWN = 0,
+    /** Memory will be used on device only, so fast access from the device is preferred.
+    It usually means device-local GPU (video) memory.
+    No need to be mappable on host.
+    It is roughly equivalent to `D3D12_HEAP_TYPE_DEFAULT`.
+
+    Usage:
+
+    - Resources written and read by device, e.g. images used as attachments.
+    - Resources transferred from host once (immutable) or infrequently and read by
+      device multiple times, e.g. textures to be sampled, vertex buffers, uniform
+      (constant) buffers, and majority of other types of resources used on GPU.
+
+    Allocation may still end up in `HOST_VISIBLE` memory on some implementations.
+    In such case, you are free to map it.
+    You can use #VMA_ALLOCATION_CREATE_MAPPED_BIT with this usage type.
+    */
+    VMA_MEMORY_USAGE_GPU_ONLY = 1,
+    /** Memory will be mappable on host.
+    It usually means CPU (system) memory.
+    Guaranteed to be `HOST_VISIBLE` and `HOST_COHERENT`.
+    CPU access is typically uncached. Writes may be write-combined.
+    Resources created in this pool may still be accessible to the device, but access to them can be slow.
+    It is roughly equivalent to `D3D12_HEAP_TYPE_UPLOAD`.
+
+    Usage: Staging copy of resources used as transfer source.
+    */
+    VMA_MEMORY_USAGE_CPU_ONLY = 2,
+    /**
+    Memory that is both mappable on host (guaranteed to be `HOST_VISIBLE`) and preferably fast to access by GPU.
+    CPU access is typically uncached. Writes may be write-combined.
+
+    Usage: Resources written frequently by host (dynamic), read by device. E.g. textures, vertex buffers, uniform buffers updated every frame or every draw call.
+    */
+    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
+    /** Memory mappable on host (guaranteed to be `HOST_VISIBLE`) and cached.
+    It is roughly equivalent to `D3D12_HEAP_TYPE_READBACK`.
+
+    Usage:
+
+    - Resources written by device, read by host - results of some computations, e.g. screen capture, average scene luminance for HDR tone mapping.
+    - Any resources read or accessed randomly on host, e.g.
CPU-side copy of vertex buffer used as source of transfer, but also used for collision detection.
+    */
+    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
+    /** CPU memory - memory that is preferably not `DEVICE_LOCAL`, but also not guaranteed to be `HOST_VISIBLE`.
+
+    Usage: Staging copy of resources moved from GPU memory to CPU memory as part
+    of custom paging/residency mechanism, to be moved back to GPU memory when needed.
+    */
+    VMA_MEMORY_USAGE_CPU_COPY = 5,
+    /** Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
+    Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation.
+
+    Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.
+
+    Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+    */
+    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
+
+    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
+} VmaMemoryUsage;
+
+/// Flags to be passed as VmaAllocationCreateInfo::flags.
+typedef enum VmaAllocationCreateFlagBits {
+    /** \brief Set this flag if the allocation should have its own memory block.
+
+    Use it for special, big resources, like fullscreen images used as attachments.
+
+    You should not use this flag if VmaAllocationCreateInfo::pool is not null.
+    */
+    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
+
+    /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
+
+    If new allocation cannot be placed in any of the existing blocks, allocation
+    fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
+
+    You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
+    #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
+
+    If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored. */
+    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
+    /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
+
+    Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
+
+    It is valid to use this flag for allocation made from memory type that is not
+    `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
+    useful if you need an allocation that is efficient to use on GPU
+    (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
+    support it (e.g. Intel GPU).
+
+    You should not use this flag together with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
+    */
+    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
+    /** Allocation created with this flag can become lost as a result of another
+    allocation with #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag, so you
+    must check it before use.
+
+    To check if allocation is not lost, call vmaGetAllocationInfo() and check if
+    VmaAllocationInfo::deviceMemory is not `VK_NULL_HANDLE`.
+
+    For details about supporting lost allocations, see Lost Allocations
+    chapter of User Guide on Main Page.
+
+    You should not use this flag together with #VMA_ALLOCATION_CREATE_MAPPED_BIT.
+    */
+    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
+    /** While creating allocation using this flag, other allocations that were
+    created with flag #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
+
+    For details about supporting lost allocations, see Lost Allocations
+    chapter of User Guide on Main Page.
+    */
+    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
+    /** Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
+    null-terminated string. Instead of copying pointer value, a local copy of the
+    string is made and stored in allocation's `pUserData`. The string is automatically
+    freed together with the allocation. It is also used in vmaBuildStatsString().
+    */
+    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
+    /** Allocation will be created from upper stack in a double stack pool.
+
+    This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
+    */
+    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
+    /** Create both buffer/image and allocation, but don't bind them together.
+    It is useful when you want to perform the binding yourself, e.g. to do some more advanced binding using extensions.
+    The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
+    Otherwise it is ignored.
+    */
+    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
+    /** Create allocation only if additional device memory required for it, if any, won't exceed
+    memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+    */
+    VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
+
+    /** Allocation strategy that chooses smallest possible free range for the
+    allocation.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
+    /** Allocation strategy that chooses biggest possible free range for the
+    allocation.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
+    /** Allocation strategy that chooses first suitable free range for the
+    allocation.
+
+    "First" doesn't necessarily mean the one with smallest offset in memory,
+    but rather the one that is easiest and fastest to find.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
+
+    /** Allocation strategy that tries to minimize memory usage.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
+    /** Allocation strategy that tries to minimize allocation time.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
+    /** Allocation strategy that tries to minimize memory fragmentation.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
+
+    /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_MASK =
+        VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
+        VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
+        VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
+
+    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaAllocationCreateFlagBits;
+typedef VkFlags VmaAllocationCreateFlags;
+
+typedef struct VmaAllocationCreateInfo
+{
+    /// Use #VmaAllocationCreateFlagBits enum.
+    VmaAllocationCreateFlags flags;
+    /** \brief Intended usage of memory.
+
+    You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in another way. \n
+    If `pool` is not null, this member is ignored.
+    */
+    VmaMemoryUsage usage;
+    /** \brief Flags that must be set in a Memory Type chosen for an allocation.
+
+    Leave 0 if you specify memory requirements in another way. \n
+    If `pool` is not null, this member is ignored.*/
+    VkMemoryPropertyFlags requiredFlags;
+    /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
+
+    Set to 0 if no additional flags are preferred.
\n
+    If `pool` is not null, this member is ignored. */
+    VkMemoryPropertyFlags preferredFlags;
+    /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
+
+    Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
+    it meets other requirements specified by this structure, with no further
+    restrictions on memory type index. \n
+    If `pool` is not null, this member is ignored.
+    */
+    uint32_t memoryTypeBits;
+    /** \brief Pool that this allocation should be created in.
+
+    Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
+    `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
+    */
+    VmaPool pool;
+    /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
+
+    If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
+    null or pointer to a null-terminated string. The string will then be copied to
+    an internal buffer, so it doesn't need to be valid after the allocation call.
+    */
+    void* pUserData;
+} VmaAllocationCreateInfo;
+
+/**
+\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
+
+This algorithm tries to find a memory type that:
+
+- Is allowed by memoryTypeBits.
+- Contains all the flags from pAllocationCreateInfo->requiredFlags.
+- Matches intended usage.
+- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
+
+\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result
+from this function or any other allocating function probably means that your
+device doesn't support any memory type with requested features for the specific
+type of resource you want to use it for. Please check parameters of your
+resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
+    VmaAllocator allocator,
+    uint32_t memoryTypeBits,
+    const VmaAllocationCreateInfo* pAllocationCreateInfo,
+    uint32_t* pMemoryTypeIndex);
+
+/**
+\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
+
+It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
+It internally creates a temporary, dummy buffer that never has memory bound.
+It is just a convenience function, equivalent to calling:
+
+- `vkCreateBuffer`
+- `vkGetBufferMemoryRequirements`
+- `vmaFindMemoryTypeIndex`
+- `vkDestroyBuffer`
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
+    VmaAllocator allocator,
+    const VkBufferCreateInfo* pBufferCreateInfo,
+    const VmaAllocationCreateInfo* pAllocationCreateInfo,
+    uint32_t* pMemoryTypeIndex);
+
+/**
+\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
+
+It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
+It internally creates a temporary, dummy image that never has memory bound.
+It is just a convenience function, equivalent to calling:
+
+- `vkCreateImage`
+- `vkGetImageMemoryRequirements`
+- `vmaFindMemoryTypeIndex`
+- `vkDestroyImage`
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
+    VmaAllocator allocator,
+    const VkImageCreateInfo* pImageCreateInfo,
+    const VmaAllocationCreateInfo* pAllocationCreateInfo,
+    uint32_t* pMemoryTypeIndex);
+
+/// Flags to be passed as VmaPoolCreateInfo::flags.
+typedef enum VmaPoolCreateFlagBits {
+    /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
+
+    This is an optional optimization flag.
+
+    If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
+    vmaAllocateMemoryForBuffer(), then you don't need to use it because the allocator
+    knows the exact type of your allocations so it can handle Buffer-Image Granularity
+    in the optimal way.
+
+    If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
+    the exact type of such allocations is not known, so the allocator must be conservative
+    in handling Buffer-Image Granularity, which can lead to suboptimal allocation
+    (wasted memory). In that case, if you can make sure you always allocate only
+    buffers and linear images or only optimal images out of this pool, use this flag
+    to make the allocator disregard Buffer-Image Granularity and so make allocations
+    faster and more optimal.
+    */
+    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
+
+    /** \brief Enables alternative, linear allocation algorithm in this pool.
+
+    Specify this flag to enable linear allocation algorithm, which always creates
+    new allocations after the last one and doesn't reuse space from allocations freed in
+    between. It trades memory consumption for simplified algorithm and data
+    structure, which has better performance and uses less memory for metadata.
+
+    By using this flag, you can achieve behavior of free-at-once, stack,
+    ring buffer, and double stack. For details, see documentation chapter
+    \ref linear_algorithm.
+
+    When using this flag, you must specify VmaPoolCreateInfo::maxBlockCount == 1 (or 0 for default).
+
+    For more details, see [Linear allocation algorithm](@ref linear_algorithm).
+    */
+    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
+
+    /** \brief Enables alternative, buddy allocation algorithm in this pool.
+
+    It operates on a tree of blocks, each having a size that is a power of two and
+    half of its parent's size. Compared to the default algorithm, this one provides
+    faster allocation and deallocation and decreased external fragmentation,
+    at the expense of more memory wasted (internal fragmentation).
+
+    For more details, see [Buddy allocation algorithm](@ref buddy_algorithm).
+    */
+    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
+
+    /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
+    */
+    VMA_POOL_CREATE_ALGORITHM_MASK =
+        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
+        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
+
+    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaPoolCreateFlagBits;
+typedef VkFlags VmaPoolCreateFlags;
+
+/** \brief Describes parameters of a created #VmaPool.
+*/
+typedef struct VmaPoolCreateInfo {
+    /** \brief Vulkan memory type index to allocate this pool from.
+    */
+    uint32_t memoryTypeIndex;
+    /** \brief Use combination of #VmaPoolCreateFlagBits.
+    */
+    VmaPoolCreateFlags flags;
+    /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
+
+    Specify nonzero to set explicit, constant size of memory blocks used by this
+    pool.
+
+    Leave 0 to use default and let the library manage block sizes automatically.
+    Sizes of particular blocks may vary.
+    */
+    VkDeviceSize blockSize;
+    /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
+
+    Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
+    */
+    size_t minBlockCount;
+    /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
+
+    Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
+
+    Set to the same value as VmaPoolCreateInfo::minBlockCount to have a fixed amount of memory allocated
+    throughout the whole lifetime of this pool.
+    */
+    size_t maxBlockCount;
+    /** \brief Maximum number of additional frames that are in use at the same time as current frame.
+
+    This value is used only when you make allocations with
+    #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become
+    lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount.
+
+    For example, if you double-buffer your command buffers, so resources used for
+    rendering in previous frame may still be in use by the GPU at the moment you
+    allocate resources needed for the current frame, set this value to 1.
+
+    If you want to allow any allocations other than used in the current frame to
+    become lost, set this value to 0.
+    */
+    uint32_t frameInUseCount;
+} VmaPoolCreateInfo;
+
+/** \brief Describes parameters of an existing #VmaPool.
+*/
+typedef struct VmaPoolStats {
+    /** \brief Total amount of `VkDeviceMemory` allocated from Vulkan for this pool, in bytes.
+    */
+    VkDeviceSize size;
+    /** \brief Total number of bytes in the pool not used by any #VmaAllocation.
+    */
+    VkDeviceSize unusedSize;
+    /** \brief Number of #VmaAllocation objects created from this pool that were not destroyed or lost.
+    */
+    size_t allocationCount;
+    /** \brief Number of continuous memory ranges in the pool not used by any #VmaAllocation.
+    */
+    size_t unusedRangeCount;
+    /** \brief Size of the largest continuous free memory region available for new allocation.
+
+    Making a new allocation of that size is not guaranteed to succeed because of
+    possible additional margin required to respect alignment and buffer/image
+    granularity.
+    */
+    VkDeviceSize unusedRangeSizeMax;
+    /** \brief Number of `VkDeviceMemory` blocks allocated for this pool.
+    */
+    size_t blockCount;
+} VmaPoolStats;
+
+/** \brief Allocates Vulkan device memory and creates #VmaPool object.
+
+@param allocator Allocator object.
+@param pCreateInfo Parameters of pool to create.
+@param[out] pPool Handle to created pool.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
+    VmaAllocator allocator,
+    const VmaPoolCreateInfo* pCreateInfo,
+    VmaPool* pPool);
+
+/** \brief Destroys #VmaPool object and frees Vulkan device memory.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
+    VmaAllocator allocator,
+    VmaPool pool);
+
+/** \brief Retrieves statistics of existing #VmaPool object.
+
+@param allocator Allocator object.
+@param pool Pool object.
+@param[out] pPoolStats Statistics of specified pool.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
+    VmaAllocator allocator,
+    VmaPool pool,
+    VmaPoolStats* pPoolStats);
+
+/** \brief Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInfo::frameInUseCount back from now.
+
+@param allocator Allocator object.
+@param pool Pool.
+@param[out] pLostAllocationCount Number of allocations marked as lost. Optional - pass null if you don't need this information.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
+    VmaAllocator allocator,
+    VmaPool pool,
+    size_t* pLostAllocationCount);
+
+/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
+
+Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
+`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is
+`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
+
+Possible return values:
+
+- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
+- `VK_SUCCESS` - corruption detection has been performed and succeeded.
+- `VK_ERROR_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations.
+  `VMA_ASSERT` is also fired in that case.
+- Other value: Error returned by Vulkan, e.g. memory mapping failure.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
+
+/** \brief Retrieves name of a custom pool.
+
+After the call `ppName` is either null or points to an internally-owned null-terminated string
+containing name of the pool that was previously set. The pointer becomes invalid when the pool is
+destroyed or its name is changed using vmaSetPoolName().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
+    VmaAllocator allocator,
+    VmaPool pool,
+    const char** ppName);
+
+/** \brief Sets name of a custom pool.
+
+`pName` can be either null or pointer to a null-terminated string with new name for the pool.
+The function makes an internal copy of the string, so it can be changed or freed immediately after this call.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
+    VmaAllocator allocator,
+    VmaPool pool,
+    const char* pName);
+
+/** \struct VmaAllocation
+\brief Represents single memory allocation.
+
+It may be either a dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type,
+plus a unique offset.
+
+There are multiple ways to create such object.
+You need to fill structure VmaAllocationCreateInfo.
+For more information see [Choosing memory type](@ref choosing_memory_type).
+
+Although the library provides convenience functions that create Vulkan buffer or image,
+allocate memory for it and bind them together,
+binding of the allocation to a buffer or an image is out of scope of the allocation itself.
+Allocation object can exist without buffer/image bound,
+binding can be done manually by the user, and destruction of it can be done
+independently of destruction of the allocation.
+
+The object also remembers its size and some other information.
+To retrieve this information, use function vmaGetAllocationInfo() and inspect
+returned structure VmaAllocationInfo.
+
+Some kinds of allocations can be in lost state.
+For more information, see [Lost allocations](@ref lost_allocations).
+*/
+VK_DEFINE_HANDLE(VmaAllocation)
+
+/** \brief Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
+*/
+typedef struct VmaAllocationInfo {
+    /** \brief Memory type index that this allocation was allocated from.
+
+    It never changes.
+    */
+    uint32_t memoryType;
+    /** \brief Handle to Vulkan memory object.
+
+    Same memory object can be shared by multiple allocations.
+
+    It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
+
+    If the allocation is lost, it is equal to `VK_NULL_HANDLE`.
+    */
+    VkDeviceMemory deviceMemory;
+    /** \brief Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
+
+    It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
+    */
+    VkDeviceSize offset;
+    /** \brief Size of this allocation, in bytes.
+
+    It never changes, unless allocation is lost.
+    */
+    VkDeviceSize size;
+    /** \brief Pointer to the beginning of this allocation as mapped data.
+
+    If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
+    created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
+
+    It can change after call to vmaMapMemory(), vmaUnmapMemory().
+    It can also change after call to vmaDefragment() if this allocation is passed to the function.
+    */
+    void* pMappedData;
+    /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
+
+    It can change after call to vmaSetAllocationUserData() for this allocation.
+    */
+    void* pUserData;
+} VmaAllocationInfo;
+
+/** \brief General purpose memory allocation.
+
+@param[out] pAllocation Handle to allocated memory.
+@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+
+It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
+vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
+    VmaAllocator allocator,
+    const VkMemoryRequirements* pVkMemoryRequirements,
+    const VmaAllocationCreateInfo* pCreateInfo,
+    VmaAllocation* pAllocation,
+    VmaAllocationInfo* pAllocationInfo);
+
+/** \brief General purpose memory allocation for multiple allocation objects at once.
+
+@param allocator Allocator object.
+@param pVkMemoryRequirements Memory requirements for each allocation.
+@param pCreateInfo Creation parameters for each allocation.
+@param allocationCount Number of allocations to make.
+@param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
+@param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
+
+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+
+Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
+It is just a general purpose allocation function able to make multiple allocations at once.
+It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
+
+All allocations are made using the same parameters. All of them are created out of the same memory pool and type.
+If any allocation fails, all allocations already made within this function call are also freed, so that when
+the returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
+    VmaAllocator allocator,
+    const VkMemoryRequirements* pVkMemoryRequirements,
+    const VmaAllocationCreateInfo* pCreateInfo,
+    size_t allocationCount,
+    VmaAllocation* pAllocations,
+    VmaAllocationInfo* pAllocationInfo);
+
+/**
+@param[out] pAllocation Handle to allocated memory.
+@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+You should free the memory using vmaFreeMemory().
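+
+For example, a sketch (assuming `buffer` was already created with `vkCreateBuffer()`
+and is bound afterwards using this library's vmaBindBufferMemory()):
+
+\code
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VmaAllocation allocation;
+vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &allocation, NULL);
+vmaBindBufferMemory(allocator, allocation, buffer);
+\endcode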
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
+    VmaAllocator allocator,
+    VkBuffer buffer,
+    const VmaAllocationCreateInfo* pCreateInfo,
+    VmaAllocation* pAllocation,
+    VmaAllocationInfo* pAllocationInfo);
+
+/// Function similar to vmaAllocateMemoryForBuffer().
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
+    VmaAllocator allocator,
+    VkImage image,
+    const VmaAllocationCreateInfo* pCreateInfo,
+    VmaAllocation* pAllocation,
+    VmaAllocationInfo* pAllocationInfo);
+
+/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
+
+Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
+    VmaAllocator allocator,
+    VmaAllocation allocation);
+
+/** \brief Frees memory and destroys multiple allocations.
+
+Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
+It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
+vmaAllocateMemoryPages() and other functions.
+It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
+
+Allocations in `pAllocations` array can come from any memory pools and types.
+Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
+    VmaAllocator allocator,
+    size_t allocationCount,
+    VmaAllocation* pAllocations);
+
+/** \brief Deprecated.
+
+In version 2.2.0 it used to try to change allocation's size without moving or reallocating it.
+In current version it returns `VK_SUCCESS` only if `newSize` equals current allocation's size.
+Otherwise returns `VK_ERROR_OUT_OF_POOL_MEMORY`, indicating that allocation's size could not be changed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
+    VmaAllocator allocator,
+    VmaAllocation allocation,
+    VkDeviceSize newSize);
+
+/** \brief Returns current information about specified allocation and atomically marks it as used in current frame.
+
+Current parameters of given allocation are returned in `pAllocationInfo`.
+
+This function also atomically "touches" allocation - marks it as used in current frame,
+just like vmaTouchAllocation().
+If the allocation is in lost state, `pAllocationInfo->deviceMemory == VK_NULL_HANDLE`.
+
+Although this function uses atomics and doesn't lock any mutex, so it should be quite efficient,
+you may still want to avoid calling it too often.
+
+- You can retrieve same VmaAllocationInfo structure while creating your resource, from function
+  vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change
+  (e.g. due to defragmentation or allocation becoming lost).
+- If you just want to check if allocation is not lost, vmaTouchAllocation() will work faster.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
+    VmaAllocator allocator,
+    VmaAllocation allocation,
+    VmaAllocationInfo* pAllocationInfo);
+
+/** \brief Returns `VK_TRUE` if allocation is not lost and atomically marks it as used in current frame.
+
+If the allocation has been created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
+this function returns `VK_TRUE` if it's not in lost state, so it can still be used.
+It then also atomically "touches" the allocation - marks it as used in current frame,
+so that you can be sure it won't become lost in current frame or next `frameInUseCount` frames.
+
+If the allocation is in lost state, the function returns `VK_FALSE`.
+Memory of such allocation, as well as buffer or image bound to it, should not be used.
+Lost allocation and the buffer/image still need to be destroyed.
+
+If the allocation has been created without #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
+this function always returns `VK_TRUE`.
+*/
+VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
+    VmaAllocator allocator,
+    VmaAllocation allocation);
+
+/** \brief Sets pUserData in given allocation to new value.
+
+If the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT,
+pUserData must be either null, or pointer to a null-terminated string. The function
+makes local copy of the string and sets it as allocation's `pUserData`. String
+passed as pUserData doesn't need to be valid for whole lifetime of the allocation -
+you can free it after this call. String previously pointed by allocation's
+pUserData is freed from memory.
+
+If the flag was not used, the value of pointer `pUserData` is just copied to
+allocation's `pUserData`. It is opaque, so you can use it however you want - e.g.
+as a pointer, ordinal number or some handle to your own data.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
+    VmaAllocator allocator,
+    VmaAllocation allocation,
+    void* pUserData);
+
+/** \brief Creates new allocation that is in lost state from the beginning.
+
+It can be useful if you need a dummy, non-null allocation.
+
+You still need to destroy created object using vmaFreeMemory().
+
+Returned allocation is not tied to any specific memory pool or memory type and
+not bound to any image or buffer. It has size = 0. It cannot be turned into
+a real, non-empty allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
+    VmaAllocator allocator,
+    VmaAllocation* pAllocation);
+
+/** \brief Maps memory represented by given allocation and returns pointer to it.
+
+Maps memory represented by given allocation to make it accessible to CPU code.
+When succeeded, `*ppData` contains pointer to first byte of this memory.
+If the allocation is part of bigger `VkDeviceMemory` block, the pointer is
+correctly offset to the beginning of region assigned to this particular
+allocation.
+
+Mapping is internally reference-counted and synchronized, so even though the raw Vulkan
+function `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory`
+multiple times simultaneously, it is safe to call this function on allocations
+assigned to the same memory block. Actual Vulkan memory will be mapped on first
+mapping and unmapped on last unmapping.
+
+If the function succeeded, you must call vmaUnmapMemory() to unmap the
+allocation when mapping is no longer needed or before freeing the allocation, at
+the latest.
+
+It is also safe to call this function multiple times on the same allocation. You
+must call vmaUnmapMemory() same number of times as you called vmaMapMemory().
+
+It is also safe to call this function on allocation created with
+#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
+You must still call vmaUnmapMemory() same number of times as you called
+vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the
+"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
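+
+A minimal sketch of this pattern (assuming `myData` is some host-side variable to upload):
+
+\code
+void* mappedData;
+if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
+{
+    memcpy(mappedData, &myData, sizeof(myData));
+    vmaUnmapMemory(allocator, allocation);
+}
+\endcode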
+
+This function fails when used on an allocation made in a memory type that is not
+`HOST_VISIBLE`.
+
+This function always fails when called for an allocation that was created with the
+#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocations cannot be
+mapped.
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+    VmaAllocator allocator,
+    VmaAllocation allocation,
+    void** ppData);
+
+/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
+
+For details, see description of vmaMapMemory().
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+    VmaAllocator allocator,
+    VmaAllocation allocation);
+
+/** \brief Flushes memory of given allocation.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with the given range of the given allocation.
+It needs to be called after writing to mapped memory for memory types that are not `HOST_COHERENT`.
+The unmap operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of the allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of the given `allocation`.
+If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass the allocation's offset as `offset`!
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+
+/** \brief Invalidates memory of given allocation.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with the given range of the given allocation.
+It needs to be called before reading from mapped memory for memory types that are not `HOST_COHERENT`.
+The map operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of the allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of the given `allocation`.
+If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass the allocation's offset as `offset`!
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
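+
+/*
+Example (a hedged sketch, not part of the original documentation; `allocator` and
+`alloc` are assumed): for memory that is not `HOST_COHERENT`, call
+
+    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
+
+after writing through a mapped pointer, and
+
+    vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
+
+before reading through it.
+*/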
+
+/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
+
+@param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
+
+Corruption detection is enabled only when the `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
+`VMA_DEBUG_MARGIN` is defined to nonzero, and only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
+
+Possible return values:
+
+- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of the specified memory types.
+- `VK_SUCCESS` - corruption detection has been performed and succeeded.
+- `VK_ERROR_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations.
+  `VMA_ASSERT` is also fired in that case.
+- Other value: Error returned by Vulkan, e.g. memory mapping failure.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
+
+/** \struct VmaDefragmentationContext
+\brief Opaque object that represents a started defragmentation process.
+
+Fill structure #VmaDefragmentationInfo2 and call function vmaDefragmentationBegin() to create it.
+Call function vmaDefragmentationEnd() to destroy it.
+*/
+VK_DEFINE_HANDLE(VmaDefragmentationContext)
+
+/// Flags to be used in vmaDefragmentationBegin().
+typedef enum VmaDefragmentationFlagBits {
+    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
+    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaDefragmentationFlagBits;
+typedef VkFlags VmaDefragmentationFlags;
+
+/** \brief Parameters for defragmentation.
+
+To be used with function vmaDefragmentationBegin().
+*/
+typedef struct VmaDefragmentationInfo2 {
+    /** \brief Reserved for future use. Should be 0.
+    */
+    VmaDefragmentationFlags flags;
+    /** \brief Number of allocations in `pAllocations` array.
+    */
+    uint32_t allocationCount;
+    /** \brief Pointer to array of allocations that can be defragmented.
+
+    The array should have `allocationCount` elements.
+    The array should not contain nulls.
+    Elements in the array should be unique - the same allocation cannot occur twice.
+    It is safe to pass allocations that are in the lost state - they are ignored.
+    All allocations not present in this array are considered non-moveable during this defragmentation.
+    */
+    VmaAllocation* pAllocations;
+    /** \brief Optional, output. Pointer to array that will be filled with information whether the allocation at certain index has been changed during defragmentation.
+
+    The array should have `allocationCount` elements.
+    You can pass null if you are not interested in this information.
+    */
+    VkBool32* pAllocationsChanged;
+    /** \brief Number of pools in `pPools` array.
+    */
+    uint32_t poolCount;
+    /** \brief Either null or pointer to array of pools to be defragmented.
+
+    All the allocations in the specified pools can be moved during defragmentation
+    and there is no way to check if they were really moved as in `pAllocationsChanged`,
+    so you must query all the allocations in all these pools for new `VkDeviceMemory`
+    and offset using vmaGetAllocationInfo() if you might need to recreate buffers
+    and images bound to them.
+
+    The array should have `poolCount` elements.
+    The array should not contain nulls.
+    Elements in the array should be unique - the same pool cannot occur twice.
+
+    Using this array is equivalent to specifying all allocations from the pools in `pAllocations`.
+    It might be more efficient.
+    */
+    VmaPool* pPools;
+    /** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on CPU side, like `memcpy()`, `memmove()`.
+
+    `VK_WHOLE_SIZE` means no limit.
+    */
+    VkDeviceSize maxCpuBytesToMove;
+    /** \brief Maximum number of allocations that can be moved to a different place using transfers on CPU side, like `memcpy()`, `memmove()`.
+
+    `UINT32_MAX` means no limit.
+    */
+    uint32_t maxCpuAllocationsToMove;
+    /** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on GPU side, posted to `commandBuffer`.
+
+    `VK_WHOLE_SIZE` means no limit.
+    */
+    VkDeviceSize maxGpuBytesToMove;
+    /** \brief Maximum number of allocations that can be moved to a different place using transfers on GPU side, posted to `commandBuffer`.
+
+    `UINT32_MAX` means no limit.
+    */
+    uint32_t maxGpuAllocationsToMove;
+    /** \brief Optional. Command buffer where GPU copy commands will be posted.
+
+    If not null, it must be a valid command buffer handle that supports the Transfer queue type.
+    It must be in the recording state and outside of a render pass instance.
+    You need to submit it and make sure it finished execution before calling vmaDefragmentationEnd().
+
+    Passing null means that only CPU defragmentation will be performed.
+    */
+    VkCommandBuffer commandBuffer;
+} VmaDefragmentationInfo2;
+
+typedef struct VmaDefragmentationPassMoveInfo {
+    VmaAllocation allocation;
+    VkDeviceMemory memory;
+    VkDeviceSize offset;
+} VmaDefragmentationPassMoveInfo;
+
+/** \brief Parameters for incremental defragmentation steps.
+
+To be used with function vmaBeginDefragmentationPass().
+*/
+typedef struct VmaDefragmentationPassInfo {
+    uint32_t moveCount;
+    VmaDefragmentationPassMoveInfo* pMoves;
+} VmaDefragmentationPassInfo;
+
+/** \brief Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
+
+\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead.
+*/
+typedef struct VmaDefragmentationInfo {
+    /** \brief Maximum total number of bytes that can be copied while moving allocations to different places.
+
+    Default is `VK_WHOLE_SIZE`, which means no limit.
+    */
+    VkDeviceSize maxBytesToMove;
+    /** \brief Maximum number of allocations that can be moved to a different place.
+
+    Default is `UINT32_MAX`, which means no limit.
+    */
+    uint32_t maxAllocationsToMove;
+} VmaDefragmentationInfo;
+
+/** \brief Statistics returned by function vmaDefragment(). */
+typedef struct VmaDefragmentationStats {
+    /// Total number of bytes that have been copied while moving allocations to different places.
+    VkDeviceSize bytesMoved;
+    /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
+    VkDeviceSize bytesFreed;
+    /// Number of allocations that have been moved to different places.
+    uint32_t allocationsMoved;
+    /// Number of empty `VkDeviceMemory` objects that have been released to the system.
+    uint32_t deviceMemoryBlocksFreed;
+} VmaDefragmentationStats;
+
+/** \brief Begins defragmentation process.
+
+@param allocator Allocator object.
+@param pInfo Structure filled with parameters of defragmentation.
+@param[out] pStats Optional. Statistics of defragmentation. You can pass null if you are not interested in this information.
+@param[out] pContext Context object that must be passed to vmaDefragmentationEnd() to finish defragmentation.
+@return `VK_SUCCESS` and `*pContext == null` if defragmentation finished within this function call. `VK_NOT_READY` and `*pContext != null` if defragmentation has been started and you need to call vmaDefragmentationEnd() to finish it. Negative value in case of error.
+
+Use this function instead of the old, deprecated vmaDefragment().
+
+Warning! Between the call to vmaDefragmentationBegin() and vmaDefragmentationEnd():
+
+- You should not use any of the allocations passed as `pInfo->pAllocations` or
+  any allocations that belong to pools passed as `pInfo->pPools`,
+  including calling vmaGetAllocationInfo(), vmaTouchAllocation(), or accessing
+  their data.
+- Some mutexes protecting internal data structures may be locked, so trying to
+  make or free any allocations, bind buffers or images, map memory, or launch
+  another simultaneous defragmentation in between may cause a stall (when done on
+  another thread) or a deadlock (when done on the same thread), unless you are
+  100% sure that the defragmented allocations are in different pools.
+- Information returned via `pStats` and `pInfo->pAllocationsChanged` is undefined.
+  It becomes valid after the call to vmaDefragmentationEnd().
+- If `pInfo->commandBuffer` is not null, you must submit that command buffer
+  and make sure it finished execution before calling vmaDefragmentationEnd().
+
+For more information and important limitations regarding defragmentation, see documentation chapter:
+[Defragmentation](@ref defragmentation).
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
+    VmaAllocator allocator,
+    const VmaDefragmentationInfo2* pInfo,
+    VmaDefragmentationStats* pStats,
+    VmaDefragmentationContext *pContext);
+
+/** \brief Ends defragmentation process.
+
+Use this function to finish defragmentation started by vmaDefragmentationBegin().
+It is safe to pass `context == null`. The function then does nothing.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
+    VmaAllocator allocator,
+    VmaDefragmentationContext context);
+
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
+    VmaAllocator allocator,
+    VmaDefragmentationContext context,
+    VmaDefragmentationPassInfo* pInfo
+);
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
+    VmaAllocator allocator,
+    VmaDefragmentationContext context
+);
+
+/** \brief Deprecated. Compacts memory by moving allocations.
+
+@param pAllocations Array of allocations that can be moved during this compaction.
+@param allocationCount Number of elements in pAllocations and pAllocationsChanged arrays.
+@param[out] pAllocationsChanged Array of boolean values that will indicate whether the matching allocation in the pAllocations array has been moved. This parameter is optional. Pass null if you don't need this information.
+@param pDefragmentationInfo Configuration parameters. Optional - pass null to use default values.
+@param[out] pDefragmentationStats Statistics returned by the function. Optional - pass null if you don't need this information.
+@return `VK_SUCCESS` if completed, negative error code in case of error.
+
+\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead, as sketched below.
+
+This function works by moving allocations to different places (different
+`VkDeviceMemory` objects and/or different offsets) in order to optimize memory
+usage. Only allocations that are in the `pAllocations` array can be moved. All other
+allocations are considered nonmovable in this call.
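+
+For illustration, a minimal sketch of the recommended begin/end flow (`allocator`,
+`allocs`, `allocCount`, and `allocsChanged` are assumed to exist; this example is
+not part of the original documentation):
+
+\code
+VmaDefragmentationInfo2 defragInfo = {};
+defragInfo.allocationCount = allocCount;
+defragInfo.pAllocations = allocs;
+defragInfo.pAllocationsChanged = allocsChanged; // optional
+
+VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
+VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
+// If defragInfo.commandBuffer was set, submit it and wait for it here.
+vmaDefragmentationEnd(allocator, defragCtx); // safe even if defragCtx is null
+\endcode
+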
+Basic rules:
+
+- Only allocations made in memory types that have
+  `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`
+  flags can be compacted. You may pass other allocations but it makes no sense -
+  these will never be moved.
+- Custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT or
+  #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag are not defragmented. Allocations
+  passed to this function that come from such pools are ignored.
+- Allocations created with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT or
+  created as dedicated allocations for any other reason are also ignored.
+- Both allocations made with or without #VMA_ALLOCATION_CREATE_MAPPED_BIT
+  flag can be compacted. If not persistently mapped, memory will be mapped
+  temporarily inside this function if needed.
+- You must not pass the same #VmaAllocation object multiple times in the `pAllocations` array.
+
+The function also frees empty `VkDeviceMemory` blocks.
+
+Warning: This function may be time-consuming, so you shouldn't call it too often
+(like after every resource creation/destruction).
+You can call it on special occasions (like when reloading a game level or
+when you just destroyed a lot of objects). Calling it every frame may be OK, but
+you should measure that on your platform.
+
+For more information, see the [Defragmentation](@ref defragmentation) chapter.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
+    VmaAllocator allocator,
+    VmaAllocation* pAllocations,
+    size_t allocationCount,
+    VkBool32* pAllocationsChanged,
+    const VmaDefragmentationInfo *pDefragmentationInfo,
+    VmaDefragmentationStats* pDefragmentationStats);
+
+/** \brief Binds buffer to allocation.
+
+Binds the specified buffer to the region of memory represented by the specified allocation.
+Gets `VkDeviceMemory` handle and offset from the allocation.
+If you want to create a buffer, allocate memory for it, and bind them together separately,
+you should use this function for binding instead of the standard `vkBindBufferMemory()`,
+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+(which is illegal in Vulkan).
+
+It is recommended to use the function vmaCreateBuffer() instead of this one.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
+    VmaAllocator allocator,
+    VmaAllocation allocation,
+    VkBuffer buffer);
+
+/** \brief Binds buffer to allocation with additional parameters.
+
+@param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
+@param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
+
+This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
+
+If `pNext` is not null, the #VmaAllocator object must have been created with the #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
+or with VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_1`. Otherwise the call fails.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
+    VmaAllocator allocator,
+    VmaAllocation allocation,
+    VkDeviceSize allocationLocalOffset,
+    VkBuffer buffer,
+    const void* pNext);
+
+/** \brief Binds image to allocation.
+
+Binds the specified image to the region of memory represented by the specified allocation.
+Gets `VkDeviceMemory` handle and offset from the allocation.
+If you want to create an image, allocate memory for it, and bind them together separately,
+you should use this function for binding instead of the standard `vkBindImageMemory()`,
+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+(which is illegal in Vulkan).
+
+It is recommended to use the function vmaCreateImage() instead of this one.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
+    VmaAllocator allocator,
+    VmaAllocation allocation,
+    VkImage image);
+
+/** \brief Binds image to allocation with additional parameters.
+
+@param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
+@param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
+
+This function is similar to vmaBindImageMemory(), but it provides additional parameters.
+
+If `pNext` is not null, the #VmaAllocator object must have been created with the #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
+or with VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_1`. Otherwise the call fails.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
+    VmaAllocator allocator,
+    VmaAllocation allocation,
+    VkDeviceSize allocationLocalOffset,
+    VkImage image,
+    const void* pNext);
+
+/**
+@param[out] pBuffer Buffer that was created.
+@param[out] pAllocation Allocation that was created.
+@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+This function automatically:
+
+-# Creates buffer.
+-# Allocates appropriate memory for it.
+-# Binds the buffer with the memory.
+
+If any of these operations fail, buffer and allocation are not created, the
+returned value is a negative error code, and `*pBuffer` and `*pAllocation` are null.
+
+If the function succeeded, you must destroy both buffer and allocation when you
+no longer need them using either the convenience function vmaDestroyBuffer() or
+separately, using `vkDestroyBuffer()` and vmaFreeMemory().
+
+If the VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, the
+VK_KHR_dedicated_allocation extension is used internally to query the driver whether
+it requires or prefers the new buffer to have a dedicated allocation. If yes,
+and if dedicated allocation is possible (VmaAllocationCreateInfo::pool is null
+and VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates a dedicated
+allocation for this buffer, just like when using
+VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
+    VmaAllocator allocator,
+    const VkBufferCreateInfo* pBufferCreateInfo,
+    const VmaAllocationCreateInfo* pAllocationCreateInfo,
+    VkBuffer* pBuffer,
+    VmaAllocation* pAllocation,
+    VmaAllocationInfo* pAllocationInfo);
+
+/** \brief Destroys Vulkan buffer and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyBuffer(device, buffer, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as buffer and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
+    VmaAllocator allocator,
+    VkBuffer buffer,
+    VmaAllocation allocation);
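+
+/*
+Example usage of vmaCreateBuffer() and vmaDestroyBuffer() - a hedged sketch, not
+part of the original documentation; `allocator` is assumed to be a valid
+VmaAllocator:
+
+    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    bufCreateInfo.size = 65536;
+    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+    VmaAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+    VkBuffer buf;
+    VmaAllocation alloc;
+    if(vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr) == VK_SUCCESS)
+    {
+        // ... use the buffer ...
+        vmaDestroyBuffer(allocator, buf, alloc);
+    }
+*/
+
+/// Function similar to vmaCreateBuffer().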
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
+    VmaAllocator allocator,
+    const VkImageCreateInfo* pImageCreateInfo,
+    const VmaAllocationCreateInfo* pAllocationCreateInfo,
+    VkImage* pImage,
+    VmaAllocation* pAllocation,
+    VmaAllocationInfo* pAllocationInfo);
+
+/** \brief Destroys Vulkan image and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyImage(device, image, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as image and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
+    VmaAllocator allocator,
+    VkImage image,
+    VmaAllocation allocation);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
+
+// For Visual Studio IntelliSense.
+#if defined(__cplusplus) && defined(__INTELLISENSE__)
+#define VMA_IMPLEMENTATION
+#endif
+
+#ifdef VMA_IMPLEMENTATION
+#undef VMA_IMPLEMENTATION
+
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+
+/*******************************************************************************
+CONFIGURATION SECTION
+
+Define some of these macros before each #include of this header or change them
+here if you need behavior other than the default, depending on your environment.
+*/
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
+
+Define to 0 if you are going to provide your own pointers to Vulkan functions via
+VmaAllocatorCreateInfo::pVulkanFunctions.
+*/
+#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
+#define VMA_STATIC_VULKAN_FUNCTIONS 1
+#endif
+
+// Define this macro to 1 to make the library use STL containers instead of its own implementation.
+//#define VMA_USE_STL_CONTAINERS 1
+
+/* Set this macro to 1 to make the library include and use STL containers:
+std::pair, std::vector, std::list, std::unordered_map.
+
+Set it to 0 or leave it undefined to make the library use its own implementation of
+the containers.
+*/
+#if VMA_USE_STL_CONTAINERS
+    #define VMA_USE_STL_VECTOR 1
+    #define VMA_USE_STL_UNORDERED_MAP 1
+    #define VMA_USE_STL_LIST 1
+#endif
+
+#ifndef VMA_USE_STL_SHARED_MUTEX
+    // Compiler conforms to C++17.
+    #if __cplusplus >= 201703L
+        #define VMA_USE_STL_SHARED_MUTEX 1
+    // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
+    // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
+    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
+    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
+        #define VMA_USE_STL_SHARED_MUTEX 1
+    #else
+        #define VMA_USE_STL_SHARED_MUTEX 0
+    #endif
+#endif
+
+/*
+THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
+Library has its own container implementation.
+*/
+#if VMA_USE_STL_VECTOR
+    #include <vector>
+#endif
+
+#if VMA_USE_STL_UNORDERED_MAP
+    #include <unordered_map>
+#endif
+
+#if VMA_USE_STL_LIST
+    #include <list>
+#endif
+
+/*
+The following headers are used in this CONFIGURATION section only, so feel free to
+remove them if not needed.
+*/
+#include <cassert> // for assert
+#include <algorithm> // for min, max
+#include <mutex>
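+
+/*
+For example, a user might override the debugging macros defined below before
+including the implementation (a hypothetical sketch; the values are illustrative
+only):
+
+    #define VMA_DEBUG_MARGIN 16
+    #define VMA_DEBUG_DETECT_CORRUPTION 1
+    #define VMA_IMPLEMENTATION
+    #include "vk_mem_alloc.h"
+*/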
+
+#ifndef VMA_NULL
+    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
+    #define VMA_NULL nullptr
+#endif
+
+#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
+#include <malloc.h> // for memalign()
+void *aligned_alloc(size_t alignment, size_t size)
+{
+    // alignment must be >= sizeof(void*)
+    if(alignment < sizeof(void*))
+    {
+        alignment = sizeof(void*);
+    }
+
+    return memalign(alignment, size);
+}
+#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
+#include <cstdlib> // for posix_memalign()
+void *aligned_alloc(size_t alignment, size_t size)
+{
+    // alignment must be >= sizeof(void*)
+    if(alignment < sizeof(void*))
+    {
+        alignment = sizeof(void*);
+    }
+
+    void *pointer;
+    if(posix_memalign(&pointer, alignment, size) == 0)
+        return pointer;
+    return VMA_NULL;
+}
+#endif
+
+// If your compiler is not compatible with C++11 and the definition of the
+// aligned_alloc() function is missing, uncommenting the following line may help:
+
+//#include <malloc.h>
+
+// Normal assert to check for programmer's errors, especially in Debug configuration.
+#ifndef VMA_ASSERT
+    #ifdef NDEBUG
+        #define VMA_ASSERT(expr)
+    #else
+        #define VMA_ASSERT(expr) assert(expr)
+    #endif
+#endif
+
+// Assert that will be called very often, like inside data structures e.g. operator[].
+// Making it non-empty can make program slow.
+#ifndef VMA_HEAVY_ASSERT
+    #ifdef NDEBUG
+        #define VMA_HEAVY_ASSERT(expr)
+    #else
+        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
+    #endif
+#endif
+
+#ifndef VMA_ALIGN_OF
+    #define VMA_ALIGN_OF(type) (__alignof(type))
+#endif
+
+#ifndef VMA_SYSTEM_ALIGNED_MALLOC
+    #if defined(_WIN32)
+        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
+    #else
+        #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size)))
+    #endif
+#endif
+
+#ifndef VMA_SYSTEM_FREE
+    #if defined(_WIN32)
+        #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
+    #else
+        #define VMA_SYSTEM_FREE(ptr) free(ptr)
+    #endif
+#endif
+
+#ifndef VMA_MIN
+    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
+#endif
+
+#ifndef VMA_MAX
+    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
+#endif
+
+#ifndef VMA_SWAP
+    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
+#endif
+
+#ifndef VMA_SORT
+    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
+#endif
+
+#ifndef VMA_DEBUG_LOG
+    #define VMA_DEBUG_LOG(format, ...)
+    /*
+    #define VMA_DEBUG_LOG(format, ...) do { \
+        printf(format, __VA_ARGS__); \
+        printf("\n"); \
+    } while(false)
+    */
+#endif
+
+// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
+#if VMA_STATS_STRING_ENABLED
+    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
+    {
+        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
+    }
+    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
+    {
+        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
+    }
+    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
+    {
+        snprintf(outStr, strLen, "%p", ptr);
+    }
+#endif
+
+#ifndef VMA_MUTEX
+    class VmaMutex
+    {
+    public:
+        void Lock() { m_Mutex.lock(); }
+        void Unlock() { m_Mutex.unlock(); }
+        bool TryLock() { return m_Mutex.try_lock(); }
+    private:
+        std::mutex m_Mutex;
+    };
+    #define VMA_MUTEX VmaMutex
+#endif
+
+// Read-write mutex, where "read" is shared access, "write" is exclusive access.
+#ifndef VMA_RW_MUTEX
+    #if VMA_USE_STL_SHARED_MUTEX
+        // Use std::shared_mutex from C++17.
+        #include <shared_mutex>
+        class VmaRWMutex
+        {
+        public:
+            void LockRead() { m_Mutex.lock_shared(); }
+            void UnlockRead() { m_Mutex.unlock_shared(); }
+            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
+            void LockWrite() { m_Mutex.lock(); }
+            void UnlockWrite() { m_Mutex.unlock(); }
+            bool TryLockWrite() { return m_Mutex.try_lock(); }
+        private:
+            std::shared_mutex m_Mutex;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
+        // Use SRWLOCK from WinAPI.
+        // Minimum supported client = Windows Vista, server = Windows Server 2008.
+        class VmaRWMutex
+        {
+        public:
+            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
+            void LockRead() { AcquireSRWLockShared(&m_Lock); }
+            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
+            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
+            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
+            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
+            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
+        private:
+            SRWLOCK m_Lock;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #else
+        // Less efficient fallback: Use normal mutex.
+        class VmaRWMutex
+        {
+        public:
+            void LockRead() { m_Mutex.Lock(); }
+            void UnlockRead() { m_Mutex.Unlock(); }
+            bool TryLockRead() { return m_Mutex.TryLock(); }
+            void LockWrite() { m_Mutex.Lock(); }
+            void UnlockWrite() { m_Mutex.Unlock(); }
+            bool TryLockWrite() { return m_Mutex.TryLock(); }
+        private:
+            VMA_MUTEX m_Mutex;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #endif // #if VMA_USE_STL_SHARED_MUTEX
+#endif // #ifndef VMA_RW_MUTEX
+
+/*
+If providing your own implementation, you need to implement a subset of std::atomic.
+*/
+#ifndef VMA_ATOMIC_UINT32
+    #include <atomic>
+    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
+#endif
+
+#ifndef VMA_ATOMIC_UINT64
+    #include <atomic>
+    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
+#endif
+
+#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
+    /**
+    Every allocation will have its own memory block.
+    Define to 1 for debugging purposes only.
+    */
+    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
+#endif
+
+#ifndef VMA_DEBUG_ALIGNMENT
+    /**
+    Minimum alignment of all allocations, in bytes.
+    Set to more than 1 for debugging purposes only. Must be a power of two.
+    */
+    #define VMA_DEBUG_ALIGNMENT (1)
+#endif
+
+#ifndef VMA_DEBUG_MARGIN
+    /**
+    Minimum margin before and after every allocation, in bytes.
+    Set nonzero for debugging purposes only.
+    */
+    #define VMA_DEBUG_MARGIN (0)
+#endif
+
+#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
+    /**
+    Define this macro to 1 to automatically fill newly created and destroyed
+    allocations with some bit pattern.
+    */
+    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
+#endif
+
+#ifndef VMA_DEBUG_DETECT_CORRUPTION
+    /**
+    Define this macro to 1 together with a non-zero value of VMA_DEBUG_MARGIN to
+    enable writing a magic value to the margin before and after every allocation and
+    validating it, so that memory corruptions (out-of-bounds writes) are detected.
+    */
+    #define VMA_DEBUG_DETECT_CORRUPTION (0)
+#endif
+
+#ifndef VMA_DEBUG_GLOBAL_MUTEX
+    /**
+    Set this to 1 for debugging purposes only, to enable a single mutex protecting all
+    entry calls to the library. Can be useful for debugging multithreading issues.
+    */
+    #define VMA_DEBUG_GLOBAL_MUTEX (0)
+#endif
+
+#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
+    /**
+    Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
+    Set to more than 1 for debugging purposes only. Must be a power of two.
+    */
+    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
+#endif
+
+#ifndef VMA_SMALL_HEAP_MAX_SIZE
+    /// Maximum size of a memory heap in Vulkan to consider it "small".
+    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
+#endif
+
+#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
+    /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
+    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
+#endif
+
+#ifndef VMA_CLASS_NO_COPY
+    #define VMA_CLASS_NO_COPY(className) \
+        private: \
+            className(const className&) = delete; \
+            className& operator=(const className&) = delete;
+#endif
+
+static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
+
+// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
+static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
+
+static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
+static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
+
+/*******************************************************************************
+END OF CONFIGURATION
+*/
+
+static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
+
+static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
+    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
+
+// Returns number of bits set to 1 in (v), using parallel bit summation (popcount).
+static inline uint32_t VmaCountBitsSet(uint32_t v)
+{
+    uint32_t c = v - ((v >> 1) & 0x55555555);
+    c = ((c >>  2) & 0x33333333) + (c & 0x33333333);
+    c = ((c >>  4) + c) & 0x0F0F0F0F;
+    c = ((c >>  8) + c) & 0x00FF00FF;
+    c = ((c >> 16) + c) & 0x0000FFFF;
+    return c;
+}
+
+// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
+// Use types like uint32_t, uint64_t as T.
+template <typename T>
+static inline T VmaAlignUp(T val, T align)
+{
+    return (val + align - 1) / align * align;
+}
+// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
+// Use types like uint32_t, uint64_t as T.
+template <typename T>
+static inline T VmaAlignDown(T val, T align)
+{
+    return val / align * align;
+}
+
+// Division with mathematical rounding to nearest number.
+template <typename T>
+static inline T VmaRoundDiv(T x, T y)
+{
+    return (x + (y / (T)2)) / y;
+}
+
+/*
+Returns true if the given number is a power of two.
+T must be an unsigned integer, or a signed integer that is always nonnegative.
+For 0 returns true.
+*/
+template <typename T>
+inline bool VmaIsPow2(T x)
+{
+    return (x & (x-1)) == 0;
+}
+
+// Returns smallest power of 2 greater or equal to v.
+static inline uint32_t VmaNextPow2(uint32_t v)
+{
+    v--;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v++;
+    return v;
+}
+static inline uint64_t VmaNextPow2(uint64_t v)
+{
+    v--;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v |= v >> 32;
+    v++;
+    return v;
+}
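+
+/*
+Worked examples for the helpers above (illustration only, not part of the
+original source):
+
+    VmaCountBitsSet(0x0Bu) == 3   // 0b1011 has three bits set
+    VmaAlignUp<uint32_t>(11, 8) == 16; VmaAlignDown<uint32_t>(11, 8) == 8
+    VmaNextPow2(5u) == 8; VmaNextPow2(8u) == 8
+*/
+
+// Returns largest power of 2 less or equal to v.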
+static inline uint32_t VmaPrevPow2(uint32_t v)
+{
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v = v ^ (v >> 1);
+    return v;
+}
+static inline uint64_t VmaPrevPow2(uint64_t v)
+{
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v |= v >> 32;
+    v = v ^ (v >> 1);
+    return v;
+}
+
+static inline bool VmaStrIsEmpty(const char* pStr)
+{
+    return pStr == VMA_NULL || *pStr == '\0';
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+static const char* VmaAlgorithmToStr(uint32_t algorithm)
+{
+    switch(algorithm)
+    {
+    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
+        return "Linear";
+    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
+        return "Buddy";
+    case 0:
+        return "Default";
+    default:
+        VMA_ASSERT(0);
+        return "";
+    }
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+#ifndef VMA_SORT
+
+template<typename Iterator, typename Compare>
+Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
+{
+    Iterator centerValue = end; --centerValue;
+    Iterator insertIndex = beg;
+    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
+    {
+        if(cmp(*memTypeIndex, *centerValue))
+        {
+            if(insertIndex != memTypeIndex)
+            {
+                VMA_SWAP(*memTypeIndex, *insertIndex);
+            }
+            ++insertIndex;
+        }
+    }
+    if(insertIndex != centerValue)
+    {
+        VMA_SWAP(*insertIndex, *centerValue);
+    }
+    return insertIndex;
+}
+
+template<typename Iterator, typename Compare>
+void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
+{
+    if(beg < end)
+    {
+        Iterator it = VmaQuickSortPartition(beg, end, cmp);
+        VmaQuickSort(beg, it, cmp);
+        VmaQuickSort(it + 1, end, cmp);
+    }
+}
+
+#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
+
+#endif // #ifndef VMA_SORT
+
+/*
+Returns true if two memory blocks occupy overlapping pages.
+ResourceA must be at a lower memory offset than ResourceB.
+
+Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
+chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
+*/
+static inline bool VmaBlocksOnSamePage(
+    VkDeviceSize resourceAOffset,
+    VkDeviceSize resourceASize,
+    VkDeviceSize resourceBOffset,
+    VkDeviceSize pageSize)
+{
+    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
+    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
+    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
+    VkDeviceSize resourceBStart = resourceBOffset;
+    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
+    return resourceAEndPage == resourceBStartPage;
+}
+
+enum VmaSuballocationType
+{
+    VMA_SUBALLOCATION_TYPE_FREE = 0,
+    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
+    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
+    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
+    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
+    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
+    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+};
+
+/*
+Returns true if given suballocation types could conflict and must respect
+VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
+or linear image and the other one is an optimal image. If the type is unknown, behave
+conservatively.
+*/ +static inline bool VmaIsBufferImageGranularityConflict( + VmaSuballocationType suballocType1, + VmaSuballocationType suballocType2) +{ + if(suballocType1 > suballocType2) + { + VMA_SWAP(suballocType1, suballocType2); + } + + switch(suballocType1) + { + case VMA_SUBALLOCATION_TYPE_FREE: + return false; + case VMA_SUBALLOCATION_TYPE_UNKNOWN: + return true; + case VMA_SUBALLOCATION_TYPE_BUFFER: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL: + return false; + default: + VMA_ASSERT(0); + return true; + } +} + +static void VmaWriteMagicValue(void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + uint32_t* pDst = (uint32_t*)((char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for(size_t i = 0; i < numberCount; ++i, ++pDst) + { + *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE; + } +#else + // no-op +#endif +} + +static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for(size_t i = 0; i < numberCount; ++i, ++pSrc) + { + if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE) + { + return false; + } + } +#endif + return true; +} + +/* +Fills structure with parameters of an example buffer to be used for transfers +during GPU memory defragmentation. +*/ +static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo) +{ + memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo)); + outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size. +} + +// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope). +struct VmaMutexLock +{ + VMA_CLASS_NO_COPY(VmaMutexLock) +public: + VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { if(m_pMutex) { m_pMutex->Lock(); } } + ~VmaMutexLock() + { if(m_pMutex) { m_pMutex->Unlock(); } } +private: + VMA_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading. +struct VmaMutexLockRead +{ + VMA_CLASS_NO_COPY(VmaMutexLockRead) +public: + VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { if(m_pMutex) { m_pMutex->LockRead(); } } + ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } } +private: + VMA_RW_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing. +struct VmaMutexLockWrite +{ + VMA_CLASS_NO_COPY(VmaMutexLockWrite) +public: + VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? 
&mutex : VMA_NULL)
+    { if(m_pMutex) { m_pMutex->LockWrite(); } }
+    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
+private:
+    VMA_RW_MUTEX* m_pMutex;
+};
+
+#if VMA_DEBUG_GLOBAL_MUTEX
+    static VMA_MUTEX gDebugGlobalMutex;
+    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
+#else
+    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
+#endif
+
+// Minimum size of a free suballocation to register it in the free suballocation collection.
+static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
+
+/*
+Performs binary search and returns iterator to the first element that is greater or
+equal to (key), according to comparison (cmp).
+
+Cmp should return true if the first argument is less than the second argument.
+
+Returned value is the found element, if present in the collection, or the place where
+a new element with value (key) should be inserted.
+*/
+template <typename CmpLess, typename IterT, typename KeyT>
+static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
+{
+    size_t down = 0, up = (end - beg);
+    while(down < up)
+    {
+        const size_t mid = (down + up) / 2;
+        if(cmp(*(beg+mid), key))
+        {
+            down = mid + 1;
+        }
+        else
+        {
+            up = mid;
+        }
+    }
+    return beg + down;
+}
+
+template<typename CmpLess, typename IterT, typename KeyT>
+IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
+{
+    IterT it = VmaBinaryFindFirstNotLess(
+        beg, end, value, cmp);
+    if(it == end ||
+        (!cmp(*it, value) && !cmp(value, *it)))
+    {
+        return it;
+    }
+    return end;
+}
+
+/*
+Returns true if all pointers in the array are not-null and unique.
+Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
+T must be a pointer type, e.g. VmaAllocation, VmaPool.
+*/
+template<typename T>
+static bool VmaValidatePointerArray(uint32_t count, const T* arr)
+{
+    for(uint32_t i = 0; i < count; ++i)
+    {
+        const T iPtr = arr[i];
+        if(iPtr == VMA_NULL)
+        {
+            return false;
+        }
+        for(uint32_t j = i + 1; j < count; ++j)
+        {
+            if(iPtr == arr[j])
+            {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Memory allocation
+
+static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
+{
+    if((pAllocationCallbacks != VMA_NULL) &&
+        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
+    {
+        return (*pAllocationCallbacks->pfnAllocation)(
+            pAllocationCallbacks->pUserData,
+            size,
+            alignment,
+            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+    }
+    else
+    {
+        return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
+    }
+}
+
+static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
+{
+    if((pAllocationCallbacks != VMA_NULL) &&
+        (pAllocationCallbacks->pfnFree != VMA_NULL))
+    {
+        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
+    }
+    else
+    {
+        VMA_SYSTEM_FREE(ptr);
+    }
+}
+
+template<typename T>
+static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
+{
+    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
+}
+
+template<typename T>
+static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
+{
+    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
+}
+
+#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
+
+#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
+
+template<typename T>
+static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
+{
+    ptr->~T();
+    VmaFree(pAllocationCallbacks, ptr);
+}
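+
+/*
+Usage sketch for the allocation helpers above (illustration only; MyClass and
+pAllocationCallbacks are assumed):
+
+    MyClass* obj = vma_new(pAllocationCallbacks, MyClass)(); // aligned alloc + placement-new
+    vma_delete(pAllocationCallbacks, obj);                   // destructor call + VmaFree
+*/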
+
+template<typename T>
+static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
+{
+    if(ptr != VMA_NULL)
+    {
+        for(size_t i = count; i--; )
+        {
+            ptr[i].~T();
+        }
+        VmaFree(pAllocationCallbacks, ptr);
+    }
+}
+
+static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
+{
+    if(srcStr != VMA_NULL)
+    {
+        const size_t len = strlen(srcStr);
+        char* const result = vma_new_array(allocs, char, len + 1);
+        memcpy(result, srcStr, len + 1);
+        return result;
+    }
+    else
+    {
+        return VMA_NULL;
+    }
+}
+
+static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
+{
+    if(str != VMA_NULL)
+    {
+        const size_t len = strlen(str);
+        vma_delete_array(allocs, str, len + 1);
+    }
+}
+
+// STL-compatible allocator.
+template<typename T>
+class VmaStlAllocator
+{
+public:
+    const VkAllocationCallbacks* const m_pCallbacks;
+    typedef T value_type;
+
+    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
+    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
+
+    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
+    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
+
+    template<typename U>
+    bool operator==(const VmaStlAllocator<U>& rhs) const
+    {
+        return m_pCallbacks == rhs.m_pCallbacks;
+    }
+    template<typename U>
+    bool operator!=(const VmaStlAllocator<U>& rhs) const
+    {
+        return m_pCallbacks != rhs.m_pCallbacks;
+    }
+
+    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
+};
+
+#if VMA_USE_STL_VECTOR
+
+#define VmaVector std::vector
+
+template<typename T, typename allocatorT>
+static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
+{
+    vec.insert(vec.begin() + index, item);
+}
+
+template<typename T, typename allocatorT>
+static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
+{
+    vec.erase(vec.begin() + index);
+}
+
+#else // #if VMA_USE_STL_VECTOR
+
+/* Class with interface compatible with a subset of std::vector.
+T must be POD because constructors and destructors are not called and memcpy is
+used for these objects. */
+template<typename T, typename AllocatorT>
+class VmaVector
+{
+public:
+    typedef T value_type;
+
+    VmaVector(const AllocatorT& allocator) :
+        m_Allocator(allocator),
+        m_pArray(VMA_NULL),
+        m_Count(0),
+        m_Capacity(0)
+    {
+    }
+
+    VmaVector(size_t count, const AllocatorT& allocator) :
+        m_Allocator(allocator),
+        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
+        m_Count(count),
+        m_Capacity(count)
+    {
+    }
+
+    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
+    // value is unused.
+    VmaVector(size_t count, const T& value, const AllocatorT& allocator)
+        : VmaVector(count, allocator) {}
+
+    VmaVector(const VmaVector<T, AllocatorT>& src) :
+        m_Allocator(src.m_Allocator),
+        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
+        m_Count(src.m_Count),
+        m_Capacity(src.m_Count)
+    {
+        if(m_Count != 0)
+        {
+            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
+        }
+    }
+
+    ~VmaVector()
+    {
+        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+    }
+
+    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
+    {
+        if(&rhs != this)
+        {
+            resize(rhs.m_Count);
+            if(m_Count != 0)
+            {
+                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
+            }
+        }
+        return *this;
+    }
+
+    bool empty() const { return m_Count == 0; }
+    size_t size() const { return m_Count; }
+    T* data() { return m_pArray; }
+    const T* data() const { return m_pArray; }
+
+    T& operator[](size_t index)
+    {
+        VMA_HEAVY_ASSERT(index < m_Count);
+        return m_pArray[index];
+    }
+    const T& operator[](size_t index) const
+    {
+        VMA_HEAVY_ASSERT(index < m_Count);
+        return m_pArray[index];
+    }
+
+    T& front()
+    {
+        VMA_HEAVY_ASSERT(m_Count > 0);
+        return m_pArray[0];
+    }
+    const T& front() const
+    {
+        VMA_HEAVY_ASSERT(m_Count > 0);
+        return m_pArray[0];
+    }
+    T& back()
+    {
+        VMA_HEAVY_ASSERT(m_Count > 0);
+        return m_pArray[m_Count - 1];
+    }
+    const T& back() const
+    {
+        VMA_HEAVY_ASSERT(m_Count > 0);
+        return m_pArray[m_Count - 1];
+    }
+
+    void reserve(size_t newCapacity, bool freeMemory = false)
+    {
+        newCapacity = VMA_MAX(newCapacity, m_Count);
+
+        if((newCapacity < m_Capacity) && !freeMemory)
+        {
+            newCapacity = m_Capacity;
+        }
+
+        if(newCapacity != m_Capacity)
+        {
+            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
+            if(m_Count != 0)
+            {
+                memcpy(newArray, m_pArray, m_Count * sizeof(T));
+            }
+            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+            m_Capacity = newCapacity;
+            m_pArray = newArray;
+        }
+    }
+
+    void resize(size_t newCount, bool freeMemory = false)
+    {
+        size_t newCapacity = m_Capacity;
+        if(newCount > m_Capacity)
+        {
+            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
+        }
+        else if(freeMemory)
+        {
+            newCapacity = newCount;
+        }
+
+        if(newCapacity != m_Capacity)
+        {
+            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
+            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
+            if(elementsToCopy != 0)
+            {
+                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
+            }
+            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+            m_Capacity = newCapacity;
+            m_pArray = newArray;
+        }
+
+        m_Count = newCount;
+    }
+
+    void clear(bool freeMemory = false)
+    {
+        resize(0, freeMemory);
+    }
+
+    void insert(size_t index, const T& src)
+    {
+        VMA_HEAVY_ASSERT(index <= m_Count);
+        const size_t oldCount = size();
+        resize(oldCount + 1);
+        if(index < oldCount)
+        {
+            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
+        }
+        m_pArray[index] = src;
+    }
+
+    void remove(size_t index)
+    {
+        VMA_HEAVY_ASSERT(index < m_Count);
+        const size_t oldCount = size();
+        if(index < oldCount - 1)
+        {
+            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
+        }
+        resize(oldCount - 1);
+    }
+
+    void push_back(const T& src)
+    {
+        const size_t newIndex = size();
+        resize(newIndex + 1);
+        m_pArray[newIndex] = src;
+    }
+
+    void pop_back()
+    {
+        VMA_HEAVY_ASSERT(m_Count > 0);
+        resize(size() - 1);
+    }
+
+    void push_front(const T& src)
+    {
+        insert(0, src);
+    }
+
+    void pop_front()
+    {
+        VMA_HEAVY_ASSERT(m_Count > 0);
+        remove(0);
+    }
+
+    typedef T* iterator;
+
+    iterator begin() { return m_pArray; }
+    iterator end() { return m_pArray + m_Count; }
+
+private:
+    AllocatorT m_Allocator;
+    T* m_pArray;
+    size_t m_Count;
+    size_t m_Capacity;
+};
+
+template<typename T, typename allocatorT>
+static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
+{
+    vec.insert(index, item);
+}
+
+template<typename T, typename allocatorT>
+static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
+{
+    vec.remove(index);
+}
+
+#endif // #if VMA_USE_STL_VECTOR
+
+template<typename CmpLess, typename VectorT>
+size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
+{
+    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
+        vector.data(),
+        vector.data() + vector.size(),
+        value,
+        CmpLess()) - vector.data();
+    VmaVectorInsert(vector, indexToInsert, value);
+    return indexToInsert;
+}
+
+template<typename CmpLess, typename VectorT>
+bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
+{
+    CmpLess comparator;
+    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
+        vector.begin(),
+        vector.end(),
+        value,
+        comparator);
+    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
+    {
+        size_t indexToRemove = it - vector.begin();
+        VmaVectorRemove(vector, indexToRemove);
+        return true;
+    }
+    return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaPoolAllocator
+
+/*
+Allocator for objects of type T using a list of arrays (pools) to speed up
+allocation. The number of elements that can be allocated is not bounded because the
+allocator can create multiple blocks.
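+
+A hypothetical usage sketch (MyItem is an assumed type, not part of the library):
+
+    VmaPoolAllocator<MyItem> pool(pAllocationCallbacks, 32);
+    MyItem* item = pool.Alloc(); // pops a free slot from a block and placement-news a MyItem
+    pool.Free(item);             // runs the destructor and returns the slot to the free list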
+*/
+template<typename T>
+class VmaPoolAllocator
+{
+    VMA_CLASS_NO_COPY(VmaPoolAllocator)
+public:
+    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
+    ~VmaPoolAllocator();
+    T* Alloc();
+    void Free(T* ptr);
+
+private:
+    union Item
+    {
+        uint32_t NextFreeIndex;
+        alignas(T) char Value[sizeof(T)];
+    };
+
+    struct ItemBlock
+    {
+        Item* pItems;
+        uint32_t Capacity;
+        uint32_t FirstFreeIndex;
+    };
+
+    const VkAllocationCallbacks* m_pAllocationCallbacks;
+    const uint32_t m_FirstBlockCapacity;
+    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
+
+    ItemBlock& CreateNewBlock();
+};
+
+template<typename T>
+VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
+    m_pAllocationCallbacks(pAllocationCallbacks),
+    m_FirstBlockCapacity(firstBlockCapacity),
+    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
+{
+    VMA_ASSERT(m_FirstBlockCapacity > 1);
+}
+
+template<typename T>
+VmaPoolAllocator<T>::~VmaPoolAllocator()
+{
+    for(size_t i = m_ItemBlocks.size(); i--; )
+        vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
+    m_ItemBlocks.clear();
+}
+
+template<typename T>
+T* VmaPoolAllocator<T>::Alloc()
+{
+    for(size_t i = m_ItemBlocks.size(); i--; )
+    {
+        ItemBlock& block = m_ItemBlocks[i];
+        // This block has some free items: Use first one.
+        if(block.FirstFreeIndex != UINT32_MAX)
+        {
+            Item* const pItem = &block.pItems[block.FirstFreeIndex];
+            block.FirstFreeIndex = pItem->NextFreeIndex;
+            T* result = (T*)&pItem->Value;
+            new(result)T(); // Explicit constructor call.
+            return result;
+        }
+    }
+
+    // No block has free item: Create new one and use it.
+    ItemBlock& newBlock = CreateNewBlock();
+    Item* const pItem = &newBlock.pItems[0];
+    newBlock.FirstFreeIndex = pItem->NextFreeIndex;
+    T* result = (T*)&pItem->Value;
+    new(result)T(); // Explicit constructor call.
+    return result;
+}
+
+template<typename T>
+void VmaPoolAllocator<T>::Free(T* ptr)
+{
+    // Search all memory blocks to find ptr.
+    for(size_t i = m_ItemBlocks.size(); i--; )
+    {
+        ItemBlock& block = m_ItemBlocks[i];
+
+        // Casting to union.
+        Item* pItemPtr;
+        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
+
+        // Check if pItemPtr is in address range of this block.
+        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
+        {
+            ptr->~T(); // Explicit destructor call.
+            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
+            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
+            block.FirstFreeIndex = index;
+            return;
+        }
+    }
+    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
+}
+
+template<typename T>
+typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
+{
+    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
+        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
+
+    const ItemBlock newBlock = {
+        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
+        newBlockCapacity,
+        0 };
+
+    m_ItemBlocks.push_back(newBlock);
+
+    // Setup singly-linked list of all free items in this block.
+    for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
+        newBlock.pItems[i].NextFreeIndex = i + 1;
+    newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
+    return m_ItemBlocks.back();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaRawList, VmaList
+
+#if VMA_USE_STL_LIST
+
+#define VmaList std::list
+
+#else // #if VMA_USE_STL_LIST
+
+template<typename T>
+struct VmaListItem
+{
+    VmaListItem* pPrev;
+    VmaListItem* pNext;
+    T Value;
+};
+
+// Doubly linked list.
+template<typename T>
+class VmaRawList
+{
+    VMA_CLASS_NO_COPY(VmaRawList)
+public:
+    typedef VmaListItem<T> ItemType;
+
+    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
+    ~VmaRawList();
+    void Clear();
+
+    size_t GetCount() const { return m_Count; }
+    bool IsEmpty() const { return m_Count == 0; }
+
+    ItemType* Front() { return m_pFront; }
+    const ItemType* Front() const { return m_pFront; }
+    ItemType* Back() { return m_pBack; }
+    const ItemType* Back() const { return m_pBack; }
+
+    ItemType* PushBack();
+    ItemType* PushFront();
+    ItemType* PushBack(const T& value);
+    ItemType* PushFront(const T& value);
+    void PopBack();
+    void PopFront();
+
+    // Item can be null - it means PushBack.
+    ItemType* InsertBefore(ItemType* pItem);
+    // Item can be null - it means PushFront.
+    ItemType* InsertAfter(ItemType* pItem);
+
+    ItemType* InsertBefore(ItemType* pItem, const T& value);
+    ItemType* InsertAfter(ItemType* pItem, const T& value);
+
+    void Remove(ItemType* pItem);
+
+private:
+    const VkAllocationCallbacks* const m_pAllocationCallbacks;
+    VmaPoolAllocator<ItemType> m_ItemAllocator;
+    ItemType* m_pFront;
+    ItemType* m_pBack;
+    size_t m_Count;
+};
+
+template<typename T>
+VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
+    m_pAllocationCallbacks(pAllocationCallbacks),
+    m_ItemAllocator(pAllocationCallbacks, 128),
+    m_pFront(VMA_NULL),
+    m_pBack(VMA_NULL),
+    m_Count(0)
+{
+}
+
+template<typename T>
+VmaRawList<T>::~VmaRawList()
+{
+    // Intentionally not calling Clear, because that would be unnecessary
+    // computations to return all items to m_ItemAllocator as free.
+}
+
+template<typename T>
+void VmaRawList<T>::Clear()
+{
+    if(IsEmpty() == false)
+    {
+        ItemType* pItem = m_pBack;
+        while(pItem != VMA_NULL)
+        {
+            ItemType* const pPrevItem = pItem->pPrev;
+            m_ItemAllocator.Free(pItem);
+            pItem = pPrevItem;
+        }
+        m_pFront = VMA_NULL;
+        m_pBack = VMA_NULL;
+        m_Count = 0;
+    }
+}
+
+template<typename T>
+VmaListItem<T>* VmaRawList<T>::PushBack()
+{
+    ItemType* const pNewItem = m_ItemAllocator.Alloc();
+    pNewItem->pNext = VMA_NULL;
+    if(IsEmpty())
+    {
+        pNewItem->pPrev = VMA_NULL;
+        m_pFront = pNewItem;
+        m_pBack = pNewItem;
+        m_Count = 1;
+    }
+    else
+    {
+        pNewItem->pPrev = m_pBack;
+        m_pBack->pNext = pNewItem;
+        m_pBack = pNewItem;
+        ++m_Count;
+    }
+    return pNewItem;
+}
+
+template<typename T>
+VmaListItem<T>* VmaRawList<T>::PushFront()
+{
+    ItemType* const pNewItem = m_ItemAllocator.Alloc();
+    pNewItem->pPrev = VMA_NULL;
+    if(IsEmpty())
+    {
+        pNewItem->pNext = VMA_NULL;
+        m_pFront = pNewItem;
+        m_pBack = pNewItem;
+        m_Count = 1;
+    }
+    else
+    {
+        pNewItem->pNext = m_pFront;
+        m_pFront->pPrev = pNewItem;
+        m_pFront = pNewItem;
+        ++m_Count;
+    }
+    return pNewItem;
+}
+
+template<typename T>
+VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
+{
+    ItemType* const pNewItem = PushBack();
+    pNewItem->Value = value;
+    return pNewItem;
+}
+
+template<typename T>
+VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
+{
+    ItemType* const pNewItem = PushFront();
+    pNewItem->Value = value;
+    return pNewItem;
+}
+
+template<typename T>
+void VmaRawList<T>::PopBack()
+{
+    VMA_HEAVY_ASSERT(m_Count > 0);
+    ItemType* const pBackItem = m_pBack;
+    ItemType* const pPrevItem = pBackItem->pPrev;
+    if(pPrevItem != VMA_NULL)
+    {
+        pPrevItem->pNext = VMA_NULL;
+    }
+    m_pBack = pPrevItem;
+    m_ItemAllocator.Free(pBackItem);
+    --m_Count;
+}
+
+template<typename T>
+void VmaRawList<T>::PopFront()
+{
+    VMA_HEAVY_ASSERT(m_Count > 0);
+    ItemType* const pFrontItem = m_pFront;
+    ItemType* const pNextItem = pFrontItem->pNext;
+    if(pNextItem != VMA_NULL)
+    {
+        pNextItem->pPrev = VMA_NULL;
+    }
+    m_pFront = pNextItem;
+    m_ItemAllocator.Free(pFrontItem);
--m_Count; +} + +template +void VmaRawList::Remove(ItemType* pItem) +{ + VMA_HEAVY_ASSERT(pItem != VMA_NULL); + VMA_HEAVY_ASSERT(m_Count > 0); + + if(pItem->pPrev != VMA_NULL) + { + pItem->pPrev->pNext = pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = pItem->pNext; + } + + if(pItem->pNext != VMA_NULL) + { + pItem->pNext->pPrev = pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = pItem->pPrev; + } + + m_ItemAllocator.Free(pItem); + --m_Count; +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const prevItem = pItem->pPrev; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pPrev = prevItem; + newItem->pNext = pItem; + pItem->pPrev = newItem; + if(prevItem != VMA_NULL) + { + prevItem->pNext = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = newItem; + } + ++m_Count; + return newItem; + } + else + return PushBack(); +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const nextItem = pItem->pNext; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pNext = nextItem; + newItem->pPrev = pItem; + pItem->pNext = newItem; + if(nextItem != VMA_NULL) + { + nextItem->pPrev = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = newItem; + } + ++m_Count; + return newItem; + } + else + return PushFront(); +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertBefore(pItem); + newItem->Value = value; + return newItem; +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertAfter(pItem); + newItem->Value = value; + return newItem; +} + +template +class VmaList +{ + VMA_CLASS_NO_COPY(VmaList) +public: + class iterator + { + public: + iterator() : + m_pList(VMA_NULL), + m_pItem(VMA_NULL) + { + } + + T& operator*() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return m_pItem->Value; + } + T* operator->() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return &m_pItem->Value; + } + + iterator& operator++() + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + m_pItem = m_pItem->pNext; + return *this; + } + iterator& operator--() + { + if(m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; + } + + iterator operator++(int) + { + iterator result = *this; + ++*this; + return result; + } + iterator operator--(int) + { + iterator result = *this; + --*this; + return result; + } + + bool operator==(const iterator& rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem == rhs.m_pItem; + } + bool operator!=(const iterator& rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem != rhs.m_pItem; + } + + private: + VmaRawList* m_pList; + VmaListItem* m_pItem; + + iterator(VmaRawList* pList, VmaListItem* pItem) : + m_pList(pList), + m_pItem(pItem) + { + } + + friend class VmaList; + }; + + class const_iterator + { + public: + const_iterator() : + m_pList(VMA_NULL), + m_pItem(VMA_NULL) + { + } + + const_iterator(const iterator& src) : + m_pList(src.m_pList), + m_pItem(src.m_pItem) + { + } + + const T& operator*() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return m_pItem->Value; + } + const T* operator->() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return 
&m_pItem->Value; + } + + const_iterator& operator++() + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + m_pItem = m_pItem->pNext; + return *this; + } + const_iterator& operator--() + { + if(m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; + } + + const_iterator operator++(int) + { + const_iterator result = *this; + ++*this; + return result; + } + const_iterator operator--(int) + { + const_iterator result = *this; + --*this; + return result; + } + + bool operator==(const const_iterator& rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem == rhs.m_pItem; + } + bool operator!=(const const_iterator& rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem != rhs.m_pItem; + } + + private: + const_iterator(const VmaRawList* pList, const VmaListItem* pItem) : + m_pList(pList), + m_pItem(pItem) + { + } + + const VmaRawList* m_pList; + const VmaListItem* m_pItem; + + friend class VmaList; + }; + + VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { } + + bool empty() const { return m_RawList.IsEmpty(); } + size_t size() const { return m_RawList.GetCount(); } + + iterator begin() { return iterator(&m_RawList, m_RawList.Front()); } + iterator end() { return iterator(&m_RawList, VMA_NULL); } + + const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); } + const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); } + + void clear() { m_RawList.Clear(); } + void push_back(const T& value) { m_RawList.PushBack(value); } + void erase(iterator it) { m_RawList.Remove(it.m_pItem); } + iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } + +private: + VmaRawList m_RawList; +}; + +#endif // #if VMA_USE_STL_LIST + +//////////////////////////////////////////////////////////////////////////////// +// class VmaMap + +// Unused in this version. +#if 0 + +#if VMA_USE_STL_UNORDERED_MAP + +#define VmaPair std::pair + +#define VMA_MAP_TYPE(KeyT, ValueT) \ + std::unordered_map< KeyT, ValueT, std::hash, std::equal_to, VmaStlAllocator< std::pair > > + +#else // #if VMA_USE_STL_UNORDERED_MAP + +template +struct VmaPair +{ + T1 first; + T2 second; + + VmaPair() : first(), second() { } + VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { } +}; + +/* Class compatible with subset of interface of std::unordered_map. +KeyT, ValueT must be POD because they will be stored in VmaVector. 
+*/ +template +class VmaMap +{ +public: + typedef VmaPair PairType; + typedef PairType* iterator; + + VmaMap(const VmaStlAllocator& allocator) : m_Vector(allocator) { } + + iterator begin() { return m_Vector.begin(); } + iterator end() { return m_Vector.end(); } + + void insert(const PairType& pair); + iterator find(const KeyT& key); + void erase(iterator it); + +private: + VmaVector< PairType, VmaStlAllocator > m_Vector; +}; + +#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap + +template +struct VmaPairFirstLess +{ + bool operator()(const VmaPair& lhs, const VmaPair& rhs) const + { + return lhs.first < rhs.first; + } + bool operator()(const VmaPair& lhs, const FirstT& rhsFirst) const + { + return lhs.first < rhsFirst; + } +}; + +template +void VmaMap::insert(const PairType& pair) +{ + const size_t indexToInsert = VmaBinaryFindFirstNotLess( + m_Vector.data(), + m_Vector.data() + m_Vector.size(), + pair, + VmaPairFirstLess()) - m_Vector.data(); + VmaVectorInsert(m_Vector, indexToInsert, pair); +} + +template +VmaPair* VmaMap::find(const KeyT& key) +{ + PairType* it = VmaBinaryFindFirstNotLess( + m_Vector.data(), + m_Vector.data() + m_Vector.size(), + key, + VmaPairFirstLess()); + if((it != m_Vector.end()) && (it->first == key)) + { + return it; + } + else + { + return m_Vector.end(); + } +} + +template +void VmaMap::erase(iterator it) +{ + VmaVectorRemove(m_Vector, it - m_Vector.begin()); +} + +#endif // #if VMA_USE_STL_UNORDERED_MAP + +#endif // #if 0 + +//////////////////////////////////////////////////////////////////////////////// + +class VmaDeviceMemoryBlock; + +enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE }; + +struct VmaAllocation_T +{ +private: + static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80; + + enum FLAGS + { + FLAG_USER_DATA_STRING = 0x01, + }; + +public: + enum ALLOCATION_TYPE + { + ALLOCATION_TYPE_NONE, + ALLOCATION_TYPE_BLOCK, + ALLOCATION_TYPE_DEDICATED, + }; + + /* + This struct is allocated using VmaPoolAllocator. + */ + + void Ctor(uint32_t currentFrameIndex, bool userDataString) + { + m_Alignment = 1; + m_Size = 0; + m_MemoryTypeIndex = 0; + m_pUserData = VMA_NULL; + m_LastUseFrameIndex = currentFrameIndex; + m_Type = (uint8_t)ALLOCATION_TYPE_NONE; + m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN; + m_MapCount = 0; + m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0; + +#if VMA_STATS_STRING_ENABLED + m_CreationFrameIndex = currentFrameIndex; + m_BufferImageUsage = 0; +#endif + } + + void Dtor() + { + VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction."); + + // Check if owned string was freed. + VMA_ASSERT(m_pUserData == VMA_NULL); + } + + void InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VkDeviceSize offset, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped, + bool canBecomeLost) + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(block != VMA_NULL); + m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; + m_Alignment = alignment; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + m_MapCount = mapped ? 
MAP_COUNT_FLAG_PERSISTENT_MAP : 0; + m_SuballocationType = (uint8_t)suballocationType; + m_BlockAllocation.m_Block = block; + m_BlockAllocation.m_Offset = offset; + m_BlockAllocation.m_CanBecomeLost = canBecomeLost; + } + + void InitLost() + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST); + m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; + m_MemoryTypeIndex = 0; + m_BlockAllocation.m_Block = VMA_NULL; + m_BlockAllocation.m_Offset = 0; + m_BlockAllocation.m_CanBecomeLost = true; + } + + void ChangeBlockAllocation( + VmaAllocator hAllocator, + VmaDeviceMemoryBlock* block, + VkDeviceSize offset); + + void ChangeOffset(VkDeviceSize newOffset); + + // pMappedData not null means allocation is created with MAPPED flag. + void InitDedicatedAllocation( + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size) + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(hMemory != VK_NULL_HANDLE); + m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED; + m_Alignment = 0; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + m_SuballocationType = (uint8_t)suballocationType; + m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0; + m_DedicatedAllocation.m_hMemory = hMemory; + m_DedicatedAllocation.m_pMappedData = pMappedData; + } + + ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } + VkDeviceSize GetAlignment() const { return m_Alignment; } + VkDeviceSize GetSize() const { return m_Size; } + bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; } + void* GetUserData() const { return m_pUserData; } + void SetUserData(VmaAllocator hAllocator, void* pUserData); + VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } + + VmaDeviceMemoryBlock* GetBlock() const + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + return m_BlockAllocation.m_Block; + } + VkDeviceSize GetOffset() const; + VkDeviceMemory GetMemory() const; + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; } + void* GetMappedData() const; + bool CanBecomeLost() const; + + uint32_t GetLastUseFrameIndex() const + { + return m_LastUseFrameIndex.load(); + } + bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired) + { + return m_LastUseFrameIndex.compare_exchange_weak(expected, desired); + } + /* + - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex, + makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true. + - Else, returns false. + + If hAllocation is already lost, assert - you should not call it then. + If hAllocation was not created with CAN_BECOME_LOST_BIT, assert. 
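+    For example, with frameInUseCount = 2: an allocation whose LastUseFrameIndex
+    is 10 can be made lost at CurrentFrameIndex 13, because 10 + 2 < 13; at
+    CurrentFrameIndex 12 it cannot, because 10 + 2 == 12 is not strictly less.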
+ */ + bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); + + void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo) + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED); + outInfo.blockCount = 1; + outInfo.allocationCount = 1; + outInfo.unusedRangeCount = 0; + outInfo.usedBytes = m_Size; + outInfo.unusedBytes = 0; + outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size; + outInfo.unusedRangeSizeMin = UINT64_MAX; + outInfo.unusedRangeSizeMax = 0; + } + + void BlockAllocMap(); + void BlockAllocUnmap(); + VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); + void DedicatedAllocUnmap(VmaAllocator hAllocator); + +#if VMA_STATS_STRING_ENABLED + uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; } + uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; } + + void InitBufferImageUsage(uint32_t bufferImageUsage) + { + VMA_ASSERT(m_BufferImageUsage == 0); + m_BufferImageUsage = bufferImageUsage; + } + + void PrintParameters(class VmaJsonWriter& json) const; +#endif + +private: + VkDeviceSize m_Alignment; + VkDeviceSize m_Size; + void* m_pUserData; + VMA_ATOMIC_UINT32 m_LastUseFrameIndex; + uint32_t m_MemoryTypeIndex; + uint8_t m_Type; // ALLOCATION_TYPE + uint8_t m_SuballocationType; // VmaSuballocationType + // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT. + // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory(). + uint8_t m_MapCount; + uint8_t m_Flags; // enum FLAGS + + // Allocation out of VmaDeviceMemoryBlock. + struct BlockAllocation + { + VmaDeviceMemoryBlock* m_Block; + VkDeviceSize m_Offset; + bool m_CanBecomeLost; + }; + + // Allocation for an object that has its own private VkDeviceMemory. + struct DedicatedAllocation + { + VkDeviceMemory m_hMemory; + void* m_pMappedData; // Not null means memory is mapped. + }; + + union + { + // Allocation out of VmaDeviceMemoryBlock. + BlockAllocation m_BlockAllocation; + // Allocation for an object that has its own private VkDeviceMemory. + DedicatedAllocation m_DedicatedAllocation; + }; + +#if VMA_STATS_STRING_ENABLED + uint32_t m_CreationFrameIndex; + uint32_t m_BufferImageUsage; // 0 if unknown. +#endif + + void FreeUserDataString(VmaAllocator hAllocator); +}; + +/* +Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as +allocated memory block or free. +*/ +struct VmaSuballocation +{ + VkDeviceSize offset; + VkDeviceSize size; + VmaAllocation hAllocation; + VmaSuballocationType type; +}; + +// Comparator for offsets. +struct VmaSuballocationOffsetLess +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset < rhs.offset; + } +}; +struct VmaSuballocationOffsetGreater +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset > rhs.offset; + } +}; + +typedef VmaList< VmaSuballocation, VmaStlAllocator > VmaSuballocationList; + +// Cost of one additional allocation lost, as equivalent in bytes. +static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576; + +enum class VmaAllocationRequestType +{ + Normal, + // Used by "Linear" algorithm. + UpperAddress, + EndOf1st, + EndOf2nd, +}; + +/* +Parameters of planned allocation inside a VmaDeviceMemoryBlock. + +If canMakeOtherLost was false: +- item points to a FREE suballocation. +- itemsToMakeLostCount is 0. 
+ +If canMakeOtherLost was true: +- item points to first of sequence of suballocations, which are either FREE, + or point to VmaAllocations that can become lost. +- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for + the requested allocation to succeed. +*/ +struct VmaAllocationRequest +{ + VkDeviceSize offset; + VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation. + VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation. + VmaSuballocationList::iterator item; + size_t itemsToMakeLostCount; + void* customData; + VmaAllocationRequestType type; + + VkDeviceSize CalcCost() const + { + return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST; + } +}; + +/* +Data structure used for bookkeeping of allocations and unused ranges of memory +in a single VkDeviceMemory block. +*/ +class VmaBlockMetadata +{ +public: + VmaBlockMetadata(VmaAllocator hAllocator); + virtual ~VmaBlockMetadata() { } + virtual void Init(VkDeviceSize size) { m_Size = size; } + + // Validates all data structures inside this object. If not valid, returns false. + virtual bool Validate() const = 0; + VkDeviceSize GetSize() const { return m_Size; } + virtual size_t GetAllocationCount() const = 0; + virtual VkDeviceSize GetSumFreeSize() const = 0; + virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0; + // Returns true if this block is empty - contains only single free suballocation. + virtual bool IsEmpty() const = 0; + + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0; + // Shouldn't modify blockCount. + virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; +#endif + + // Tries to find a place for suballocation with given parameters inside this block. + // If succeeded, fills pAllocationRequest and returns true. + // If failed, returns false. + virtual bool CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) = 0; + + virtual bool MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest) = 0; + + virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0; + + virtual VkResult CheckCorruption(const void* pBlockData) = 0; + + // Makes actual allocation based on request. Request must already be checked and valid. + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) = 0; + + // Frees suballocation assigned to given memory region. 
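For intuition, CalcCost() above weighs each allocation that would have to be made lost as one megabyte (VMA_LOST_ALLOCATION_COST) of wasted space, so candidate requests can be compared on a single scale. A minimal standalone sketch of that comparison; the type and names here are stand-ins, not the header's own:

#include <cstddef>
#include <cstdint>

// Stand-in for VkDeviceSize so the sketch compiles on its own.
typedef uint64_t DeviceSizeSketch;
static const DeviceSizeSketch kLostAllocationCost = 1048576; // mirrors VMA_LOST_ALLOCATION_COST

// Same formula as VmaAllocationRequest::CalcCost() above.
DeviceSizeSketch CalcCostSketch(DeviceSizeSketch sumItemSize, size_t itemsToMakeLostCount)
{
    return sumItemSize + itemsToMakeLostCount * kLostAllocationCost;
}

// Example: a request overlapping 262144 bytes of losable items spread over
// 2 allocations costs 262144 + 2 * 1048576 = 2359296 byte-equivalents, so a
// request with cost 0 (pure free space) is always preferred over it.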
+ virtual void Free(const VmaAllocation allocation) = 0; + virtual void FreeAtOffset(VkDeviceSize offset) = 0; + +protected: + const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, + size_t allocationCount, + size_t unusedRangeCount) const; + void PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, + VmaAllocation hAllocation) const; + void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, + VkDeviceSize size) const; + void PrintDetailedMap_End(class VmaJsonWriter& json) const; +#endif + +private: + VkDeviceSize m_Size; + const VkAllocationCallbacks* m_pAllocationCallbacks; +}; + +#define VMA_VALIDATE(cond) do { if(!(cond)) { \ + VMA_ASSERT(0 && "Validation failed: " #cond); \ + return false; \ + } } while(false) + +class VmaBlockMetadata_Generic : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic) +public: + VmaBlockMetadata_Generic(VmaAllocator hAllocator); + virtual ~VmaBlockMetadata_Generic(); + virtual void Init(VkDeviceSize size); + + virtual bool Validate() const; + virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; } + virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; } + virtual VkDeviceSize GetUnusedRangeSizeMax() const; + virtual bool IsEmpty() const; + + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; + virtual void AddPoolStats(VmaPoolStats& inoutStats) const; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const; +#endif + + virtual bool CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); + + virtual bool MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest); + + virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); + + virtual VkResult CheckCorruption(const void* pBlockData); + + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation); + + virtual void Free(const VmaAllocation allocation); + virtual void FreeAtOffset(VkDeviceSize offset); + + //////////////////////////////////////////////////////////////////////////////// + // For defragmentation + + bool IsBufferImageGranularityConflictPossible( + VkDeviceSize bufferImageGranularity, + VmaSuballocationType& inOutPrevSuballocType) const; + +private: + friend class VmaDefragmentationAlgorithm_Generic; + friend class VmaDefragmentationAlgorithm_Fast; + + uint32_t m_FreeCount; + VkDeviceSize m_SumFreeSize; + VmaSuballocationList m_Suballocations; + // Suballocations that are free and have size greater than certain threshold. + // Sorted by size, ascending. + VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize; + + bool ValidateFreeSuballocationList() const; + + // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem. + // If yes, fills pOffset and returns true. If no, returns false. 
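The VMA_VALIDATE macro defined above turns a failed condition into an assertion plus an early `return false`, which keeps Validate() implementations flat. A hypothetical fragment using it (not self-contained: it relies on VMA_VALIDATE/VMA_ASSERT from this header; the checks shown are illustrative, not the library's actual invariants):

// Hypothetical validator body built on VMA_VALIDATE as defined above.
bool ValidateCountsSketch(size_t freeCount, size_t suballocCount,
    uint64_t sumFreeSize, uint64_t blockSize)
{
    VMA_VALIDATE(freeCount <= suballocCount); // free items are a subset of all suballocations
    VMA_VALIDATE(sumFreeSize <= blockSize);   // free bytes cannot exceed the block size
    return true;                              // reached only if every check passed
}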
+ bool CheckAllocation( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaSuballocationList::const_iterator suballocItem, + bool canMakeOtherLost, + VkDeviceSize* pOffset, + size_t* itemsToMakeLostCount, + VkDeviceSize* pSumFreeSize, + VkDeviceSize* pSumItemSize) const; + // Given free suballocation, it merges it with following one, which must also be free. + void MergeFreeWithNext(VmaSuballocationList::iterator item); + // Releases given suballocation, making it free. + // Merges it with adjacent free suballocations if applicable. + // Returns iterator to new free suballocation at this place. + VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem); + // Given free suballocation, it inserts it into sorted list of + // m_FreeSuballocationsBySize if it's suitable. + void RegisterFreeSuballocation(VmaSuballocationList::iterator item); + // Given free suballocation, it removes it from sorted list of + // m_FreeSuballocationsBySize if it's suitable. + void UnregisterFreeSuballocation(VmaSuballocationList::iterator item); +}; + +/* +Allocations and their references in internal data structure look like this: + +if(m_2ndVectorMode == SECOND_VECTOR_EMPTY): + + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | +GetSize() +-------+ + +if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER): + + 0 +-------+ + | Alloc | 2nd[0] + +-------+ + | Alloc | 2nd[1] + +-------+ + | ... | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | +GetSize() +-------+ + +if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK): + + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | ... 
| + +-------+ + | Alloc | 2nd[1] + +-------+ + | Alloc | 2nd[0] +GetSize() +-------+ + +*/ +class VmaBlockMetadata_Linear : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear) +public: + VmaBlockMetadata_Linear(VmaAllocator hAllocator); + virtual ~VmaBlockMetadata_Linear(); + virtual void Init(VkDeviceSize size); + + virtual bool Validate() const; + virtual size_t GetAllocationCount() const; + virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; } + virtual VkDeviceSize GetUnusedRangeSizeMax() const; + virtual bool IsEmpty() const { return GetAllocationCount() == 0; } + + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; + virtual void AddPoolStats(VmaPoolStats& inoutStats) const; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const; +#endif + + virtual bool CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); + + virtual bool MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest); + + virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); + + virtual VkResult CheckCorruption(const void* pBlockData); + + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation); + + virtual void Free(const VmaAllocation allocation); + virtual void FreeAtOffset(VkDeviceSize offset); + +private: + /* + There are two suballocation vectors, used in ping-pong way. + The one with index m_1stVectorIndex is called 1st. + The one with index (m_1stVectorIndex ^ 1) is called 2nd. + 2nd can be non-empty only when 1st is not empty. + When 2nd is not empty, m_2ndVectorMode indicates its mode of operation. + */ + typedef VmaVector< VmaSuballocation, VmaStlAllocator > SuballocationVectorType; + + enum SECOND_VECTOR_MODE + { + SECOND_VECTOR_EMPTY, + /* + Suballocations in 2nd vector are created later than the ones in 1st, but they + all have smaller offset. + */ + SECOND_VECTOR_RING_BUFFER, + /* + Suballocations in 2nd vector are upper side of double stack. + They all have offsets higher than those in 1st vector. + Top of this stack means smaller offsets, but higher indices in this vector. + */ + SECOND_VECTOR_DOUBLE_STACK, + }; + + VkDeviceSize m_SumFreeSize; + SuballocationVectorType m_Suballocations0, m_Suballocations1; + uint32_t m_1stVectorIndex; + SECOND_VECTOR_MODE m_2ndVectorMode; + + SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + + // Number of items in 1st vector with hAllocation = null at the beginning. + size_t m_1stNullItemsBeginCount; + // Number of other items in 1st vector with hAllocation = null somewhere in the middle. 
+    size_t m_1stNullItemsMiddleCount;
+    // Number of items in 2nd vector with hAllocation = null.
+    size_t m_2ndNullItemsCount;
+
+    bool ShouldCompact1st() const;
+    void CleanupAfterFree();
+
+    bool CreateAllocationRequest_LowerAddress(
+        uint32_t currentFrameIndex,
+        uint32_t frameInUseCount,
+        VkDeviceSize bufferImageGranularity,
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        VmaSuballocationType allocType,
+        bool canMakeOtherLost,
+        uint32_t strategy,
+        VmaAllocationRequest* pAllocationRequest);
+    bool CreateAllocationRequest_UpperAddress(
+        uint32_t currentFrameIndex,
+        uint32_t frameInUseCount,
+        VkDeviceSize bufferImageGranularity,
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        VmaSuballocationType allocType,
+        bool canMakeOtherLost,
+        uint32_t strategy,
+        VmaAllocationRequest* pAllocationRequest);
+};
+
+/*
+- GetSize() is the original size of allocated memory block.
+- m_UsableSize is this size aligned down to a power of two.
+  All allocations and calculations happen relative to m_UsableSize.
+- GetUnusableSize() is the difference between them.
+  It is reported as a separate, unused range, not available for allocations.
+
+Node at level 0 has size = m_UsableSize.
+Each subsequent level contains nodes half the size of the previous level.
+m_LevelCount is the maximum number of levels to use in the current object.
+*/
+class VmaBlockMetadata_Buddy : public VmaBlockMetadata
+{
+    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
+public:
+    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
+    virtual ~VmaBlockMetadata_Buddy();
+    virtual void Init(VkDeviceSize size);
+
+    virtual bool Validate() const;
+    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
+    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
+    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
+    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
+
+    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
+    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
+
+#if VMA_STATS_STRING_ENABLED
+    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
+#endif
+
+    virtual bool CreateAllocationRequest(
+        uint32_t currentFrameIndex,
+        uint32_t frameInUseCount,
+        VkDeviceSize bufferImageGranularity,
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        bool upperAddress,
+        VmaSuballocationType allocType,
+        bool canMakeOtherLost,
+        uint32_t strategy,
+        VmaAllocationRequest* pAllocationRequest);
+
+    virtual bool MakeRequestedAllocationsLost(
+        uint32_t currentFrameIndex,
+        uint32_t frameInUseCount,
+        VmaAllocationRequest* pAllocationRequest);
+
+    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
+    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
+
+    virtual void Alloc(
+        const VmaAllocationRequest& request,
+        VmaSuballocationType type,
+        VkDeviceSize allocSize,
+        VmaAllocation hAllocation);
+
+    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
+    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
+
+private:
+    static const VkDeviceSize MIN_NODE_SIZE = 32;
+    static const size_t MAX_LEVELS = 30;
+
+    struct ValidationContext
+    {
+        size_t calculatedAllocationCount;
+        size_t calculatedFreeCount;
+        VkDeviceSize calculatedSumFreeSize;
+
+        ValidationContext() :
+            calculatedAllocationCount(0),
+            calculatedFreeCount(0),
+            calculatedSumFreeSize(0) {
} + }; + + struct Node + { + VkDeviceSize offset; + enum TYPE + { + TYPE_FREE, + TYPE_ALLOCATION, + TYPE_SPLIT, + TYPE_COUNT + } type; + Node* parent; + Node* buddy; + + union + { + struct + { + Node* prev; + Node* next; + } free; + struct + { + VmaAllocation alloc; + } allocation; + struct + { + Node* leftChild; + } split; + }; + }; + + // Size of the memory block aligned down to a power of two. + VkDeviceSize m_UsableSize; + uint32_t m_LevelCount; + + Node* m_Root; + struct { + Node* front; + Node* back; + } m_FreeList[MAX_LEVELS]; + // Number of nodes in the tree with type == TYPE_ALLOCATION. + size_t m_AllocationCount; + // Number of nodes in the tree with type == TYPE_FREE. + size_t m_FreeCount; + // This includes space wasted due to internal fragmentation. Doesn't include unusable size. + VkDeviceSize m_SumFreeSize; + + VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; } + void DeleteNode(Node* node); + bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const; + uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const; + inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; } + // Alloc passed just for validation. Can be null. + void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset); + void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const; + // Adds node to the front of FreeList at given level. + // node->type must be FREE. + // node->free.prev, next can be undefined. + void AddToFreeListFront(uint32_t level, Node* node); + // Removes node from FreeList at given level. + // node->type must be FREE. + // node->free.prev, next stay untouched. + void RemoveFromFreeList(uint32_t level, Node* node); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const; +#endif +}; + +/* +Represents a single block of device memory (`VkDeviceMemory`) with all the +data about its regions (aka suballocations, #VmaAllocation), assigned and free. + +Thread-safety: This class must be externally synchronized. +*/ +class VmaDeviceMemoryBlock +{ + VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock) +public: + VmaBlockMetadata* m_pMetadata; + + VmaDeviceMemoryBlock(VmaAllocator hAllocator); + + ~VmaDeviceMemoryBlock() + { + VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped."); + VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); + } + + // Always call after construction. + void Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm); + // Always call before destruction. + void Destroy(VmaAllocator allocator); + + VmaPool GetParentPool() const { return m_hParentPool; } + VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + uint32_t GetId() const { return m_Id; } + void* GetMappedData() const { return m_pMappedData; } + + // Validates all data structures inside this object. If not valid, returns false. + bool Validate() const; + + VkResult CheckCorruption(VmaAllocator hAllocator); + + // ppData can be null. 
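The buddy metadata above relates sizes and levels through LevelToNodeSize(level) == m_UsableSize >> level: level 0 spans the whole usable size and each deeper level halves it. A standalone sketch of the inverse mapping, under the assumption that the usable size is already a power of two (the helper name is hypothetical):

#include <cstdint>

// Deepest level whose node size still fits allocSize.
uint32_t AllocSizeToLevelSketch(uint64_t usableSize, uint32_t levelCount, uint64_t allocSize)
{
    uint32_t level = 0;
    while(level + 1 < levelCount && (usableSize >> (level + 1)) >= allocSize)
    {
        ++level; // descend while the child node still fits the request
    }
    return level;
}

// Example: usableSize = 256 MiB, allocSize = 3 MiB -> level 6 (node size 4 MiB).
// The unused 1 MiB is internal fragmentation, which m_SumFreeSize above includes.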
+ VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); + void Unmap(VmaAllocator hAllocator, uint32_t count); + + VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + + VkResult BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + +private: + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + uint32_t m_MemoryTypeIndex; + uint32_t m_Id; + VkDeviceMemory m_hMemory; + + /* + Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. + Also protects m_MapCount, m_pMappedData. + Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. + */ + VMA_MUTEX m_Mutex; + uint32_t m_MapCount; + void* m_pMappedData; +}; + +struct VmaPointerLess +{ + bool operator()(const void* lhs, const void* rhs) const + { + return lhs < rhs; + } +}; + +struct VmaDefragmentationMove +{ + size_t srcBlockIndex; + size_t dstBlockIndex; + VkDeviceSize srcOffset; + VkDeviceSize dstOffset; + VkDeviceSize size; + VmaAllocation hAllocation; + VmaDeviceMemoryBlock* pSrcBlock; + VmaDeviceMemoryBlock* pDstBlock; +}; + +class VmaDefragmentationAlgorithm; + +/* +Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific +Vulkan memory type. + +Synchronized internally with a mutex. +*/ +struct VmaBlockVector +{ + VMA_CLASS_NO_COPY(VmaBlockVector) +public: + VmaBlockVector( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceSize preferredBlockSize, + size_t minBlockCount, + size_t maxBlockCount, + VkDeviceSize bufferImageGranularity, + uint32_t frameInUseCount, + bool explicitBlockSize, + uint32_t algorithm); + ~VmaBlockVector(); + + VkResult CreateMinBlocks(); + + VmaAllocator GetAllocator() const { return m_hAllocator; } + VmaPool GetParentPool() const { return m_hParentPool; } + bool IsCustomPool() const { return m_hParentPool != VMA_NULL; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; } + VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } + uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; } + uint32_t GetAlgorithm() const { return m_Algorithm; } + + void GetPoolStats(VmaPoolStats* pStats); + + bool IsEmpty(); + bool IsCorruptionDetectionEnabled() const; + + VkResult Allocate( + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + void Free(const VmaAllocation hAllocation); + + // Adds statistics of this BlockVector to pStats. + void AddStats(VmaStats* pStats); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + void MakePoolAllocationsLost( + uint32_t currentFrameIndex, + size_t* pLostAllocationCount); + VkResult CheckCorruption(); + + // Saves results in pCtx->res. 
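Map() and Unmap() on VmaDeviceMemoryBlock above are reference-counted: the block's VkDeviceMemory is mapped on the first request and unmapped when the count drops back to zero, all under m_Mutex. A standalone sketch of that pattern with hypothetical names, where "mapping" is modeled by setting a pointer rather than calling vkMapMemory:

#include <cstdint>
#include <mutex>

struct MappedBlockSketch
{
    std::mutex mtx;           // plays the role of m_Mutex
    uint32_t mapCount = 0;    // plays the role of m_MapCount
    void* mappedData = nullptr;

    void* Map()
    {
        std::lock_guard<std::mutex> lock(mtx);
        if(mapCount++ == 0)
        {
            mappedData = this; // first user "maps" (stands in for vkMapMemory)
        }
        return mappedData;     // later users reuse the existing mapping
    }

    void Unmap()
    {
        std::lock_guard<std::mutex> lock(mtx);
        if(--mapCount == 0)
        {
            mappedData = nullptr; // last user "unmaps" (stands in for vkUnmapMemory)
        }
    }
};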
+ void Defragment( + class VmaBlockVectorDefragmentationContext* pCtx, + VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags, + VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove, + VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove, + VkCommandBuffer commandBuffer); + void DefragmentationEnd( + class VmaBlockVectorDefragmentationContext* pCtx, + VmaDefragmentationStats* pStats); + + uint32_t ProcessDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves); + + void CommitDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationStats* pStats); + + //////////////////////////////////////////////////////////////////////////////// + // To be used only while the m_Mutex is locked. Used during defragmentation. + + size_t GetBlockCount() const { return m_Blocks.size(); } + VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; } + size_t CalcAllocationCount() const; + bool IsBufferImageGranularityConflictPossible() const; + +private: + friend class VmaDefragmentationAlgorithm_Generic; + + const VmaAllocator m_hAllocator; + const VmaPool m_hParentPool; + const uint32_t m_MemoryTypeIndex; + const VkDeviceSize m_PreferredBlockSize; + const size_t m_MinBlockCount; + const size_t m_MaxBlockCount; + const VkDeviceSize m_BufferImageGranularity; + const uint32_t m_FrameInUseCount; + const bool m_ExplicitBlockSize; + const uint32_t m_Algorithm; + VMA_RW_MUTEX m_Mutex; + + /* There can be at most one allocation that is completely empty (except when minBlockCount > 0) - + a hysteresis to avoid pessimistic case of alternating creation and destruction of a VkDeviceMemory. */ + bool m_HasEmptyBlock; + // Incrementally sorted by sumFreeSize, ascending. + VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator > m_Blocks; + uint32_t m_NextBlockId; + + VkDeviceSize CalcMaxBlockSize() const; + + // Finds and removes given block from vector. + void Remove(VmaDeviceMemoryBlock* pBlock); + + // Performs single step in sorting m_Blocks. They may not be fully sorted + // after this call. + void IncrementallySortBlocks(); + + VkResult AllocatePage( + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation); + + // To be used only without CAN_MAKE_OTHER_LOST flag. + VkResult AllocateFromBlock( + VmaDeviceMemoryBlock* pBlock, + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation); + + VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex); + + // Saves result to pCtx->res. + void ApplyDefragmentationMovesCpu( + class VmaBlockVectorDefragmentationContext* pDefragCtx, + const VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves); + // Saves result to pCtx->res. + void ApplyDefragmentationMovesGpu( + class VmaBlockVectorDefragmentationContext* pDefragCtx, + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkCommandBuffer commandBuffer); + + /* + Used during defragmentation. pDefragmentationStats is optional. It's in/out + - updated with new data. 
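+    After the moves complete, FreeEmptyBlocks() destroys VkDeviceMemory blocks
+    that defragmentation emptied, subject to m_MinBlockCount and the
+    single-empty-block hysteresis described above.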
+ */ + void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats); + + void UpdateHasEmptyBlock(); +}; + +struct VmaPool_T +{ + VMA_CLASS_NO_COPY(VmaPool_T) +public: + VmaBlockVector m_BlockVector; + + VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize); + ~VmaPool_T(); + + uint32_t GetId() const { return m_Id; } + void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; } + + const char* GetName() const { return m_Name; } + void SetName(const char* pName); + +#if VMA_STATS_STRING_ENABLED + //void PrintDetailedMap(class VmaStringBuilder& sb); +#endif + +private: + uint32_t m_Id; + char* m_Name; +}; + +/* +Performs defragmentation: + +- Updates `pBlockVector->m_pMetadata`. +- Updates allocations by calling ChangeBlockAllocation() or ChangeOffset(). +- Does not move actual data, only returns requested moves as `moves`. +*/ +class VmaDefragmentationAlgorithm +{ + VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm) +public: + VmaDefragmentationAlgorithm( + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex) : + m_hAllocator(hAllocator), + m_pBlockVector(pBlockVector), + m_CurrentFrameIndex(currentFrameIndex) + { + } + virtual ~VmaDefragmentationAlgorithm() + { + } + + virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0; + virtual void AddAll() = 0; + + virtual VkResult Defragment( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags) = 0; + + virtual VkDeviceSize GetBytesMoved() const = 0; + virtual uint32_t GetAllocationsMoved() const = 0; + +protected: + VmaAllocator const m_hAllocator; + VmaBlockVector* const m_pBlockVector; + const uint32_t m_CurrentFrameIndex; + + struct AllocationInfo + { + VmaAllocation m_hAllocation; + VkBool32* m_pChanged; + + AllocationInfo() : + m_hAllocation(VK_NULL_HANDLE), + m_pChanged(VMA_NULL) + { + } + AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) : + m_hAllocation(hAlloc), + m_pChanged(pChanged) + { + } + }; +}; + +class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm +{ + VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic) +public: + VmaDefragmentationAlgorithm_Generic( + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported); + virtual ~VmaDefragmentationAlgorithm_Generic(); + + virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged); + virtual void AddAll() { m_AllAllocations = true; } + + virtual VkResult Defragment( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags); + + virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; } + virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; } + +private: + uint32_t m_AllocationCount; + bool m_AllAllocations; + + VkDeviceSize m_BytesMoved; + uint32_t m_AllocationsMoved; + + struct AllocationInfoSizeGreater + { + bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const + { + return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize(); + } + }; + + struct AllocationInfoOffsetGreater + { + bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const + { + return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset(); + } + }; + + struct BlockInfo + { + size_t m_OriginalBlockIndex; + 
VmaDeviceMemoryBlock* m_pBlock; + bool m_HasNonMovableAllocations; + VmaVector< AllocationInfo, VmaStlAllocator > m_Allocations; + + BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) : + m_OriginalBlockIndex(SIZE_MAX), + m_pBlock(VMA_NULL), + m_HasNonMovableAllocations(true), + m_Allocations(pAllocationCallbacks) + { + } + + void CalcHasNonMovableAllocations() + { + const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount(); + const size_t defragmentAllocCount = m_Allocations.size(); + m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount; + } + + void SortAllocationsBySizeDescending() + { + VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater()); + } + + void SortAllocationsByOffsetDescending() + { + VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater()); + } + }; + + struct BlockPointerLess + { + bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const + { + return pLhsBlockInfo->m_pBlock < pRhsBlock; + } + bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const + { + return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock; + } + }; + + // 1. Blocks with some non-movable allocations go first. + // 2. Blocks with smaller sumFreeSize go first. + struct BlockInfoCompareMoveDestination + { + bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const + { + if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations) + { + return true; + } + if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations) + { + return false; + } + if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize()) + { + return true; + } + return false; + } + }; + + typedef VmaVector< BlockInfo*, VmaStlAllocator > BlockInfoVector; + BlockInfoVector m_Blocks; + + VkResult DefragmentRound( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + bool freeOldAllocations); + + size_t CalcBlocksWithNonMovableCount() const; + + static bool MoveMakesSense( + size_t dstBlockIndex, VkDeviceSize dstOffset, + size_t srcBlockIndex, VkDeviceSize srcOffset); +}; + +class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm +{ + VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast) +public: + VmaDefragmentationAlgorithm_Fast( + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported); + virtual ~VmaDefragmentationAlgorithm_Fast(); + + virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; } + virtual void AddAll() { m_AllAllocations = true; } + + virtual VkResult Defragment( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags); + + virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; } + virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; } + +private: + struct BlockInfo + { + size_t origBlockIndex; + }; + + class FreeSpaceDatabase + { + public: + FreeSpaceDatabase() + { + FreeSpace s = {}; + s.blockInfoIndex = SIZE_MAX; + for(size_t i = 0; i < MAX_COUNT; ++i) + { + m_FreeSpaces[i] = s; + } + } + + void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size) + { + if(size < 
VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + return; + } + + // Find first invalid or the smallest structure. + size_t bestIndex = SIZE_MAX; + for(size_t i = 0; i < MAX_COUNT; ++i) + { + // Empty structure. + if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX) + { + bestIndex = i; + break; + } + if(m_FreeSpaces[i].size < size && + (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size)) + { + bestIndex = i; + } + } + + if(bestIndex != SIZE_MAX) + { + m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex; + m_FreeSpaces[bestIndex].offset = offset; + m_FreeSpaces[bestIndex].size = size; + } + } + + bool Fetch(VkDeviceSize alignment, VkDeviceSize size, + size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset) + { + size_t bestIndex = SIZE_MAX; + VkDeviceSize bestFreeSpaceAfter = 0; + for(size_t i = 0; i < MAX_COUNT; ++i) + { + // Structure is valid. + if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX) + { + const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment); + // Allocation fits into this structure. + if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size) + { + const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) - + (dstOffset + size); + if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter) + { + bestIndex = i; + bestFreeSpaceAfter = freeSpaceAfter; + } + } + } + } + + if(bestIndex != SIZE_MAX) + { + outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex; + outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment); + + if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + // Leave this structure for remaining empty space. + const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size; + m_FreeSpaces[bestIndex].offset += alignmentPlusSize; + m_FreeSpaces[bestIndex].size -= alignmentPlusSize; + } + else + { + // This structure becomes invalid. + m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX; + } + + return true; + } + + return false; + } + + private: + static const size_t MAX_COUNT = 4; + + struct FreeSpace + { + size_t blockInfoIndex; // SIZE_MAX means this structure is invalid. + VkDeviceSize offset; + VkDeviceSize size; + } m_FreeSpaces[MAX_COUNT]; + }; + + const bool m_OverlappingMoveSupported; + + uint32_t m_AllocationCount; + bool m_AllAllocations; + + VkDeviceSize m_BytesMoved; + uint32_t m_AllocationsMoved; + + VmaVector< BlockInfo, VmaStlAllocator > m_BlockInfos; + + void PreprocessMetadata(); + void PostprocessMetadata(); + void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc); +}; + +struct VmaBlockDefragmentationContext +{ + enum BLOCK_FLAG + { + BLOCK_FLAG_USED = 0x00000001, + }; + uint32_t flags; + VkBuffer hBuffer; +}; + +class VmaBlockVectorDefragmentationContext +{ + VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext) +public: + VkResult res; + bool mutexLocked; + VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator > blockContexts; + VmaVector< VmaDefragmentationMove, VmaStlAllocator > defragmentationMoves; + uint32_t defragmentationMovesProcessed; + uint32_t defragmentationMovesCommitted; + bool hasDefragmentationPlan; + + VmaBlockVectorDefragmentationContext( + VmaAllocator hAllocator, + VmaPool hCustomPool, // Optional. 
+ VmaBlockVector* pBlockVector, + uint32_t currFrameIndex); + ~VmaBlockVectorDefragmentationContext(); + + VmaPool GetCustomPool() const { return m_hCustomPool; } + VmaBlockVector* GetBlockVector() const { return m_pBlockVector; } + VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; } + + void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged); + void AddAll() { m_AllAllocations = true; } + + void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags); + +private: + const VmaAllocator m_hAllocator; + // Null if not from custom pool. + const VmaPool m_hCustomPool; + // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors. + VmaBlockVector* const m_pBlockVector; + const uint32_t m_CurrFrameIndex; + // Owner of this object. + VmaDefragmentationAlgorithm* m_pAlgorithm; + + struct AllocInfo + { + VmaAllocation hAlloc; + VkBool32* pChanged; + }; + // Used between constructor and Begin. + VmaVector< AllocInfo, VmaStlAllocator > m_Allocations; + bool m_AllAllocations; +}; + +struct VmaDefragmentationContext_T +{ +private: + VMA_CLASS_NO_COPY(VmaDefragmentationContext_T) +public: + VmaDefragmentationContext_T( + VmaAllocator hAllocator, + uint32_t currFrameIndex, + uint32_t flags, + VmaDefragmentationStats* pStats); + ~VmaDefragmentationContext_T(); + + void AddPools(uint32_t poolCount, VmaPool* pPools); + void AddAllocations( + uint32_t allocationCount, + VmaAllocation* pAllocations, + VkBool32* pAllocationsChanged); + + /* + Returns: + - `VK_SUCCESS` if succeeded and object can be destroyed immediately. + - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd(). + - Negative value if error occured and object can be destroyed immediately. + */ + VkResult Defragment( + VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove, + VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove, + VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags); + + VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo); + VkResult DefragmentPassEnd(); + +private: + const VmaAllocator m_hAllocator; + const uint32_t m_CurrFrameIndex; + const uint32_t m_Flags; + VmaDefragmentationStats* const m_pStats; + + VkDeviceSize m_MaxCpuBytesToMove; + uint32_t m_MaxCpuAllocationsToMove; + VkDeviceSize m_MaxGpuBytesToMove; + uint32_t m_MaxGpuAllocationsToMove; + + // Owner of these objects. + VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES]; + // Owner of these objects. 
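Per the contract above, Defragment() returning VK_NOT_READY means the context must stay alive and further passes are needed. A hypothetical driver loop over the pass interface declared above; this is not the library's public API, and the exact result handling of DefragmentPassBegin/End is an assumption:

// Hypothetical glue code; names from VmaDefragmentationContext_T above.
VkResult RunDefragPassesSketch(VmaDefragmentationContext_T* ctx, VmaDefragmentationPassInfo* pInfo)
{
    VkResult res;
    do
    {
        res = ctx->DefragmentPassBegin(pInfo); // collect the moves for this pass
        if(res != VK_SUCCESS && res != VK_NOT_READY)
        {
            return res;                        // hard error: stop immediately
        }
        // ... caller executes the returned moves here ...
        res = ctx->DefragmentPassEnd();        // commit the pass
    } while(res == VK_NOT_READY);              // VK_NOT_READY: more passes needed
    return res;
}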
+    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
+};
+
+#if VMA_RECORDING_ENABLED
+
+class VmaRecorder
+{
+public:
+    VmaRecorder();
+    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
+    void WriteConfiguration(
+        const VkPhysicalDeviceProperties& devProps,
+        const VkPhysicalDeviceMemoryProperties& memProps,
+        uint32_t vulkanApiVersion,
+        bool dedicatedAllocationExtensionEnabled,
+        bool bindMemory2ExtensionEnabled,
+        bool memoryBudgetExtensionEnabled);
+    ~VmaRecorder();
+
+    void RecordCreateAllocator(uint32_t frameIndex);
+    void RecordDestroyAllocator(uint32_t frameIndex);
+    void RecordCreatePool(uint32_t frameIndex,
+        const VmaPoolCreateInfo& createInfo,
+        VmaPool pool);
+    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
+    void RecordAllocateMemory(uint32_t frameIndex,
+        const VkMemoryRequirements& vkMemReq,
+        const VmaAllocationCreateInfo& createInfo,
+        VmaAllocation allocation);
+    void RecordAllocateMemoryPages(uint32_t frameIndex,
+        const VkMemoryRequirements& vkMemReq,
+        const VmaAllocationCreateInfo& createInfo,
+        uint64_t allocationCount,
+        const VmaAllocation* pAllocations);
+    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
+        const VkMemoryRequirements& vkMemReq,
+        bool requiresDedicatedAllocation,
+        bool prefersDedicatedAllocation,
+        const VmaAllocationCreateInfo& createInfo,
+        VmaAllocation allocation);
+    void RecordAllocateMemoryForImage(uint32_t frameIndex,
+        const VkMemoryRequirements& vkMemReq,
+        bool requiresDedicatedAllocation,
+        bool prefersDedicatedAllocation,
+        const VmaAllocationCreateInfo& createInfo,
+        VmaAllocation allocation);
+    void RecordFreeMemory(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordFreeMemoryPages(uint32_t frameIndex,
+        uint64_t allocationCount,
+        const VmaAllocation* pAllocations);
+    void RecordSetAllocationUserData(uint32_t frameIndex,
+        VmaAllocation allocation,
+        const void* pUserData);
+    void RecordCreateLostAllocation(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordMapMemory(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordUnmapMemory(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordFlushAllocation(uint32_t frameIndex,
+        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+    void RecordInvalidateAllocation(uint32_t frameIndex,
+        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+    void RecordCreateBuffer(uint32_t frameIndex,
+        const VkBufferCreateInfo& bufCreateInfo,
+        const VmaAllocationCreateInfo& allocCreateInfo,
+        VmaAllocation allocation);
+    void RecordCreateImage(uint32_t frameIndex,
+        const VkImageCreateInfo& imageCreateInfo,
+        const VmaAllocationCreateInfo& allocCreateInfo,
+        VmaAllocation allocation);
+    void RecordDestroyBuffer(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordDestroyImage(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordTouchAllocation(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordGetAllocationInfo(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
+        VmaPool pool);
+    void RecordDefragmentationBegin(uint32_t frameIndex,
+        const VmaDefragmentationInfo2& info,
+        VmaDefragmentationContext ctx);
+    void RecordDefragmentationEnd(uint32_t frameIndex,
+        VmaDefragmentationContext ctx);
+    void RecordSetPoolName(uint32_t frameIndex,
+        VmaPool pool,
+        const char* name);
+
+private:
+    struct CallParams
+    {
+        uint32_t threadId;
+        double time;
+    };
+
+    class UserDataString
+    {
+    public:
+        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
+        const char* GetString() const { return m_Str; }
+
+    private:
+        char m_PtrStr[17];
+        const char* m_Str;
+    };
+
+    bool m_UseMutex;
+    VmaRecordFlags m_Flags;
+    FILE* m_File;
+    VMA_MUTEX m_FileMutex;
+    int64_t m_Freq;
+    int64_t m_StartCounter;
+
+    void GetBasicParams(CallParams& outParams);
+
+    // T must be a pointer type, e.g. VmaAllocation, VmaPool.
+    template<typename T>
+    void PrintPointerList(uint64_t count, const T* pItems)
+    {
+        if(count)
+        {
+            fprintf(m_File, "%p", pItems[0]);
+            for(uint64_t i = 1; i < count; ++i)
+            {
+                fprintf(m_File, " %p", pItems[i]);
+            }
+        }
+    }
+
+    void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
+    void Flush();
+};
+
+#endif // #if VMA_RECORDING_ENABLED
+
+/*
+Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
+*/
+class VmaAllocationObjectAllocator
+{
+    VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
+public:
+    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
+
+    VmaAllocation Allocate();
+    void Free(VmaAllocation hAlloc);
+
+private:
+    VMA_MUTEX m_Mutex;
+    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
+};
+
+struct VmaCurrentBudgetData
+{
+    VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
+    VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
+
+#if VMA_MEMORY_BUDGET
+    VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
+    VMA_RW_MUTEX m_BudgetMutex;
+    uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
+    uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
+    uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
+#endif // #if VMA_MEMORY_BUDGET
+
+    VmaCurrentBudgetData()
+    {
+        for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
+        {
+            m_BlockBytes[heapIndex] = 0;
+            m_AllocationBytes[heapIndex] = 0;
+#if VMA_MEMORY_BUDGET
+            m_VulkanUsage[heapIndex] = 0;
+            m_VulkanBudget[heapIndex] = 0;
+            m_BlockBytesAtBudgetFetch[heapIndex] = 0;
+#endif
+        }
+
+#if VMA_MEMORY_BUDGET
+        m_OperationsSinceBudgetFetch = 0;
+#endif
+    }
+
+    void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
+    {
+        m_AllocationBytes[heapIndex] += allocationSize;
+#if VMA_MEMORY_BUDGET
+        ++m_OperationsSinceBudgetFetch;
+#endif
+    }
+
+    void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
+    {
+        VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // DELME
+        m_AllocationBytes[heapIndex] -= allocationSize;
+#if VMA_MEMORY_BUDGET
+        ++m_OperationsSinceBudgetFetch;
+#endif
+    }
+};
+
+// Main allocator object.
+struct VmaAllocator_T
+{
+    VMA_CLASS_NO_COPY(VmaAllocator_T)
+public:
+    bool m_UseMutex;
+    uint32_t m_VulkanApiVersion;
+    bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
+    bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
+    bool m_UseExtMemoryBudget;
+    VkDevice m_hDevice;
+    VkInstance m_hInstance;
+    bool m_AllocationCallbacksSpecified;
+    VkAllocationCallbacks m_AllocationCallbacks;
+    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
+    VmaAllocationObjectAllocator m_AllocationObjectAllocator;
+
+    // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
+    uint32_t m_HeapSizeLimitMask;
+
+    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
+    VkPhysicalDeviceMemoryProperties m_MemProps;
+
+    // Default pools.
+    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
+
+    // Each vector is sorted by memory (handle value).
+    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
+    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
+    VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
+
+    VmaCurrentBudgetData m_Budget;
+
+    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
+    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
+    ~VmaAllocator_T();
+
+    const VkAllocationCallbacks* GetAllocationCallbacks() const
+    {
+        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
+    }
+    const VmaVulkanFunctions& GetVulkanFunctions() const
+    {
+        return m_VulkanFunctions;
+    }
+
+    VkDeviceSize GetBufferImageGranularity() const
+    {
+        return VMA_MAX(
+            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
+            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
+    }
+
+    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
+    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
+
+    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
+    {
+        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
+        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
+    }
+    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
+    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
+    {
+        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
+            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+    }
+    // Minimum alignment for all allocations in specific memory type.
+    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
+    {
+        return IsMemoryTypeNonCoherent(memTypeIndex) ?
+            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
+            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
+    }
+
+    bool IsIntegratedGpu() const
+    {
+        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
+    }
+
+#if VMA_RECORDING_ENABLED
+    VmaRecorder* GetRecorder() const { return m_pRecorder; }
+#endif
+
+    void GetBufferMemoryRequirements(
+        VkBuffer hBuffer,
+        VkMemoryRequirements& memReq,
+        bool& requiresDedicatedAllocation,
+        bool& prefersDedicatedAllocation) const;
+    void GetImageMemoryRequirements(
+        VkImage hImage,
+        VkMemoryRequirements& memReq,
+        bool& requiresDedicatedAllocation,
+        bool& prefersDedicatedAllocation) const;
+
+    // Main allocation function.
+    VkResult AllocateMemory(
+        const VkMemoryRequirements& vkMemReq,
+        bool requiresDedicatedAllocation,
+        bool prefersDedicatedAllocation,
+        VkBuffer dedicatedBuffer,
+        VkImage dedicatedImage,
+        const VmaAllocationCreateInfo& createInfo,
+        VmaSuballocationType suballocType,
+        size_t allocationCount,
+        VmaAllocation* pAllocations);
+
+    // Main deallocation function.
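+    // Editor's note (hedged): taking a count/array pair here appears to let one
+    // code path serve both vmaFreeMemory (allocationCount == 1) and
+    // vmaFreeMemoryPages; this is an observation, not part of the original comment.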
+    void FreeMemory(
+        size_t allocationCount,
+        const VmaAllocation* pAllocations);
+
+    VkResult ResizeAllocation(
+        const VmaAllocation alloc,
+        VkDeviceSize newSize);
+
+    void CalculateStats(VmaStats* pStats);
+
+    void GetBudget(
+        VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
+
+#if VMA_STATS_STRING_ENABLED
+    void PrintDetailedMap(class VmaJsonWriter& json);
+#endif
+
+    VkResult DefragmentationBegin(
+        const VmaDefragmentationInfo2& info,
+        VmaDefragmentationStats* pStats,
+        VmaDefragmentationContext* pContext);
+    VkResult DefragmentationEnd(
+        VmaDefragmentationContext context);
+
+    VkResult DefragmentationPassBegin(
+        VmaDefragmentationPassInfo* pInfo,
+        VmaDefragmentationContext context);
+    VkResult DefragmentationPassEnd(
+        VmaDefragmentationContext context);
+
+    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
+    bool TouchAllocation(VmaAllocation hAllocation);
+
+    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
+    void DestroyPool(VmaPool pool);
+    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
+
+    void SetCurrentFrameIndex(uint32_t frameIndex);
+    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
+
+    void MakePoolAllocationsLost(
+        VmaPool hPool,
+        size_t* pLostAllocationCount);
+    VkResult CheckPoolCorruption(VmaPool hPool);
+    VkResult CheckCorruption(uint32_t memoryTypeBits);
+
+    void CreateLostAllocation(VmaAllocation* pAllocation);
+
+    // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
+    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
+    // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
+    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
+    // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
+    VkResult BindVulkanBuffer(
+        VkDeviceMemory memory,
+        VkDeviceSize memoryOffset,
+        VkBuffer buffer,
+        const void* pNext);
+    // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
+    VkResult BindVulkanImage(
+        VkDeviceMemory memory,
+        VkDeviceSize memoryOffset,
+        VkImage image,
+        const void* pNext);
+
+    VkResult Map(VmaAllocation hAllocation, void** ppData);
+    void Unmap(VmaAllocation hAllocation);
+
+    VkResult BindBufferMemory(
+        VmaAllocation hAllocation,
+        VkDeviceSize allocationLocalOffset,
+        VkBuffer hBuffer,
+        const void* pNext);
+    VkResult BindImageMemory(
+        VmaAllocation hAllocation,
+        VkDeviceSize allocationLocalOffset,
+        VkImage hImage,
+        const void* pNext);
+
+    void FlushOrInvalidateAllocation(
+        VmaAllocation hAllocation,
+        VkDeviceSize offset, VkDeviceSize size,
+        VMA_CACHE_OPERATION op);
+
+    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
+
+    /*
+    Returns bit mask of memory types that can support defragmentation on GPU as
+    they support creation of required buffer for copy operations.
+    */
+    uint32_t GetGpuDefragmentationMemoryTypeBits();
+
+private:
+    VkDeviceSize m_PreferredLargeHeapBlockSize;
+
+    VkPhysicalDevice m_PhysicalDevice;
+    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
+    VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
+
+    VMA_RW_MUTEX m_PoolsMutex;
+    // Protected by m_PoolsMutex. Sorted by pointer value.
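+    // Editor's note (hedged): keeping this vector sorted by handle value is what
+    // lets pool registration/unregistration elsewhere in this file use the
+    // sorted-vector helpers (VmaVectorInsertSorted / VmaVectorRemoveSorted)
+    // instead of a linear scan.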
+    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
+    uint32_t m_NextPoolId;
+
+    VmaVulkanFunctions m_VulkanFunctions;
+
+#if VMA_RECORDING_ENABLED
+    VmaRecorder* m_pRecorder;
+#endif
+
+    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
+
+    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
+
+    VkResult AllocateMemoryOfType(
+        VkDeviceSize size,
+        VkDeviceSize alignment,
+        bool dedicatedAllocation,
+        VkBuffer dedicatedBuffer,
+        VkImage dedicatedImage,
+        const VmaAllocationCreateInfo& createInfo,
+        uint32_t memTypeIndex,
+        VmaSuballocationType suballocType,
+        size_t allocationCount,
+        VmaAllocation* pAllocations);
+
+    // Helper function only to be used inside AllocateDedicatedMemory.
+    VkResult AllocateDedicatedMemoryPage(
+        VkDeviceSize size,
+        VmaSuballocationType suballocType,
+        uint32_t memTypeIndex,
+        const VkMemoryAllocateInfo& allocInfo,
+        bool map,
+        bool isUserDataString,
+        void* pUserData,
+        VmaAllocation* pAllocation);
+
+    // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
+    VkResult AllocateDedicatedMemory(
+        VkDeviceSize size,
+        VmaSuballocationType suballocType,
+        uint32_t memTypeIndex,
+        bool withinBudget,
+        bool map,
+        bool isUserDataString,
+        void* pUserData,
+        VkBuffer dedicatedBuffer,
+        VkImage dedicatedImage,
+        size_t allocationCount,
+        VmaAllocation* pAllocations);
+
+    void FreeDedicatedMemory(const VmaAllocation allocation);
+
+    /*
+    Calculates and returns bit mask of memory types that can support defragmentation
+    on GPU as they support creation of required buffer for copy operations.
+    */
+    uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
+
+#if VMA_MEMORY_BUDGET
+    void UpdateVulkanBudget();
+#endif // #if VMA_MEMORY_BUDGET
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Memory allocation #2 after VmaAllocator_T definition
+
+static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
+{
+    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
+}
+
+static void VmaFree(VmaAllocator hAllocator, void* ptr)
+{
+    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
+}
+
+template<typename T>
+static T* VmaAllocate(VmaAllocator hAllocator)
+{
+    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
+}
+
+template<typename T>
+static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
+{
+    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
+}
+
+template<typename T>
+static void vma_delete(VmaAllocator hAllocator, T* ptr)
+{
+    if(ptr != VMA_NULL)
+    {
+        ptr->~T();
+        VmaFree(hAllocator, ptr);
+    }
+}
+
+template<typename T>
+static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
+{
+    if(ptr != VMA_NULL)
+    {
+        for(size_t i = count; i--; )
+            ptr[i].~T();
+        VmaFree(hAllocator, ptr);
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaStringBuilder
+
+#if VMA_STATS_STRING_ENABLED
+
+class VmaStringBuilder
+{
+public:
+    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
+    size_t GetLength() const { return m_Data.size(); }
+    const char* GetData() const { return m_Data.data(); }
+
+    void Add(char ch) { m_Data.push_back(ch); }
+    void Add(const char* pStr);
+    void AddNewLine() { Add('\n'); }
+    void AddNumber(uint32_t num);
+    void AddNumber(uint64_t num);
+    void AddPointer(const void* ptr);
+
+private:
+    VmaVector< char, VmaStlAllocator<char> > m_Data;
+};
+
+void VmaStringBuilder::Add(const char* pStr)
+{
+    const size_t strLen = strlen(pStr);
+    if(strLen > 0)
+    {
+        const size_t oldCount = m_Data.size();
+        m_Data.resize(oldCount + strLen);
+        memcpy(m_Data.data() + oldCount, pStr, strLen);
+    }
+}
+
+void VmaStringBuilder::AddNumber(uint32_t num)
+{
+    char buf[11];
+    buf[10] = '\0';
+    char *p = &buf[10];
+    do
+    {
+        *--p = '0' + (num % 10);
+        num /= 10;
+    }
+    while(num);
+    Add(p);
+}
+
+void VmaStringBuilder::AddNumber(uint64_t num)
+{
+    char buf[21];
+    buf[20] = '\0';
+    char *p = &buf[20];
+    do
+    {
+        *--p = '0' + (num % 10);
+        num /= 10;
+    }
+    while(num);
+    Add(p);
+}
+
+void VmaStringBuilder::AddPointer(const void* ptr)
+{
+    char buf[21];
+    VmaPtrToStr(buf, sizeof(buf), ptr);
+    Add(buf);
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaJsonWriter
+
+#if VMA_STATS_STRING_ENABLED
+
+class VmaJsonWriter
+{
+    VMA_CLASS_NO_COPY(VmaJsonWriter)
+public:
+    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
+    ~VmaJsonWriter();
+
+    void BeginObject(bool singleLine = false);
+    void EndObject();
+
+    void BeginArray(bool singleLine = false);
+    void EndArray();
+
+    void WriteString(const char* pStr);
+    void BeginString(const char* pStr = VMA_NULL);
+    void ContinueString(const char* pStr);
+    void ContinueString(uint32_t n);
+    void ContinueString(uint64_t n);
+    void ContinueString_Pointer(const void* ptr);
+    void EndString(const char* pStr = VMA_NULL);
+
+    void WriteNumber(uint32_t n);
+    void WriteNumber(uint64_t n);
+    void WriteBool(bool b);
+    void WriteNull();
+
+private:
+    static const char* const INDENT;
+
+    enum COLLECTION_TYPE
+    {
+        COLLECTION_TYPE_OBJECT,
+        COLLECTION_TYPE_ARRAY,
+    };
+    struct StackItem
+    {
+        COLLECTION_TYPE type;
+        uint32_t valueCount;
+        bool singleLineMode;
+    };
+
+    VmaStringBuilder& m_SB;
+    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
+    bool m_InsideString;
+
+    void BeginValue(bool isString);
+    void WriteIndent(bool oneLess = false);
+};
+
+const char* const VmaJsonWriter::INDENT = "  ";
+
+VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
+    m_SB(sb),
+    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
+    m_InsideString(false)
+{
+}
+
+VmaJsonWriter::~VmaJsonWriter()
+{
+    VMA_ASSERT(!m_InsideString);
+    VMA_ASSERT(m_Stack.empty());
+}
+
+void VmaJsonWriter::BeginObject(bool singleLine)
+{
+    VMA_ASSERT(!m_InsideString);
+
+    BeginValue(false);
+    m_SB.Add('{');
+
+    StackItem item;
+    item.type = COLLECTION_TYPE_OBJECT;
+    item.valueCount = 0;
+    item.singleLineMode = singleLine;
+    m_Stack.push_back(item);
+}
+
+void VmaJsonWriter::EndObject()
+{
+    VMA_ASSERT(!m_InsideString);
+
+    WriteIndent(true);
+    m_SB.Add('}');
+
+    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
+    m_Stack.pop_back();
+}
+
+void VmaJsonWriter::BeginArray(bool singleLine)
+{
+    VMA_ASSERT(!m_InsideString);
+
+    BeginValue(false);
+    m_SB.Add('[');
+
+    StackItem item;
+    item.type = COLLECTION_TYPE_ARRAY;
+    item.valueCount = 0;
+    item.singleLineMode = singleLine;
+    m_Stack.push_back(item);
+}
+
+void VmaJsonWriter::EndArray()
+{
+    VMA_ASSERT(!m_InsideString);
+
+    WriteIndent(true);
+    m_SB.Add(']');
+
+    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
+    m_Stack.pop_back();
+}
+
+void VmaJsonWriter::WriteString(const char* pStr)
+{
+    BeginString(pStr);
+    EndString();
+}
+
+void VmaJsonWriter::BeginString(const char* pStr)
+{
+    VMA_ASSERT(!m_InsideString);
+
+    BeginValue(true);
+    m_SB.Add('"');
+    m_InsideString = true;
+    if(pStr != VMA_NULL && pStr[0] != '\0')
+    {
+        ContinueString(pStr);
+    }
+}
+
+void VmaJsonWriter::ContinueString(const char* pStr)
+{
+    VMA_ASSERT(m_InsideString);
+
+    const size_t strLen = strlen(pStr);
+    for(size_t i = 0; i < strLen; ++i)
+    {
+        char ch = pStr[i];
+        if(ch == '\\')
+        {
+            m_SB.Add("\\\\");
+        }
+        else if(ch == '"')
+        {
+            m_SB.Add("\\\"");
+        }
+        else if(ch >= 32)
+        {
+            m_SB.Add(ch);
+        }
+        else switch(ch)
+        {
+        case '\b':
+            m_SB.Add("\\b");
+            break;
+        case '\f':
+            m_SB.Add("\\f");
+            break;
+        case '\n':
+            m_SB.Add("\\n");
+            break;
+        case '\r':
+            m_SB.Add("\\r");
+            break;
+        case '\t':
+            m_SB.Add("\\t");
+            break;
+        default:
+            VMA_ASSERT(0 && "Character not currently supported.");
+            break;
+        }
+    }
+}
+
+void VmaJsonWriter::ContinueString(uint32_t n)
+{
+    VMA_ASSERT(m_InsideString);
+    m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::ContinueString(uint64_t n)
+{
+    VMA_ASSERT(m_InsideString);
+    m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
+{
+    VMA_ASSERT(m_InsideString);
+    m_SB.AddPointer(ptr);
+}
+
+void VmaJsonWriter::EndString(const char* pStr)
+{
+    VMA_ASSERT(m_InsideString);
+    if(pStr != VMA_NULL && pStr[0] != '\0')
+    {
+        ContinueString(pStr);
+    }
+    m_SB.Add('"');
+    m_InsideString = false;
+}
+
+void VmaJsonWriter::WriteNumber(uint32_t n)
+{
+    VMA_ASSERT(!m_InsideString);
+    BeginValue(false);
+    m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::WriteNumber(uint64_t n)
+{
+    VMA_ASSERT(!m_InsideString);
+    BeginValue(false);
+    m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::WriteBool(bool b)
+{
+    VMA_ASSERT(!m_InsideString);
+    BeginValue(false);
+    m_SB.Add(b ? "true" : "false");
+}
+
+void VmaJsonWriter::WriteNull()
+{
+    VMA_ASSERT(!m_InsideString);
+    BeginValue(false);
+    m_SB.Add("null");
+}
+
+void VmaJsonWriter::BeginValue(bool isString)
+{
+    if(!m_Stack.empty())
+    {
+        StackItem& currItem = m_Stack.back();
+        if(currItem.type == COLLECTION_TYPE_OBJECT &&
+            currItem.valueCount % 2 == 0)
+        {
+            VMA_ASSERT(isString);
+        }
+
+        if(currItem.type == COLLECTION_TYPE_OBJECT &&
+            currItem.valueCount % 2 != 0)
+        {
+            m_SB.Add(": ");
+        }
+        else if(currItem.valueCount > 0)
+        {
+            m_SB.Add(", ");
+            WriteIndent();
+        }
+        else
+        {
+            WriteIndent();
+        }
+        ++currItem.valueCount;
+    }
+}
+
+void VmaJsonWriter::WriteIndent(bool oneLess)
+{
+    if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
+    {
+        m_SB.AddNewLine();
+
+        size_t count = m_Stack.size();
+        if(count > 0 && oneLess)
+        {
+            --count;
+        }
+        for(size_t i = 0; i < count; ++i)
+        {
+            m_SB.Add(INDENT);
+        }
+    }
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+////////////////////////////////////////////////////////////////////////////////
+
+void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
+{
+    if(IsUserDataString())
+    {
+        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
+
+        FreeUserDataString(hAllocator);
+
+        if(pUserData != VMA_NULL)
+        {
+            m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
+        }
+    }
+    else
+    {
+        m_pUserData = pUserData;
+    }
+}
+
+void VmaAllocation_T::ChangeBlockAllocation(
+    VmaAllocator hAllocator,
+    VmaDeviceMemoryBlock* block,
+    VkDeviceSize offset)
+{
+    VMA_ASSERT(block != VMA_NULL);
+    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
+
+    // Move mapping reference counter from old block to new block.
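+    // Editor's note (hedged): m_MapCount packs two pieces of state - the low bits
+    // count outstanding Map() calls, while MAP_COUNT_FLAG_PERSISTENT_MAP marks an
+    // allocation created persistently mapped. A persistent mapping holds one
+    // implicit extra reference, hence the ++mapRefCount below.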
+    if(block != m_BlockAllocation.m_Block)
+    {
+        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
+        if(IsPersistentMap())
+            ++mapRefCount;
+        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
+        block->Map(hAllocator, mapRefCount, VMA_NULL);
+    }
+
+    m_BlockAllocation.m_Block = block;
+    m_BlockAllocation.m_Offset = offset;
+}
+
+void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
+{
+    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
+    m_BlockAllocation.m_Offset = newOffset;
+}
+
+VkDeviceSize VmaAllocation_T::GetOffset() const
+{
+    switch(m_Type)
+    {
+    case ALLOCATION_TYPE_BLOCK:
+        return m_BlockAllocation.m_Offset;
+    case ALLOCATION_TYPE_DEDICATED:
+        return 0;
+    default:
+        VMA_ASSERT(0);
+        return 0;
+    }
+}
+
+VkDeviceMemory VmaAllocation_T::GetMemory() const
+{
+    switch(m_Type)
+    {
+    case ALLOCATION_TYPE_BLOCK:
+        return m_BlockAllocation.m_Block->GetDeviceMemory();
+    case ALLOCATION_TYPE_DEDICATED:
+        return m_DedicatedAllocation.m_hMemory;
+    default:
+        VMA_ASSERT(0);
+        return VK_NULL_HANDLE;
+    }
+}
+
+void* VmaAllocation_T::GetMappedData() const
+{
+    switch(m_Type)
+    {
+    case ALLOCATION_TYPE_BLOCK:
+        if(m_MapCount != 0)
+        {
+            void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
+            VMA_ASSERT(pBlockData != VMA_NULL);
+            return (char*)pBlockData + m_BlockAllocation.m_Offset;
+        }
+        else
+        {
+            return VMA_NULL;
+        }
+        break;
+    case ALLOCATION_TYPE_DEDICATED:
+        VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
+        return m_DedicatedAllocation.m_pMappedData;
+    default:
+        VMA_ASSERT(0);
+        return VMA_NULL;
+    }
+}
+
+bool VmaAllocation_T::CanBecomeLost() const
+{
+    switch(m_Type)
+    {
+    case ALLOCATION_TYPE_BLOCK:
+        return m_BlockAllocation.m_CanBecomeLost;
+    case ALLOCATION_TYPE_DEDICATED:
+        return false;
+    default:
+        VMA_ASSERT(0);
+        return false;
+    }
+}
+
+bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
+{
+    VMA_ASSERT(CanBecomeLost());
+
+    /*
+    Warning: This is a carefully designed algorithm.
+    Do not modify unless you really know what you're doing :)
+    */
+    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
+    for(;;)
+    {
+        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
+        {
+            VMA_ASSERT(0);
+            return false;
+        }
+        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
+        {
+            return false;
+        }
+        else // Last use time earlier than current time.
+        {
+            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
+            {
+                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
+                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
+                return true;
+            }
+        }
+    }
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+// Correspond to values of enum VmaSuballocationType.
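+// Editor's note (hedged): the array below is indexed directly with the numeric
+// value of VmaSuballocationType (e.g. VMA_SUBALLOCATION_TYPE_BUFFER -> "BUFFER"),
+// so its order must stay in sync with the enum.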
+static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
+    "FREE",
+    "UNKNOWN",
+    "BUFFER",
+    "IMAGE_UNKNOWN",
+    "IMAGE_LINEAR",
+    "IMAGE_OPTIMAL",
+};
+
+void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
+{
+    json.WriteString("Type");
+    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
+
+    json.WriteString("Size");
+    json.WriteNumber(m_Size);
+
+    if(m_pUserData != VMA_NULL)
+    {
+        json.WriteString("UserData");
+        if(IsUserDataString())
+        {
+            json.WriteString((const char*)m_pUserData);
+        }
+        else
+        {
+            json.BeginString();
+            json.ContinueString_Pointer(m_pUserData);
+            json.EndString();
+        }
+    }
+
+    json.WriteString("CreationFrameIndex");
+    json.WriteNumber(m_CreationFrameIndex);
+
+    json.WriteString("LastUseFrameIndex");
+    json.WriteNumber(GetLastUseFrameIndex());
+
+    if(m_BufferImageUsage != 0)
+    {
+        json.WriteString("Usage");
+        json.WriteNumber(m_BufferImageUsage);
+    }
+}
+
+#endif
+
+void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
+{
+    VMA_ASSERT(IsUserDataString());
+    VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
+    m_pUserData = VMA_NULL;
+}
+
+void VmaAllocation_T::BlockAllocMap()
+{
+    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
+
+    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
+    {
+        ++m_MapCount;
+    }
+    else
+    {
+        VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
+    }
+}
+
+void VmaAllocation_T::BlockAllocUnmap()
+{
+    VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
+
+    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
+    {
+        --m_MapCount;
+    }
+    else
+    {
+        VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
+    }
+}
+
+VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
+{
+    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
+
+    if(m_MapCount != 0)
+    {
+        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
+        {
+            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
+            *ppData = m_DedicatedAllocation.m_pMappedData;
+            ++m_MapCount;
+            return VK_SUCCESS;
+        }
+        else
+        {
+            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
+            return VK_ERROR_MEMORY_MAP_FAILED;
+        }
+    }
+    else
+    {
+        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
+            hAllocator->m_hDevice,
+            m_DedicatedAllocation.m_hMemory,
+            0, // offset
+            VK_WHOLE_SIZE,
+            0, // flags
+            ppData);
+        if(result == VK_SUCCESS)
+        {
+            m_DedicatedAllocation.m_pMappedData = *ppData;
+            m_MapCount = 1;
+        }
+        return result;
+    }
+}
+
+void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
+{
+    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
+
+    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
+    {
+        --m_MapCount;
+        if(m_MapCount == 0)
+        {
+            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
+            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
+                hAllocator->m_hDevice,
+                m_DedicatedAllocation.m_hMemory);
+        }
+    }
+    else
+    {
+        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
+    }
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
+{
+    json.BeginObject();
+
+    json.WriteString("Blocks");
+    json.WriteNumber(stat.blockCount);
+
+    json.WriteString("Allocations");
+    json.WriteNumber(stat.allocationCount);
+
+    json.WriteString("UnusedRanges");
+    json.WriteNumber(stat.unusedRangeCount);
+
+    json.WriteString("UsedBytes");
+    json.WriteNumber(stat.usedBytes);
+
+    json.WriteString("UnusedBytes");
+    json.WriteNumber(stat.unusedBytes);
+
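+    // Editor's note (hedged): the Min/Avg/Max sub-objects below are emitted only
+    // when there is more than one element, which keeps reports for trivial cases
+    // compact.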
+    if(stat.allocationCount > 1)
+    {
+        json.WriteString("AllocationSize");
+        json.BeginObject(true);
+        json.WriteString("Min");
+        json.WriteNumber(stat.allocationSizeMin);
+        json.WriteString("Avg");
+        json.WriteNumber(stat.allocationSizeAvg);
+        json.WriteString("Max");
+        json.WriteNumber(stat.allocationSizeMax);
+        json.EndObject();
+    }
+
+    if(stat.unusedRangeCount > 1)
+    {
+        json.WriteString("UnusedRangeSize");
+        json.BeginObject(true);
+        json.WriteString("Min");
+        json.WriteNumber(stat.unusedRangeSizeMin);
+        json.WriteString("Avg");
+        json.WriteNumber(stat.unusedRangeSizeAvg);
+        json.WriteString("Max");
+        json.WriteNumber(stat.unusedRangeSizeMax);
+        json.EndObject();
+    }
+
+    json.EndObject();
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+struct VmaSuballocationItemSizeLess
+{
+    bool operator()(
+        const VmaSuballocationList::iterator lhs,
+        const VmaSuballocationList::iterator rhs) const
+    {
+        return lhs->size < rhs->size;
+    }
+    bool operator()(
+        const VmaSuballocationList::iterator lhs,
+        VkDeviceSize rhsSize) const
+    {
+        return lhs->size < rhsSize;
+    }
+};
+
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaBlockMetadata
+
+VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
+    m_Size(0),
+    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
+{
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
+    VkDeviceSize unusedBytes,
+    size_t allocationCount,
+    size_t unusedRangeCount) const
+{
+    json.BeginObject();
+
+    json.WriteString("TotalBytes");
+    json.WriteNumber(GetSize());
+
+    json.WriteString("UnusedBytes");
+    json.WriteNumber(unusedBytes);
+
+    json.WriteString("Allocations");
+    json.WriteNumber((uint64_t)allocationCount);
+
+    json.WriteString("UnusedRanges");
+    json.WriteNumber((uint64_t)unusedRangeCount);
+
+    json.WriteString("Suballocations");
+    json.BeginArray();
+}
+
+void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
+    VkDeviceSize offset,
+    VmaAllocation hAllocation) const
+{
+    json.BeginObject(true);
+
+    json.WriteString("Offset");
+    json.WriteNumber(offset);
+
+    hAllocation->PrintParameters(json);
+
+    json.EndObject();
+}
+
+void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
+    VkDeviceSize offset,
+    VkDeviceSize size) const
+{
+    json.BeginObject(true);
+
+    json.WriteString("Offset");
+    json.WriteNumber(offset);
+
+    json.WriteString("Type");
+    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
+
+    json.WriteString("Size");
+    json.WriteNumber(size);
+
+    json.EndObject();
+}
+
+void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
+{
+    json.EndArray();
+    json.EndObject();
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaBlockMetadata_Generic
+
+VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
+    VmaBlockMetadata(hAllocator),
+    m_FreeCount(0),
+    m_SumFreeSize(0),
+    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
+{
+}
+
+VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
+{
+}
+
+void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
+{
+    VmaBlockMetadata::Init(size);
+
+    m_FreeCount = 1;
+    m_SumFreeSize = size;
+
+    VmaSuballocation suballoc = {};
+    suballoc.offset = 0;
+    suballoc.size = size;
+    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+    suballoc.hAllocation = VK_NULL_HANDLE;
+
+    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
+    m_Suballocations.push_back(suballoc);
+    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
+    --suballocItem;
+    m_FreeSuballocationsBySize.push_back(suballocItem);
+}
+
+bool VmaBlockMetadata_Generic::Validate() const
+{
+    VMA_VALIDATE(!m_Suballocations.empty());
+
+    // Expected offset of new suballocation as calculated from previous ones.
+    VkDeviceSize calculatedOffset = 0;
+    // Expected number of free suballocations as calculated from traversing their list.
+    uint32_t calculatedFreeCount = 0;
+    // Expected sum size of free suballocations as calculated from traversing their list.
+    VkDeviceSize calculatedSumFreeSize = 0;
+    // Expected number of free suballocations that should be registered in
+    // m_FreeSuballocationsBySize calculated from traversing their list.
+    size_t freeSuballocationsToRegister = 0;
+    // True if previous visited suballocation was free.
+    bool prevFree = false;
+
+    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+        suballocItem != m_Suballocations.cend();
+        ++suballocItem)
+    {
+        const VmaSuballocation& subAlloc = *suballocItem;
+
+        // Actual offset of this suballocation doesn't match expected one.
+        VMA_VALIDATE(subAlloc.offset == calculatedOffset);
+
+        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
+        // Two adjacent free suballocations are invalid. They should be merged.
+        VMA_VALIDATE(!prevFree || !currFree);
+
+        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
+
+        if(currFree)
+        {
+            calculatedSumFreeSize += subAlloc.size;
+            ++calculatedFreeCount;
+            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+            {
+                ++freeSuballocationsToRegister;
+            }
+
+            // Margin required between allocations - every free space must be at least that large.
+            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
+        }
+        else
+        {
+            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
+            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
+
+            // Margin required between allocations - previous allocation must be free.
+            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
+        }
+
+        calculatedOffset += subAlloc.size;
+        prevFree = currFree;
+    }
+
+    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
+    // match expected one.
+    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
+
+    VkDeviceSize lastSize = 0;
+    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
+    {
+        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
+
+        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
+        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
+        // They must be sorted by size ascending.
+        VMA_VALIDATE(suballocItem->size >= lastSize);
+
+        lastSize = suballocItem->size;
+    }
+
+    // Check if totals match calculated values.
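+    // Editor's note (hedged): Validate() walks every suballocation, so it is
+    // O(n); it is intended to run under VMA_HEAVY_ASSERT in debug configurations
+    // rather than on every call in release builds.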
+    VMA_VALIDATE(ValidateFreeSuballocationList());
+    VMA_VALIDATE(calculatedOffset == GetSize());
+    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
+    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
+
+    return true;
+}
+
+VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
+{
+    if(!m_FreeSuballocationsBySize.empty())
+    {
+        return m_FreeSuballocationsBySize.back()->size;
+    }
+    else
+    {
+        return 0;
+    }
+}
+
+bool VmaBlockMetadata_Generic::IsEmpty() const
+{
+    return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
+}
+
+void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
+{
+    outInfo.blockCount = 1;
+
+    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
+    outInfo.allocationCount = rangeCount - m_FreeCount;
+    outInfo.unusedRangeCount = m_FreeCount;
+
+    outInfo.unusedBytes = m_SumFreeSize;
+    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
+
+    outInfo.allocationSizeMin = UINT64_MAX;
+    outInfo.allocationSizeMax = 0;
+    outInfo.unusedRangeSizeMin = UINT64_MAX;
+    outInfo.unusedRangeSizeMax = 0;
+
+    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+        suballocItem != m_Suballocations.cend();
+        ++suballocItem)
+    {
+        const VmaSuballocation& suballoc = *suballocItem;
+        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+        }
+        else
+        {
+            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
+            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
+        }
+    }
+}
+
+void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
+{
+    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
+
+    inoutStats.size += GetSize();
+    inoutStats.unusedSize += m_SumFreeSize;
+    inoutStats.allocationCount += rangeCount - m_FreeCount;
+    inoutStats.unusedRangeCount += m_FreeCount;
+    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
+{
+    PrintDetailedMap_Begin(json,
+        m_SumFreeSize, // unusedBytes
+        m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
+        m_FreeCount); // unusedRangeCount
+
+    size_t i = 0;
+    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+        suballocItem != m_Suballocations.cend();
+        ++suballocItem, ++i)
+    {
+        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
+        }
+        else
+        {
+            PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
+        }
+    }
+
+    PrintDetailedMap_End(json);
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+bool VmaBlockMetadata_Generic::CreateAllocationRequest(
+    uint32_t currentFrameIndex,
+    uint32_t frameInUseCount,
+    VkDeviceSize bufferImageGranularity,
+    VkDeviceSize allocSize,
+    VkDeviceSize allocAlignment,
+    bool upperAddress,
+    VmaSuballocationType allocType,
+    bool canMakeOtherLost,
+    uint32_t strategy,
+    VmaAllocationRequest* pAllocationRequest)
+{
+    VMA_ASSERT(allocSize > 0);
+    VMA_ASSERT(!upperAddress);
+    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+    VMA_ASSERT(pAllocationRequest != VMA_NULL);
+    VMA_HEAVY_ASSERT(Validate());
+
+    pAllocationRequest->type = VmaAllocationRequestType::Normal;
+
+    // There is not enough total free space in this block to fulfill the request: Early return.
+    if(canMakeOtherLost == false &&
+        m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
+    {
+        return false;
+    }
+
+    // New algorithm, efficiently searching freeSuballocationsBySize.
+    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
+    if(freeSuballocCount > 0)
+    {
+        if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
+        {
+            // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
+            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
+                m_FreeSuballocationsBySize.data(),
+                m_FreeSuballocationsBySize.data() + freeSuballocCount,
+                allocSize + 2 * VMA_DEBUG_MARGIN,
+                VmaSuballocationItemSizeLess());
+            size_t index = it - m_FreeSuballocationsBySize.data();
+            for(; index < freeSuballocCount; ++index)
+            {
+                if(CheckAllocation(
+                    currentFrameIndex,
+                    frameInUseCount,
+                    bufferImageGranularity,
+                    allocSize,
+                    allocAlignment,
+                    allocType,
+                    m_FreeSuballocationsBySize[index],
+                    false, // canMakeOtherLost
+                    &pAllocationRequest->offset,
+                    &pAllocationRequest->itemsToMakeLostCount,
+                    &pAllocationRequest->sumFreeSize,
+                    &pAllocationRequest->sumItemSize))
+                {
+                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
+                    return true;
+                }
+            }
+        }
+        else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
+        {
+            for(VmaSuballocationList::iterator it = m_Suballocations.begin();
+                it != m_Suballocations.end();
+                ++it)
+            {
+                if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
+                    currentFrameIndex,
+                    frameInUseCount,
+                    bufferImageGranularity,
+                    allocSize,
+                    allocAlignment,
+                    allocType,
+                    it,
+                    false, // canMakeOtherLost
+                    &pAllocationRequest->offset,
+                    &pAllocationRequest->itemsToMakeLostCount,
+                    &pAllocationRequest->sumFreeSize,
+                    &pAllocationRequest->sumItemSize))
+                {
+                    pAllocationRequest->item = it;
+                    return true;
+                }
+            }
+        }
+        else // WORST_FIT, FIRST_FIT
+        {
+            // Search starting from biggest suballocations.
+            for(size_t index = freeSuballocCount; index--; )
+            {
+                if(CheckAllocation(
+                    currentFrameIndex,
+                    frameInUseCount,
+                    bufferImageGranularity,
+                    allocSize,
+                    allocAlignment,
+                    allocType,
+                    m_FreeSuballocationsBySize[index],
+                    false, // canMakeOtherLost
+                    &pAllocationRequest->offset,
+                    &pAllocationRequest->itemsToMakeLostCount,
+                    &pAllocationRequest->sumFreeSize,
+                    &pAllocationRequest->sumItemSize))
+                {
+                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
+                    return true;
+                }
+            }
+        }
+    }
+
+    if(canMakeOtherLost)
+    {
+        // Brute-force algorithm. TODO: Come up with something better.
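+        // Editor's note (hedged): each suballocation is tried as a candidate
+        // starting point and CheckAllocation() may itself scan forward, so this
+        // path is O(n^2) in the worst case. FIRST_FIT returns the first viable
+        // request below; the other strategies keep the candidate with the lowest
+        // CalcCost(), i.e. the fewest bytes of live allocations made lost.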
+
+        bool found = false;
+        VmaAllocationRequest tmpAllocRequest = {};
+        tmpAllocRequest.type = VmaAllocationRequestType::Normal;
+        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
+            suballocIt != m_Suballocations.end();
+            ++suballocIt)
+        {
+            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
+                suballocIt->hAllocation->CanBecomeLost())
+            {
+                if(CheckAllocation(
+                    currentFrameIndex,
+                    frameInUseCount,
+                    bufferImageGranularity,
+                    allocSize,
+                    allocAlignment,
+                    allocType,
+                    suballocIt,
+                    canMakeOtherLost,
+                    &tmpAllocRequest.offset,
+                    &tmpAllocRequest.itemsToMakeLostCount,
+                    &tmpAllocRequest.sumFreeSize,
+                    &tmpAllocRequest.sumItemSize))
+                {
+                    if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
+                    {
+                        *pAllocationRequest = tmpAllocRequest;
+                        pAllocationRequest->item = suballocIt;
+                        break;
+                    }
+                    if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
+                    {
+                        *pAllocationRequest = tmpAllocRequest;
+                        pAllocationRequest->item = suballocIt;
+                        found = true;
+                    }
+                }
+            }
+        }
+
+        return found;
+    }
+
+    return false;
+}
+
+bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
+    uint32_t currentFrameIndex,
+    uint32_t frameInUseCount,
+    VmaAllocationRequest* pAllocationRequest)
+{
+    VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
+
+    while(pAllocationRequest->itemsToMakeLostCount > 0)
+    {
+        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            ++pAllocationRequest->item;
+        }
+        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
+        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
+        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
+        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+        {
+            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
+            --pAllocationRequest->itemsToMakeLostCount;
+        }
+        else
+        {
+            return false;
+        }
+    }
+
+    VMA_HEAVY_ASSERT(Validate());
+    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
+    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
+
+    return true;
+}
+
+uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
+{
+    uint32_t lostAllocationCount = 0;
+    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
+        it != m_Suballocations.end();
+        ++it)
+    {
+        if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
+            it->hAllocation->CanBecomeLost() &&
+            it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+        {
+            it = FreeSuballocation(it);
+            ++lostAllocationCount;
+        }
+    }
+    return lostAllocationCount;
+}
+
+VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
+{
+    for(VmaSuballocationList::iterator it = m_Suballocations.begin();
+        it != m_Suballocations.end();
+        ++it)
+    {
+        if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
+            {
+                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
+                return VK_ERROR_VALIDATION_FAILED_EXT;
+            }
+            if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
+            {
+                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+                return VK_ERROR_VALIDATION_FAILED_EXT;
+            }
+        }
+    }
+
+    return VK_SUCCESS;
+}
+
+void VmaBlockMetadata_Generic::Alloc(
+    const VmaAllocationRequest& request,
+    VmaSuballocationType type,
+    VkDeviceSize allocSize,
+    VmaAllocation hAllocation)
+{
+    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
+    VMA_ASSERT(request.item != m_Suballocations.end());
+    VmaSuballocation& suballoc = *request.item;
+    // Given suballocation is a free block.
+    VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+    // Given offset is inside this suballocation.
+    VMA_ASSERT(request.offset >= suballoc.offset);
+    const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
+    VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
+    const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
+
+    // Unregister this free suballocation from m_FreeSuballocationsBySize and update
+    // it to become used.
+    UnregisterFreeSuballocation(request.item);
+
+    suballoc.offset = request.offset;
+    suballoc.size = allocSize;
+    suballoc.type = type;
+    suballoc.hAllocation = hAllocation;
+
+    // If there are any free bytes remaining at the end, insert new free suballocation after current one.
+    if(paddingEnd)
+    {
+        VmaSuballocation paddingSuballoc = {};
+        paddingSuballoc.offset = request.offset + allocSize;
+        paddingSuballoc.size = paddingEnd;
+        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+        VmaSuballocationList::iterator next = request.item;
+        ++next;
+        const VmaSuballocationList::iterator paddingEndItem =
+            m_Suballocations.insert(next, paddingSuballoc);
+        RegisterFreeSuballocation(paddingEndItem);
+    }
+
+    // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
+    if(paddingBegin)
+    {
+        VmaSuballocation paddingSuballoc = {};
+        paddingSuballoc.offset = request.offset - paddingBegin;
+        paddingSuballoc.size = paddingBegin;
+        paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+        const VmaSuballocationList::iterator paddingBeginItem =
+            m_Suballocations.insert(request.item, paddingSuballoc);
+        RegisterFreeSuballocation(paddingBeginItem);
+    }
+
+    // Update totals.
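+    // Editor's note (hedged) on the arithmetic below: the consumed free range is
+    // gone (-1), but each non-empty padding re-enters the free list (+1 each), so
+    // m_FreeCount can net out to -1, 0, or +1. m_SumFreeSize drops by exactly
+    // allocSize because the paddings remain free bytes.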
+    m_FreeCount = m_FreeCount - 1;
+    if(paddingBegin > 0)
+    {
+        ++m_FreeCount;
+    }
+    if(paddingEnd > 0)
+    {
+        ++m_FreeCount;
+    }
+    m_SumFreeSize -= allocSize;
+}
+
+void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
+{
+    for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
+        suballocItem != m_Suballocations.end();
+        ++suballocItem)
+    {
+        VmaSuballocation& suballoc = *suballocItem;
+        if(suballoc.hAllocation == allocation)
+        {
+            FreeSuballocation(suballocItem);
+            VMA_HEAVY_ASSERT(Validate());
+            return;
+        }
+    }
+    VMA_ASSERT(0 && "Not found!");
+}
+
+void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
+{
+    for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
+        suballocItem != m_Suballocations.end();
+        ++suballocItem)
+    {
+        VmaSuballocation& suballoc = *suballocItem;
+        if(suballoc.offset == offset)
+        {
+            FreeSuballocation(suballocItem);
+            return;
+        }
+    }
+    VMA_ASSERT(0 && "Not found!");
+}
+
+bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
+{
+    VkDeviceSize lastSize = 0;
+    for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
+    {
+        const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
+
+        VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
+        VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
+        VMA_VALIDATE(it->size >= lastSize);
+        lastSize = it->size;
+    }
+    return true;
+}
+
+bool VmaBlockMetadata_Generic::CheckAllocation(
+    uint32_t currentFrameIndex,
+    uint32_t frameInUseCount,
+    VkDeviceSize bufferImageGranularity,
+    VkDeviceSize allocSize,
+    VkDeviceSize allocAlignment,
+    VmaSuballocationType allocType,
+    VmaSuballocationList::const_iterator suballocItem,
+    bool canMakeOtherLost,
+    VkDeviceSize* pOffset,
+    size_t* itemsToMakeLostCount,
+    VkDeviceSize* pSumFreeSize,
+    VkDeviceSize* pSumItemSize) const
+{
+    VMA_ASSERT(allocSize > 0);
+    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
+    VMA_ASSERT(suballocItem != m_Suballocations.cend());
+    VMA_ASSERT(pOffset != VMA_NULL);
+
+    *itemsToMakeLostCount = 0;
+    *pSumFreeSize = 0;
+    *pSumItemSize = 0;
+
+    if(canMakeOtherLost)
+    {
+        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            *pSumFreeSize = suballocItem->size;
+        }
+        else
+        {
+            if(suballocItem->hAllocation->CanBecomeLost() &&
+                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+            {
+                ++*itemsToMakeLostCount;
+                *pSumItemSize = suballocItem->size;
+            }
+            else
+            {
+                return false;
+            }
+        }
+
+        // Remaining size is too small for this request: Early return.
+        if(GetSize() - suballocItem->offset < allocSize)
+        {
+            return false;
+        }
+
+        // Start from offset equal to beginning of this suballocation.
+        *pOffset = suballocItem->offset;
+
+        // Apply VMA_DEBUG_MARGIN at the beginning.
+        if(VMA_DEBUG_MARGIN > 0)
+        {
+            *pOffset += VMA_DEBUG_MARGIN;
+        }
+
+        // Apply alignment.
+        *pOffset = VmaAlignUp(*pOffset, allocAlignment);
+
+        // Check previous suballocations for BufferImageGranularity conflicts.
+        // Make bigger alignment if necessary.
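+        // Editor's note (hedged): Vulkan's bufferImageGranularity limit requires
+        // that linear and optimal-tiling resources placed in the same
+        // VkDeviceMemory do not share a "page" of that many bytes.
+        // VmaBlocksOnSamePage() detects such overlap; on a conflict the offset is
+        // simply re-aligned up to the granularity itself.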
+        if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
+        {
+            bool bufferImageGranularityConflict = false;
+            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
+            while(prevSuballocItem != m_Suballocations.cbegin())
+            {
+                --prevSuballocItem;
+                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
+                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
+                {
+                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+                    {
+                        bufferImageGranularityConflict = true;
+                        break;
+                    }
+                }
+                else
+                    // Already on previous page.
+                    break;
+            }
+            if(bufferImageGranularityConflict)
+            {
+                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
+            }
+        }
+
+        // Now that we have final *pOffset, check if we are past suballocItem.
+        // If yes, return false - this function should be called for another suballocItem as starting point.
+        if(*pOffset >= suballocItem->offset + suballocItem->size)
+        {
+            return false;
+        }
+
+        // Calculate padding at the beginning based on current offset.
+        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
+
+        // Calculate required margin at the end.
+        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
+
+        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
+        // Another early return check.
+        if(suballocItem->offset + totalSize > GetSize())
+        {
+            return false;
+        }
+
+        // Advance lastSuballocItem until desired size is reached.
+        // Update itemsToMakeLostCount.
+        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
+        if(totalSize > suballocItem->size)
+        {
+            VkDeviceSize remainingSize = totalSize - suballocItem->size;
+            while(remainingSize > 0)
+            {
+                ++lastSuballocItem;
+                if(lastSuballocItem == m_Suballocations.cend())
+                {
+                    return false;
+                }
+                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+                {
+                    *pSumFreeSize += lastSuballocItem->size;
+                }
+                else
+                {
+                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
+                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
+                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+                    {
+                        ++*itemsToMakeLostCount;
+                        *pSumItemSize += lastSuballocItem->size;
+                    }
+                    else
+                    {
+                        return false;
+                    }
+                }
+                remainingSize = (lastSuballocItem->size < remainingSize) ?
+                    remainingSize - lastSuballocItem->size : 0;
+            }
+        }
+
+        // Check next suballocations for BufferImageGranularity conflicts.
+        // If conflict exists, we must mark more allocations lost or fail.
+        if(bufferImageGranularity > 1 && (allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity))
+        {
+            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
+            ++nextSuballocItem;
+            while(nextSuballocItem != m_Suballocations.cend())
+            {
+                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
+                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+                {
+                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+                    {
+                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
+                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
+                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
+                        {
+                            ++*itemsToMakeLostCount;
+                        }
+                        else
+                        {
+                            return false;
+                        }
+                    }
+                }
+                else
+                {
+                    // Already on next page.
+                    break;
+                }
+                ++nextSuballocItem;
+            }
+        }
+    }
+    else
+    {
+        const VmaSuballocation& suballoc = *suballocItem;
+        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
+
+        *pSumFreeSize = suballoc.size;
+
+        // Size of this suballocation is too small for this request: Early return.
+        if(suballoc.size < allocSize)
+        {
+            return false;
+        }
+
+        // Start from offset equal to beginning of this suballocation.
+        *pOffset = suballoc.offset;
+
+        // Apply VMA_DEBUG_MARGIN at the beginning.
+        if(VMA_DEBUG_MARGIN > 0)
+        {
+            *pOffset += VMA_DEBUG_MARGIN;
+        }
+
+        // Apply alignment.
+        *pOffset = VmaAlignUp(*pOffset, allocAlignment);
+
+        // Check previous suballocations for BufferImageGranularity conflicts.
+        // Make bigger alignment if necessary.
+        if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
+        {
+            bool bufferImageGranularityConflict = false;
+            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
+            while(prevSuballocItem != m_Suballocations.cbegin())
+            {
+                --prevSuballocItem;
+                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
+                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
+                {
+                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
+                    {
+                        bufferImageGranularityConflict = true;
+                        break;
+                    }
+                }
+                else
+                    // Already on previous page.
+                    break;
+            }
+            if(bufferImageGranularityConflict)
+            {
+                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
+            }
+        }
+
+        // Calculate padding at the beginning based on current offset.
+        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
+
+        // Calculate required margin at the end.
+        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
+
+        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
+        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
+        {
+            return false;
+        }
+
+        // Check next suballocations for BufferImageGranularity conflicts.
+        // If conflict exists, allocation cannot be made here.
+        if(bufferImageGranularity > 1 && (allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity))
+        {
+            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
+            ++nextSuballocItem;
+            while(nextSuballocItem != m_Suballocations.cend())
+            {
+                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
+                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+                {
+                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+                    {
+                        return false;
+                    }
+                }
+                else
+                {
+                    // Already on next page.
+                    break;
+                }
+                ++nextSuballocItem;
+            }
+        }
+    }
+
+    // All tests passed: Success. pOffset is already filled.
+    return true;
+}
+
+void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
+{
+    VMA_ASSERT(item != m_Suballocations.end());
+    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+
+    VmaSuballocationList::iterator nextItem = item;
+    ++nextItem;
+    VMA_ASSERT(nextItem != m_Suballocations.end());
+    VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
+
+    item->size += nextItem->size;
+    --m_FreeCount;
+    m_Suballocations.erase(nextItem);
+}
+
+VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
+{
+    // Change this suballocation to be marked as free.
+    VmaSuballocation& suballoc = *suballocItem;
+    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+    suballoc.hAllocation = VK_NULL_HANDLE;
+
+    // Update totals.
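+    // Editor's note (hedged): totals are updated before merging; MergeFreeWithNext()
+    // performs its own --m_FreeCount when two free neighbors collapse into one, so
+    // the counters stay consistent.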
+    ++m_FreeCount;
+    m_SumFreeSize += suballoc.size;
+
+    // Merge with previous and/or next suballocation if it's also free.
+    bool mergeWithNext = false;
+    bool mergeWithPrev = false;
+
+    VmaSuballocationList::iterator nextItem = suballocItem;
+    ++nextItem;
+    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
+    {
+        mergeWithNext = true;
+    }
+
+    VmaSuballocationList::iterator prevItem = suballocItem;
+    if(suballocItem != m_Suballocations.begin())
+    {
+        --prevItem;
+        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            mergeWithPrev = true;
+        }
+    }
+
+    if(mergeWithNext)
+    {
+        UnregisterFreeSuballocation(nextItem);
+        MergeFreeWithNext(suballocItem);
+    }
+
+    if(mergeWithPrev)
+    {
+        UnregisterFreeSuballocation(prevItem);
+        MergeFreeWithNext(prevItem);
+        RegisterFreeSuballocation(prevItem);
+        return prevItem;
+    }
+    else
+    {
+        RegisterFreeSuballocation(suballocItem);
+        return suballocItem;
+    }
+}
+
+void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
+{
+    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+    VMA_ASSERT(item->size > 0);
+
+    // You may want to enable this validation at the beginning or at the end of
+    // this function, depending on what you want to check.
+    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+
+    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+    {
+        if(m_FreeSuballocationsBySize.empty())
+        {
+            m_FreeSuballocationsBySize.push_back(item);
+        }
+        else
+        {
+            VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
+        }
+    }
+
+    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+}
+
+
+void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
+{
+    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+    VMA_ASSERT(item->size > 0);
+
+    // You may want to enable this validation at the beginning or at the end of
+    // this function, depending on what you want to check.
+ VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); + + if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess( + m_FreeSuballocationsBySize.data(), + m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(), + item, + VmaSuballocationItemSizeLess()); + for(size_t index = it - m_FreeSuballocationsBySize.data(); + index < m_FreeSuballocationsBySize.size(); + ++index) + { + if(m_FreeSuballocationsBySize[index] == item) + { + VmaVectorRemove(m_FreeSuballocationsBySize, index); + return; + } + VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found."); + } + VMA_ASSERT(0 && "Not found."); + } + + //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); +} + +bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible( + VkDeviceSize bufferImageGranularity, + VmaSuballocationType& inOutPrevSuballocType) const +{ + if(bufferImageGranularity == 1 || IsEmpty()) + { + return false; + } + + VkDeviceSize minAlignment = VK_WHOLE_SIZE; + bool typeConflictFound = false; + for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin(); + it != m_Suballocations.cend(); + ++it) + { + const VmaSuballocationType suballocType = it->type; + if(suballocType != VMA_SUBALLOCATION_TYPE_FREE) + { + minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment()); + if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType)) + { + typeConflictFound = true; + } + inOutPrevSuballocType = suballocType; + } + } + + return typeConflictFound || minAlignment >= bufferImageGranularity; +} + +//////////////////////////////////////////////////////////////////////////////// +// class VmaBlockMetadata_Linear + +VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) : + VmaBlockMetadata(hAllocator), + m_SumFreeSize(0), + m_Suballocations0(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), + m_Suballocations1(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), + m_1stVectorIndex(0), + m_2ndVectorMode(SECOND_VECTOR_EMPTY), + m_1stNullItemsBeginCount(0), + m_1stNullItemsMiddleCount(0), + m_2ndNullItemsCount(0) +{ +} + +VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear() +{ +} + +void VmaBlockMetadata_Linear::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + m_SumFreeSize = size; +} + +bool VmaBlockMetadata_Linear::Validate() const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY)); + VMA_VALIDATE(!suballocations1st.empty() || + suballocations2nd.empty() || + m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER); + + if(!suballocations1st.empty()) + { + // Null item at the beginning should be accounted into m_1stNullItemsBeginCount. + VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE); + // Null item at the end should be just pop_back(). + VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE); + } + if(!suballocations2nd.empty()) + { + // Null item at the end should be just pop_back(). 
+ VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE); + } + + VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size()); + VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size()); + + VkDeviceSize sumUsedSize = 0; + const size_t suballoc1stCount = suballocations1st.size(); + VkDeviceSize offset = VMA_DEBUG_MARGIN; + + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for(size_t i = 0; i < suballoc2ndCount; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE)); + VMA_VALIDATE(suballoc.offset >= offset); + + if(!currFree) + { + VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset); + VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size); + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && + suballoc.hAllocation == VK_NULL_HANDLE); + } + + size_t nullItem1stCount = m_1stNullItemsBeginCount; + + for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE)); + VMA_VALIDATE(suballoc.offset >= offset); + VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree); + + if(!currFree) + { + VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset); + VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size); + sumUsedSize += suballoc.size; + } + else + { + ++nullItem1stCount; + } + + offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN; + } + VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount); + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for(size_t i = suballoc2ndCount; i--; ) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE)); + VMA_VALIDATE(suballoc.offset >= offset); + + if(!currFree) + { + VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset); + VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size); + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + VMA_VALIDATE(offset <= GetSize()); + VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize); + + return true; +} + +size_t VmaBlockMetadata_Linear::GetAllocationCount() const +{ + return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) + + AccessSuballocations2nd().size() - m_2ndNullItemsCount; +} + +VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const +{ + const VkDeviceSize size = GetSize(); + + /* + We don't consider gaps inside allocation 
vectors with freed allocations because
+    they are not suitable for reuse in linear allocator. We consider only space that
+    is available for new allocations.
+    */
+    if(IsEmpty())
+    {
+        return size;
+    }
+
+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
+    switch(m_2ndVectorMode)
+    {
+    case SECOND_VECTOR_EMPTY:
+        /*
+        Available space is after end of 1st, as well as before beginning of 1st (which
+        would make it a ring buffer).
+        */
+        {
+            const size_t suballocations1stCount = suballocations1st.size();
+            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
+            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
+            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
+            return VMA_MAX(
+                firstSuballoc.offset,
+                size - (lastSuballoc.offset + lastSuballoc.size));
+        }
+        break;
+
+    case SECOND_VECTOR_RING_BUFFER:
+        /*
+        Available space is only between end of 2nd and beginning of 1st.
+        */
+        {
+            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
+            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
+            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
+        }
+        break;
+
+    case SECOND_VECTOR_DOUBLE_STACK:
+        /*
+        Available space is only between end of 1st and top of 2nd.
+        */
+        {
+            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
+            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
+            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
+        }
+        break;
+
+    default:
+        VMA_ASSERT(0);
+        return 0;
+    }
+}
+
+void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
+{
+    const VkDeviceSize size = GetSize();
+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+    const size_t suballoc1stCount = suballocations1st.size();
+    const size_t suballoc2ndCount = suballocations2nd.size();
+
+    outInfo.blockCount = 1;
+    outInfo.allocationCount = (uint32_t)GetAllocationCount();
+    outInfo.unusedRangeCount = 0;
+    outInfo.usedBytes = 0;
+    outInfo.allocationSizeMin = UINT64_MAX;
+    outInfo.allocationSizeMax = 0;
+    outInfo.unusedRangeSizeMin = UINT64_MAX;
+    outInfo.unusedRangeSizeMax = 0;
+
+    VkDeviceSize lastOffset = 0;
+
+    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+    {
+        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+        size_t nextAlloc2ndIndex = 0;
+        while(lastOffset < freeSpace2ndTo1stEnd)
+        {
+            // Find next non-null allocation or move nextAllocIndex to the end.
+            while(nextAlloc2ndIndex < suballoc2ndCount &&
+                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+            {
+                ++nextAlloc2ndIndex;
+            }
+
+            // Found non-null allocation.
+            if(nextAlloc2ndIndex < suballoc2ndCount)
+            {
+                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+                // 1. Process free space before this allocation.
+                if(lastOffset < suballoc.offset)
+                {
+                    // There is free space from lastOffset to suballoc.offset.
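+                    // Fold this gap into the running statistics: bump the range
+                    // count, add its size, and widen the min/max bounds.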
+                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+                    ++outInfo.unusedRangeCount;
+                    outInfo.unusedBytes += unusedRangeSize;
+                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+                }
+
+                // 2. Process this allocation.
+                // There is allocation with suballoc.offset, suballoc.size.
+                outInfo.usedBytes += suballoc.size;
+                outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+                outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
+                // 3. Prepare for next iteration.
+                lastOffset = suballoc.offset + suballoc.size;
+                ++nextAlloc2ndIndex;
+            }
+            // We are at the end.
+            else
+            {
+                // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+                if(lastOffset < freeSpace2ndTo1stEnd)
+                {
+                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
+                    ++outInfo.unusedRangeCount;
+                    outInfo.unusedBytes += unusedRangeSize;
+                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+                }
+
+                // End of loop.
+                lastOffset = freeSpace2ndTo1stEnd;
+            }
+        }
+    }
+
+    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
+    const VkDeviceSize freeSpace1stTo2ndEnd =
+        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
+    while(lastOffset < freeSpace1stTo2ndEnd)
+    {
+        // Find next non-null allocation or move nextAllocIndex to the end.
+        while(nextAlloc1stIndex < suballoc1stCount &&
+            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
+        {
+            ++nextAlloc1stIndex;
+        }
+
+        // Found non-null allocation.
+        if(nextAlloc1stIndex < suballoc1stCount)
+        {
+            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
+
+            // 1. Process free space before this allocation.
+            if(lastOffset < suballoc.offset)
+            {
+                // There is free space from lastOffset to suballoc.offset.
+                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+                ++outInfo.unusedRangeCount;
+                outInfo.unusedBytes += unusedRangeSize;
+                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+            }
+
+            // 2. Process this allocation.
+            // There is allocation with suballoc.offset, suballoc.size.
+            outInfo.usedBytes += suballoc.size;
+            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
+            // 3. Prepare for next iteration.
+            lastOffset = suballoc.offset + suballoc.size;
+            ++nextAlloc1stIndex;
+        }
+        // We are at the end.
+        else
+        {
+            // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+            if(lastOffset < freeSpace1stTo2ndEnd)
+            {
+                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
+                ++outInfo.unusedRangeCount;
+                outInfo.unusedBytes += unusedRangeSize;
+                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+            }
+
+            // End of loop.
+            lastOffset = freeSpace1stTo2ndEnd;
+        }
+    }
+
+    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+    {
+        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+        while(lastOffset < size)
+        {
+            // Find next non-null allocation or move nextAllocIndex to the end.
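+            // nextAlloc2ndIndex is an unsigned size_t iterated downward; decrementing
+            // it past 0 wraps to SIZE_MAX, which serves as the end sentinel below.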
+            while(nextAlloc2ndIndex != SIZE_MAX &&
+                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+            {
+                --nextAlloc2ndIndex;
+            }
+
+            // Found non-null allocation.
+            if(nextAlloc2ndIndex != SIZE_MAX)
+            {
+                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+                // 1. Process free space before this allocation.
+                if(lastOffset < suballoc.offset)
+                {
+                    // There is free space from lastOffset to suballoc.offset.
+                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+                    ++outInfo.unusedRangeCount;
+                    outInfo.unusedBytes += unusedRangeSize;
+                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+                }
+
+                // 2. Process this allocation.
+                // There is allocation with suballoc.offset, suballoc.size.
+                outInfo.usedBytes += suballoc.size;
+                outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+                outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
+                // 3. Prepare for next iteration.
+                lastOffset = suballoc.offset + suballoc.size;
+                --nextAlloc2ndIndex;
+            }
+            // We are at the end.
+            else
+            {
+                // There is free space from lastOffset to size.
+                if(lastOffset < size)
+                {
+                    const VkDeviceSize unusedRangeSize = size - lastOffset;
+                    ++outInfo.unusedRangeCount;
+                    outInfo.unusedBytes += unusedRangeSize;
+                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+                }
+
+                // End of loop.
+                lastOffset = size;
+            }
+        }
+    }
+
+    outInfo.unusedBytes = size - outInfo.usedBytes;
+}
+
+void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
+{
+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+    const VkDeviceSize size = GetSize();
+    const size_t suballoc1stCount = suballocations1st.size();
+    const size_t suballoc2ndCount = suballocations2nd.size();
+
+    inoutStats.size += size;
+
+    VkDeviceSize lastOffset = 0;
+
+    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+    {
+        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+        size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
+        while(lastOffset < freeSpace2ndTo1stEnd)
+        {
+            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+            while(nextAlloc2ndIndex < suballoc2ndCount &&
+                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+            {
+                ++nextAlloc2ndIndex;
+            }
+
+            // Found non-null allocation.
+            if(nextAlloc2ndIndex < suballoc2ndCount)
+            {
+                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+                // 1. Process free space before this allocation.
+                if(lastOffset < suballoc.offset)
+                {
+                    // There is free space from lastOffset to suballoc.offset.
+                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+                    inoutStats.unusedSize += unusedRangeSize;
+                    ++inoutStats.unusedRangeCount;
+                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+                }
+
+                // 2. Process this allocation.
+                // There is allocation with suballoc.offset, suballoc.size.
+                ++inoutStats.allocationCount;
+
+                // 3. Prepare for next iteration.
+                lastOffset = suballoc.offset + suballoc.size;
+                ++nextAlloc2ndIndex;
+            }
+            // We are at the end.
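+            // No allocation is left before the start of 1st: account the final free
+            // gap (if any) and advance lastOffset so the loop terminates.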
+ else + { + if(lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while(lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if(lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while(lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < size) + { + // There is free space from lastOffset to size. 
+ const VkDeviceSize unusedRangeSize = size - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + // FIRST PASS + + size_t unusedRangeCount = 0; + VkDeviceSize usedBytes = 0; + + VkDeviceSize lastOffset = 0; + + size_t alloc2ndCount = 0; + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while(lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + size_t alloc1stCount = 0; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while(lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc1stCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if(lastOffset < size) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + ++unusedRangeCount; + } + + // End of loop. 
+ lastOffset = freeSpace1stTo2ndEnd; + } + } + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while(lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < size) + { + // There is free space from lastOffset to size. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = size; + } + } + } + + const VkDeviceSize unusedBytes = size - usedBytes; + PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); + + // SECOND PASS + lastOffset = 0; + + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while(lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + nextAlloc1stIndex = m_1stNullItemsBeginCount; + while(lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. 
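+                // Second pass: the same traversal as above, but now each gap is
+                // emitted as an explicit unused-range entry in the JSON output.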
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if(lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while(lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < size) + { + // There is free space from lastOffset to size. + const VkDeviceSize unusedRangeSize = size - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } + + PrintDetailedMap_End(json); +} +#endif // #if VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Linear::CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(pAllocationRequest != VMA_NULL); + VMA_HEAVY_ASSERT(Validate()); + return upperAddress ? 
+ CreateAllocationRequest_UpperAddress( + currentFrameIndex, frameInUseCount, bufferImageGranularity, + allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) : + CreateAllocationRequest_LowerAddress( + currentFrameIndex, frameInUseCount, bufferImageGranularity, + allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest); +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize size = GetSize(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer."); + return false; + } + + // Try to allocate before 2nd.back(), or end of block if 2nd.empty(). + if(allocSize > size) + { + return false; + } + VkDeviceSize resultBaseOffset = size - allocSize; + if(!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset - allocSize; + if(allocSize > lastSuballoc.offset) + { + return false; + } + } + + // Start from offset equal to end of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply VMA_DEBUG_MARGIN at the end. + if(VMA_DEBUG_MARGIN > 0) + { + if(resultOffset < VMA_DEBUG_MARGIN) + { + return false; + } + resultOffset -= VMA_DEBUG_MARGIN; + } + + // Apply alignment. + resultOffset = VmaAlignDown(resultOffset, allocAlignment); + + // Check next suballocations from 2nd for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if(bufferImageGranularity > 1 && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity); + } + } + + // There is enough free space. + const VkDeviceSize endOf1st = !suballocations1st.empty() ? + suballocations1st.back().offset + suballocations1st.back().size : + 0; + if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset) + { + // Check previous suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if(bufferImageGranularity > 1) + { + for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + } + } + + // All tests passed: Success. 
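+        // Fill the request: offset is the aligned upper-address placement, and
+        // sumFreeSize is the size of the whole gap between the end of 1st and
+        // the previous top of 2nd that this allocation is placed into.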
+ pAllocationRequest->offset = resultOffset; + pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st; + pAllocationRequest->sumItemSize = 0; + // pAllocationRequest->item unused. + pAllocationRequest->itemsToMakeLostCount = 0; + pAllocationRequest->type = VmaAllocationRequestType::UpperAddress; + return true; + } + + return false; +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize size = GetSize(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + // Try to allocate at the end of 1st vector. + + VkDeviceSize resultBaseOffset = 0; + if(!suballocations1st.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations1st.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply VMA_DEBUG_MARGIN at the beginning. + if(VMA_DEBUG_MARGIN > 0) + { + resultOffset += VMA_DEBUG_MARGIN; + } + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if(bufferImageGranularity > 1 && !suballocations1st.empty()) + { + bool bufferImageGranularityConflict = false; + for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? + suballocations2nd.back().offset : size; + + // There is enough free space at the end after alignment. + if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on previous page. + break; + } + } + } + + // All tests passed: Success. + pAllocationRequest->offset = resultOffset; + pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset; + pAllocationRequest->sumItemSize = 0; + // pAllocationRequest->item, customData unused. 
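+            // EndOf1st: the allocation is appended after the last item of the 1st
+            // vector, growing it toward the end of the block (or toward 2nd's top).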
+ pAllocationRequest->type = VmaAllocationRequestType::EndOf1st; + pAllocationRequest->itemsToMakeLostCount = 0; + return true; + } + } + + // Wrap-around to end of 2nd vector. Try to allocate there, watching for the + // beginning of 1st vector as the end of free space. + if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(!suballocations1st.empty()); + + VkDeviceSize resultBaseOffset = 0; + if(!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply VMA_DEBUG_MARGIN at the beginning. + if(VMA_DEBUG_MARGIN > 0) + { + resultOffset += VMA_DEBUG_MARGIN; + } + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if(bufferImageGranularity > 1 && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + pAllocationRequest->itemsToMakeLostCount = 0; + pAllocationRequest->sumItemSize = 0; + size_t index1st = m_1stNullItemsBeginCount; + + if(canMakeOtherLost) + { + while(index1st < suballocations1st.size() && + resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset) + { + // Next colliding allocation at the beginning of 1st vector found. Try to make it lost. + const VmaSuballocation& suballoc = suballocations1st[index1st]; + if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE) + { + // No problem. + } + else + { + VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE); + if(suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { + ++pAllocationRequest->itemsToMakeLostCount; + pAllocationRequest->sumItemSize += suballoc.size; + } + else + { + return false; + } + } + ++index1st; + } + + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, we must mark more allocations lost or fail. + if(bufferImageGranularity > 1) + { + while(index1st < suballocations1st.size()) + { + const VmaSuballocation& suballoc = suballocations1st[index1st]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity)) + { + if(suballoc.hAllocation != VK_NULL_HANDLE) + { + // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type). + if(suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { + ++pAllocationRequest->itemsToMakeLostCount; + pAllocationRequest->sumItemSize += suballoc.size; + } + else + { + return false; + } + } + } + else + { + // Already on next page. 
+                        break;
+                    }
+                    ++index1st;
+                }
+            }
+
+            // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
+            if(index1st == suballocations1st.size() &&
+                resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
+            {
+                // TODO: This is a known limitation: handling this case is not implemented yet, so the allocation fails.
+                VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
+            }
+        }
+
+        // There is enough free space at the end after alignment.
+        if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
+            (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
+        {
+            // Check next suballocations for BufferImageGranularity conflicts.
+            // If conflict exists, allocation cannot be made here.
+            if(bufferImageGranularity > 1)
+            {
+                for(size_t nextSuballocIndex = index1st;
+                    nextSuballocIndex < suballocations1st.size();
+                    nextSuballocIndex++)
+                {
+                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
+                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+                    {
+                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+                        {
+                            return false;
+                        }
+                    }
+                    else
+                    {
+                        // Already on next page.
+                        break;
+                    }
+                }
+            }
+
+            // All tests passed: Success.
+            pAllocationRequest->offset = resultOffset;
+            pAllocationRequest->sumFreeSize =
+                (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
+                - resultBaseOffset
+                - pAllocationRequest->sumItemSize;
+            pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
+            // pAllocationRequest->item, customData unused.
+            return true;
+        }
+    }
+
+    return false;
+}
+
+bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
+    uint32_t currentFrameIndex,
+    uint32_t frameInUseCount,
+    VmaAllocationRequest* pAllocationRequest)
+{
+    if(pAllocationRequest->itemsToMakeLostCount == 0)
+    {
+        return true;
+    }
+
+    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
+
+    // We always start from 1st.
+    SuballocationVectorType* suballocations = &AccessSuballocations1st();
+    size_t index = m_1stNullItemsBeginCount;
+    size_t madeLostCount = 0;
+    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
+    {
+        if(index == suballocations->size())
+        {
+            index = 0;
+            // If we get to the end of 1st, we wrap around to the beginning of 2nd (or of 1st again if 2nd is empty).
+            if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+            {
+                suballocations = &AccessSuballocations2nd();
+            }
+            // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
+            // suballocations continues pointing at AccessSuballocations1st().
+            VMA_ASSERT(!suballocations->empty());
+        }
+        VmaSuballocation& suballoc = (*suballocations)[index];
+        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
+            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
+            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+            {
+                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+                suballoc.hAllocation = VK_NULL_HANDLE;
+                m_SumFreeSize += suballoc.size;
+                if(suballocations == &AccessSuballocations1st())
+                {
+                    ++m_1stNullItemsMiddleCount;
+                }
+                else
+                {
+                    ++m_2ndNullItemsCount;
+                }
+                ++madeLostCount;
+            }
+            else
+            {
+                return false;
+            }
+        }
+        ++index;
+    }
+
+    CleanupAfterFree();
+    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
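+    // CleanupAfterFree() has pruned null items from the ends of both vectors and
+    // possibly swapped them, so the metadata is in a consistent state again here.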
+ + return true; +} + +uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ + uint32_t lostAllocationCount = 0; + + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + VmaSuballocation& suballoc = suballocations1st[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE && + suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + suballoc.hAllocation = VK_NULL_HANDLE; + ++m_1stNullItemsMiddleCount; + m_SumFreeSize += suballoc.size; + ++lostAllocationCount; + } + } + + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + VmaSuballocation& suballoc = suballocations2nd[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE && + suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + suballoc.hAllocation = VK_NULL_HANDLE; + ++m_2ndNullItemsCount; + m_SumFreeSize += suballoc.size; + ++lostAllocationCount; + } + } + + if(lostAllocationCount) + { + CleanupAfterFree(); + } + + return lostAllocationCount; +} + +VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + } + } + + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_Linear::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) +{ + const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type }; + + switch(request.type) + { + case VmaAllocationRequestType::UpperAddress: + { + VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER && + "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + suballocations2nd.push_back(newSuballoc); + m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK; + } 
+ break; + case VmaAllocationRequestType::EndOf1st: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + + VMA_ASSERT(suballocations1st.empty() || + request.offset >= suballocations1st.back().offset + suballocations1st.back().size); + // Check if it fits before the end of the block. + VMA_ASSERT(request.offset + allocSize <= GetSize()); + + suballocations1st.push_back(newSuballoc); + } + break; + case VmaAllocationRequestType::EndOf2nd: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. + VMA_ASSERT(!suballocations1st.empty() && + request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + switch(m_2ndVectorMode) + { + case SECOND_VECTOR_EMPTY: + // First allocation from second part ring buffer. + VMA_ASSERT(suballocations2nd.empty()); + m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; + break; + case SECOND_VECTOR_RING_BUFFER: + // 2-part ring buffer is already started. + VMA_ASSERT(!suballocations2nd.empty()); + break; + case SECOND_VECTOR_DOUBLE_STACK: + VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); + break; + default: + VMA_ASSERT(0); + } + + suballocations2nd.push_back(newSuballoc); + } + break; + default: + VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); + } + + m_SumFreeSize -= newSuballoc.size; +} + +void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation) +{ + FreeAtOffset(allocation->GetOffset()); +} + +void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if(!suballocations1st.empty()) + { + // First allocation: Mark it as next empty at the beginning. + VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; + if(firstSuballoc.offset == offset) + { + firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + firstSuballoc.hAllocation = VK_NULL_HANDLE; + m_SumFreeSize += firstSuballoc.size; + ++m_1stNullItemsBeginCount; + CleanupAfterFree(); + return; + } + } + + // Last allocation in 2-part ring buffer or top of upper stack (same logic). + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + VmaSuballocation& lastSuballoc = suballocations2nd.back(); + if(lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations2nd.pop_back(); + CleanupAfterFree(); + return; + } + } + // Last allocation in 1st vector. + else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY) + { + VmaSuballocation& lastSuballoc = suballocations1st.back(); + if(lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations1st.pop_back(); + CleanupAfterFree(); + return; + } + } + + // Item from the middle of 1st vector. + { + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. 
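+        // The 1st vector is sorted by offset in ascending order, so a binary
+        // search on the offset alone is enough to locate the suballocation.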
+ SuballocationVectorType::iterator it = VmaBinaryFindSorted( + suballocations1st.begin() + m_1stNullItemsBeginCount, + suballocations1st.end(), + refSuballoc, + VmaSuballocationOffsetLess()); + if(it != suballocations1st.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->hAllocation = VK_NULL_HANDLE; + ++m_1stNullItemsMiddleCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + if(m_2ndVectorMode != SECOND_VECTOR_EMPTY) + { + // Item from the middle of 2nd vector. + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); + if(it != suballocations2nd.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->hAllocation = VK_NULL_HANDLE; + ++m_2ndNullItemsCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); +} + +bool VmaBlockMetadata_Linear::ShouldCompact1st() const +{ + const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; + const size_t suballocCount = AccessSuballocations1st().size(); + return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3; +} + +void VmaBlockMetadata_Linear::CleanupAfterFree() +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if(IsEmpty()) + { + suballocations1st.clear(); + suballocations2nd.clear(); + m_1stNullItemsBeginCount = 0; + m_1stNullItemsMiddleCount = 0; + m_2ndNullItemsCount = 0; + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + } + else + { + const size_t suballoc1stCount = suballocations1st.size(); + const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; + VMA_ASSERT(nullItem1stCount <= suballoc1stCount); + + // Find more null items at the beginning of 1st vector. + while(m_1stNullItemsBeginCount < suballoc1stCount && + suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE) + { + ++m_1stNullItemsBeginCount; + --m_1stNullItemsMiddleCount; + } + + // Find more null items at the end of 1st vector. + while(m_1stNullItemsMiddleCount > 0 && + suballocations1st.back().hAllocation == VK_NULL_HANDLE) + { + --m_1stNullItemsMiddleCount; + suballocations1st.pop_back(); + } + + // Find more null items at the end of 2nd vector. + while(m_2ndNullItemsCount > 0 && + suballocations2nd.back().hAllocation == VK_NULL_HANDLE) + { + --m_2ndNullItemsCount; + suballocations2nd.pop_back(); + } + + // Find more null items at the beginning of 2nd vector. 
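+        // Unlike the tail cases above, removing from the front must preserve the
+        // order of the remaining items, hence VmaVectorRemove instead of pop_back.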
+        while(m_2ndNullItemsCount > 0 &&
+            suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
+        {
+            --m_2ndNullItemsCount;
+            VmaVectorRemove(suballocations2nd, 0);
+        }
+
+        if(ShouldCompact1st())
+        {
+            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
+            size_t srcIndex = m_1stNullItemsBeginCount;
+            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
+            {
+                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
+                {
+                    ++srcIndex;
+                }
+                if(dstIndex != srcIndex)
+                {
+                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
+                }
+                ++srcIndex;
+            }
+            suballocations1st.resize(nonNullItemCount);
+            m_1stNullItemsBeginCount = 0;
+            m_1stNullItemsMiddleCount = 0;
+        }
+
+        // 2nd vector became empty.
+        if(suballocations2nd.empty())
+        {
+            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+        }
+
+        // 1st vector became empty.
+        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
+        {
+            suballocations1st.clear();
+            m_1stNullItemsBeginCount = 0;
+
+            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+            {
+                // Swap 1st with 2nd. Now 2nd is empty.
+                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
+                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
+                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
+                {
+                    ++m_1stNullItemsBeginCount;
+                    --m_1stNullItemsMiddleCount;
+                }
+                m_2ndNullItemsCount = 0;
+                m_1stVectorIndex ^= 1;
+            }
+        }
+    }
+
+    VMA_HEAVY_ASSERT(Validate());
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaBlockMetadata_Buddy
+
+VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
+    VmaBlockMetadata(hAllocator),
+    m_Root(VMA_NULL),
+    m_AllocationCount(0),
+    m_FreeCount(1),
+    m_SumFreeSize(0)
+{
+    memset(m_FreeList, 0, sizeof(m_FreeList));
+}
+
+VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
+{
+    DeleteNode(m_Root);
+}
+
+void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
+{
+    VmaBlockMetadata::Init(size);
+
+    m_UsableSize = VmaPrevPow2(size);
+    m_SumFreeSize = m_UsableSize;
+
+    // Calculate m_LevelCount.
+    m_LevelCount = 1;
+    while(m_LevelCount < MAX_LEVELS &&
+        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
+    {
+        ++m_LevelCount;
+    }
+
+    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
+    rootNode->offset = 0;
+    rootNode->type = Node::TYPE_FREE;
+    rootNode->parent = VMA_NULL;
+    rootNode->buddy = VMA_NULL;
+
+    m_Root = rootNode;
+    AddToFreeListFront(0, rootNode);
+}
+
+bool VmaBlockMetadata_Buddy::Validate() const
+{
+    // Validate tree.
+    ValidationContext ctx;
+    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
+    {
+        VMA_VALIDATE(false && "ValidateNode failed.");
+    }
+    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
+    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
+
+    // Validate free node lists.
+    for(uint32_t level = 0; level < m_LevelCount; ++level)
+    {
+        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
+            m_FreeList[level].front->free.prev == VMA_NULL);
+
+        for(Node* node = m_FreeList[level].front;
+            node != VMA_NULL;
+            node = node->free.next)
+        {
+            VMA_VALIDATE(node->type == Node::TYPE_FREE);
+
+            if(node->free.next == VMA_NULL)
+            {
+                VMA_VALIDATE(m_FreeList[level].back == node);
+            }
+            else
+            {
+                VMA_VALIDATE(node->free.next->free.prev == node);
+            }
+        }
+    }
+
+    // Validate that free lists at higher levels are empty.
+ for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level) + { + VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL); + } + + return true; +} + +VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const +{ + for(uint32_t level = 0; level < m_LevelCount; ++level) + { + if(m_FreeList[level].front != VMA_NULL) + { + return LevelToNodeSize(level); + } + } + return 0; +} + +void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +{ + const VkDeviceSize unusableSize = GetUnusableSize(); + + outInfo.blockCount = 1; + + outInfo.allocationCount = outInfo.unusedRangeCount = 0; + outInfo.usedBytes = outInfo.unusedBytes = 0; + + outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0; + outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX; + outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused. + + CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0)); + + if(unusableSize > 0) + { + ++outInfo.unusedRangeCount; + outInfo.unusedBytes += unusableSize; + outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize); + outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize); + } +} + +void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const +{ + const VkDeviceSize unusableSize = GetUnusableSize(); + + inoutStats.size += GetSize(); + inoutStats.unusedSize += m_SumFreeSize + unusableSize; + inoutStats.allocationCount += m_AllocationCount; + inoutStats.unusedRangeCount += m_FreeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax()); + + if(unusableSize > 0) + { + ++inoutStats.unusedRangeCount; + // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations. + } +} + +#if VMA_STATS_STRING_ENABLED + +void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const +{ + // TODO optimize + VmaStatInfo stat; + CalcAllocationStatInfo(stat); + + PrintDetailedMap_Begin( + json, + stat.unusedBytes, + stat.allocationCount, + stat.unusedRangeCount); + + PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0)); + + const VkDeviceSize unusableSize = GetUnusableSize(); + if(unusableSize > 0) + { + PrintDetailedMap_UnusedRange(json, + m_UsableSize, // offset + unusableSize); // size + } + + PrintDetailedMap_End(json); +} + +#endif // #if VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Buddy::CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm."); + + // Simple way to respect bufferImageGranularity. May be optimized some day. + // Whenever it might be an OPTIMAL image... 
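+    // Raising both alignment and size to at least bufferImageGranularity makes
+    // every node cover whole granularity pages (node sizes are powers of two),
+    // so a linear and an optimal resource can never share a page.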
+ if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
+ allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+ allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
+ {
+ allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
+ allocSize = VMA_MAX(allocSize, bufferImageGranularity);
+ }
+
+ if(allocSize > m_UsableSize)
+ {
+ return false;
+ }
+
+ const uint32_t targetLevel = AllocSizeToLevel(allocSize);
+ for(uint32_t level = targetLevel + 1; level--; )
+ {
+ for(Node* freeNode = m_FreeList[level].front;
+ freeNode != VMA_NULL;
+ freeNode = freeNode->free.next)
+ {
+ if(freeNode->offset % allocAlignment == 0)
+ {
+ pAllocationRequest->type = VmaAllocationRequestType::Normal;
+ pAllocationRequest->offset = freeNode->offset;
+ pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
+ pAllocationRequest->sumItemSize = 0;
+ pAllocationRequest->itemsToMakeLostCount = 0;
+ pAllocationRequest->customData = (void*)(uintptr_t)level;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VmaAllocationRequest* pAllocationRequest)
+{
+ /*
+ Lost allocations are not supported in buddy allocator at the moment.
+ Support might be added in the future.
+ */
+ return pAllocationRequest->itemsToMakeLostCount == 0;
+}
+
+uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
+{
+ /*
+ Lost allocations are not supported in buddy allocator at the moment.
+ Support might be added in the future.
+ */
+ return 0;
+}
+
+void VmaBlockMetadata_Buddy::Alloc(
+ const VmaAllocationRequest& request,
+ VmaSuballocationType type,
+ VkDeviceSize allocSize,
+ VmaAllocation hAllocation)
+{
+ VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
+
+ const uint32_t targetLevel = AllocSizeToLevel(allocSize);
+ uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
+
+ Node* currNode = m_FreeList[currLevel].front;
+ VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
+ while(currNode->offset != request.offset)
+ {
+ currNode = currNode->free.next;
+ VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
+ }
+
+ // Go down, splitting free nodes.
+ while(currLevel < targetLevel)
+ {
+ // currNode is already first free node at currLevel.
+ // Remove it from list of free nodes at this currLevel.
+ RemoveFromFreeList(currLevel, currNode);
+
+ const uint32_t childrenLevel = currLevel + 1;
+
+ // Create two free sub-nodes.
+ Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
+ Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
+
+ leftChild->offset = currNode->offset;
+ leftChild->type = Node::TYPE_FREE;
+ leftChild->parent = currNode;
+ leftChild->buddy = rightChild;
+
+ rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
+ rightChild->type = Node::TYPE_FREE;
+ rightChild->parent = currNode;
+ rightChild->buddy = leftChild;
+
+ // Convert currNode to split type.
+ currNode->type = Node::TYPE_SPLIT;
+ currNode->split.leftChild = leftChild;
+
+ // Add child nodes to free list. Order is important!
+ AddToFreeListFront(childrenLevel, rightChild);
+ AddToFreeListFront(childrenLevel, leftChild);
+
+ ++m_FreeCount;
+ //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
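+ // Worked example of one split, assuming a 512 B usable block: an
+ // allocation of 130 B has targetLevel 1 (256 B nodes), so the free root
+ // at level 0 is split once:
+ //
+ //     level 0: [0..512)  TYPE_SPLIT
+ //     level 1: [0..256) free, [256..512) free
+ //
+ // leftChild is pushed last, so the next iteration (or the final
+ // allocation below) picks it up from the front of the free list.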
+ ++currLevel;
+ currNode = m_FreeList[currLevel].front;
+
+ /*
+ We can be sure that currNode, as left child of node previously split,
+ also fulfills the alignment requirement.
+ */
+ }
+
+ // Remove from free list.
+ VMA_ASSERT(currLevel == targetLevel &&
+ currNode != VMA_NULL &&
+ currNode->type == Node::TYPE_FREE);
+ RemoveFromFreeList(currLevel, currNode);
+
+ // Convert to allocation node.
+ currNode->type = Node::TYPE_ALLOCATION;
+ currNode->allocation.alloc = hAllocation;
+
+ ++m_AllocationCount;
+ --m_FreeCount;
+ m_SumFreeSize -= allocSize;
+}
+
+void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
+{
+ if(node->type == Node::TYPE_SPLIT)
+ {
+ DeleteNode(node->split.leftChild->buddy);
+ DeleteNode(node->split.leftChild);
+ }
+
+ vma_delete(GetAllocationCallbacks(), node);
+}
+
+bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
+{
+ VMA_VALIDATE(level < m_LevelCount);
+ VMA_VALIDATE(curr->parent == parent);
+ VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
+ VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
+ switch(curr->type)
+ {
+ case Node::TYPE_FREE:
+ // curr->free.prev, next are validated separately.
+ ctx.calculatedSumFreeSize += levelNodeSize;
+ ++ctx.calculatedFreeCount;
+ break;
+ case Node::TYPE_ALLOCATION:
+ ++ctx.calculatedAllocationCount;
+ ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
+ VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
+ break;
+ case Node::TYPE_SPLIT:
+ {
+ const uint32_t childrenLevel = level + 1;
+ const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
+ const Node* const leftChild = curr->split.leftChild;
+ VMA_VALIDATE(leftChild != VMA_NULL);
+ VMA_VALIDATE(leftChild->offset == curr->offset);
+ if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
+ {
+ VMA_VALIDATE(false && "ValidateNode for left child failed.");
+ }
+ const Node* const rightChild = leftChild->buddy;
+ VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
+ if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
+ {
+ VMA_VALIDATE(false && "ValidateNode for right child failed.");
+ }
+ }
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
+{
+ // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
+ uint32_t level = 0;
+ VkDeviceSize currLevelNodeSize = m_UsableSize;
+ VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
+ while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
+ {
+ ++level;
+ currLevelNodeSize = nextLevelNodeSize;
+ nextLevelNodeSize = currLevelNodeSize >> 1;
+ }
+ return level;
+}
+
+void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
+{
+ // Find node and level.
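+ // For example (sizes hypothetical), freeing offset 320 in a 512 B block
+ // split into 256 B leaves walks:
+ //
+ //     level 0: [0..512) TYPE_SPLIT, 320 >= 0 + 256 -> right child, nodeOffset = 256
+ //     level 1: [256..512) TYPE_ALLOCATION -> found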
+ Node* node = m_Root;
+ VkDeviceSize nodeOffset = 0;
+ uint32_t level = 0;
+ VkDeviceSize levelNodeSize = LevelToNodeSize(0);
+ while(node->type == Node::TYPE_SPLIT)
+ {
+ const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
+ if(offset < nodeOffset + nextLevelSize)
+ {
+ node = node->split.leftChild;
+ }
+ else
+ {
+ node = node->split.leftChild->buddy;
+ nodeOffset += nextLevelSize;
+ }
+ ++level;
+ levelNodeSize = nextLevelSize;
+ }
+
+ VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
+ VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
+
+ ++m_FreeCount;
+ --m_AllocationCount;
+ m_SumFreeSize += alloc->GetSize();
+
+ node->type = Node::TYPE_FREE;
+
+ // Join free nodes if possible.
+ while(level > 0 && node->buddy->type == Node::TYPE_FREE)
+ {
+ RemoveFromFreeList(level, node->buddy);
+ Node* const parent = node->parent;
+
+ vma_delete(GetAllocationCallbacks(), node->buddy);
+ vma_delete(GetAllocationCallbacks(), node);
+ parent->type = Node::TYPE_FREE;
+
+ node = parent;
+ --level;
+ //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
+ --m_FreeCount;
+ }
+
+ AddToFreeListFront(level, node);
+}
+
+void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
+{
+ switch(node->type)
+ {
+ case Node::TYPE_FREE:
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += levelNodeSize;
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
+ break;
+ case Node::TYPE_ALLOCATION:
+ {
+ const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
+ ++outInfo.allocationCount;
+ outInfo.usedBytes += allocSize;
+ outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
+ outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
+
+ const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
+ if(unusedRangeSize > 0)
+ {
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += unusedRangeSize;
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+ }
+ }
+ break;
+ case Node::TYPE_SPLIT:
+ {
+ const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
+ const Node* const leftChild = node->split.leftChild;
+ CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
+ const Node* const rightChild = leftChild->buddy;
+ CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
+ }
+ break;
+ default:
+ VMA_ASSERT(0);
+ }
+}
+
+void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
+{
+ VMA_ASSERT(node->type == Node::TYPE_FREE);
+
+ // List is empty.
+ Node* const frontNode = m_FreeList[level].front;
+ if(frontNode == VMA_NULL)
+ {
+ VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
+ node->free.prev = node->free.next = VMA_NULL;
+ m_FreeList[level].front = m_FreeList[level].back = node;
+ }
+ else
+ {
+ VMA_ASSERT(frontNode->free.prev == VMA_NULL);
+ node->free.prev = VMA_NULL;
+ node->free.next = frontNode;
+ frontNode->free.prev = node;
+ m_FreeList[level].front = node;
+ }
+}
+
+void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
+{
+ VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
+
+ // It is at the front.
+ if(node->free.prev == VMA_NULL) + { + VMA_ASSERT(m_FreeList[level].front == node); + m_FreeList[level].front = node->free.next; + } + else + { + Node* const prevFreeNode = node->free.prev; + VMA_ASSERT(prevFreeNode->free.next == node); + prevFreeNode->free.next = node->free.next; + } + + // It is at the back. + if(node->free.next == VMA_NULL) + { + VMA_ASSERT(m_FreeList[level].back == node); + m_FreeList[level].back = node->free.prev; + } + else + { + Node* const nextFreeNode = node->free.next; + VMA_ASSERT(nextFreeNode->free.prev == node); + nextFreeNode->free.prev = node->free.prev; + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const +{ + switch(node->type) + { + case Node::TYPE_FREE: + PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize); + break; + case Node::TYPE_ALLOCATION: + { + PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc); + const VkDeviceSize allocSize = node->allocation.alloc->GetSize(); + if(allocSize < levelNodeSize) + { + PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize); + } + } + break; + case Node::TYPE_SPLIT: + { + const VkDeviceSize childrenNodeSize = levelNodeSize / 2; + const Node* const leftChild = node->split.leftChild; + PrintDetailedMapNode(json, leftChild, childrenNodeSize); + const Node* const rightChild = leftChild->buddy; + PrintDetailedMapNode(json, rightChild, childrenNodeSize); + } + break; + default: + VMA_ASSERT(0); + } +} +#endif // #if VMA_STATS_STRING_ENABLED + + +//////////////////////////////////////////////////////////////////////////////// +// class VmaDeviceMemoryBlock + +VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) : + m_pMetadata(VMA_NULL), + m_MemoryTypeIndex(UINT32_MAX), + m_Id(0), + m_hMemory(VK_NULL_HANDLE), + m_MapCount(0), + m_pMappedData(VMA_NULL) +{ +} + +void VmaDeviceMemoryBlock::Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm) +{ + VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); + + m_hParentPool = hParentPool; + m_MemoryTypeIndex = newMemoryTypeIndex; + m_Id = id; + m_hMemory = newMemory; + + switch(algorithm) + { + case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator); + break; + case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator); + break; + default: + VMA_ASSERT(0); + // Fall-through. + case 0: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator); + } + m_pMetadata->Init(newSize); +} + +void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) +{ + // This is the most important assert in the entire library. + // Hitting it means you have some memory leak - unreleased VmaAllocation objects. 
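+ // A minimal sketch of the usual cause, using the public API (handles
+ // hypothetical):
+ //
+ //     vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
+ //         &buf, &alloc, VMA_NULL);
+ //     vmaDestroyAllocator(allocator); // asserts: buf/alloc still alive
+ //
+ // Calling vmaDestroyBuffer(allocator, buf, alloc) first avoids it.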
+ VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!"); + + VMA_ASSERT(m_hMemory != VK_NULL_HANDLE); + allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory); + m_hMemory = VK_NULL_HANDLE; + + vma_delete(allocator, m_pMetadata); + m_pMetadata = VMA_NULL; +} + +bool VmaDeviceMemoryBlock::Validate() const +{ + VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && + (m_pMetadata->GetSize() != 0)); + + return m_pMetadata->Validate(); +} + +VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) +{ + void* pData = nullptr; + VkResult res = Map(hAllocator, 1, &pData); + if(res != VK_SUCCESS) + { + return res; + } + + res = m_pMetadata->CheckCorruption(pData); + + Unmap(hAllocator, 1); + + return res; +} + +VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData) +{ + if(count == 0) + { + return VK_SUCCESS; + } + + VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + if(m_MapCount != 0) + { + m_MapCount += count; + VMA_ASSERT(m_pMappedData != VMA_NULL); + if(ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + return VK_SUCCESS; + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + &m_pMappedData); + if(result == VK_SUCCESS) + { + if(ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + m_MapCount = count; + } + return result; + } +} + +void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) +{ + if(count == 0) + { + return; + } + + VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + if(m_MapCount >= count) + { + m_MapCount -= count; + if(m_MapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + } + else + { + VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped."); + } +} + +VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if(res != VK_SUCCESS) + { + return res; + } + + VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN); + VmaWriteMagicValue(pData, allocOffset + allocSize); + + Unmap(hAllocator, 1); + + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if(res != VK_SUCCESS) + { + return res; + } + + if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!"); + } + else if(!VmaValidateMagicValue(pData, allocOffset + allocSize)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!"); + } + + Unmap(hAllocator, 1); + + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == 
VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext); +} + +VkResult VmaDeviceMemoryBlock::BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext); +} + +static void InitStatInfo(VmaStatInfo& outInfo) +{ + memset(&outInfo, 0, sizeof(outInfo)); + outInfo.allocationSizeMin = UINT64_MAX; + outInfo.unusedRangeSizeMin = UINT64_MAX; +} + +// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo. +static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo) +{ + inoutInfo.blockCount += srcInfo.blockCount; + inoutInfo.allocationCount += srcInfo.allocationCount; + inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount; + inoutInfo.usedBytes += srcInfo.usedBytes; + inoutInfo.unusedBytes += srcInfo.unusedBytes; + inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin); + inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax); + inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin); + inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax); +} + +static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo) +{ + inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ? + VmaRoundDiv(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0; + inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ? + VmaRoundDiv(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0; +} + +VmaPool_T::VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize) : + m_BlockVector( + hAllocator, + this, // hParentPool + createInfo.memoryTypeIndex, + createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, + createInfo.minBlockCount, + createInfo.maxBlockCount, + (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 
1 : hAllocator->GetBufferImageGranularity(),
+ createInfo.frameInUseCount,
+ createInfo.blockSize != 0, // explicitBlockSize
+ createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
+ m_Id(0),
+ m_Name(VMA_NULL)
+{
+}
+
+VmaPool_T::~VmaPool_T()
+{
+}
+
+void VmaPool_T::SetName(const char* pName)
+{
+ const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
+ VmaFreeString(allocs, m_Name);
+
+ if(pName != VMA_NULL)
+ {
+ m_Name = VmaCreateStringCopy(allocs, pName);
+ }
+ else
+ {
+ m_Name = VMA_NULL;
+ }
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+VmaBlockVector::VmaBlockVector(
+ VmaAllocator hAllocator,
+ VmaPool hParentPool,
+ uint32_t memoryTypeIndex,
+ VkDeviceSize preferredBlockSize,
+ size_t minBlockCount,
+ size_t maxBlockCount,
+ VkDeviceSize bufferImageGranularity,
+ uint32_t frameInUseCount,
+ bool explicitBlockSize,
+ uint32_t algorithm) :
+ m_hAllocator(hAllocator),
+ m_hParentPool(hParentPool),
+ m_MemoryTypeIndex(memoryTypeIndex),
+ m_PreferredBlockSize(preferredBlockSize),
+ m_MinBlockCount(minBlockCount),
+ m_MaxBlockCount(maxBlockCount),
+ m_BufferImageGranularity(bufferImageGranularity),
+ m_FrameInUseCount(frameInUseCount),
+ m_ExplicitBlockSize(explicitBlockSize),
+ m_Algorithm(algorithm),
+ m_HasEmptyBlock(false),
+ m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
+ m_NextBlockId(0)
+{
+}
+
+VmaBlockVector::~VmaBlockVector()
+{
+ for(size_t i = m_Blocks.size(); i--; )
+ {
+ m_Blocks[i]->Destroy(m_hAllocator);
+ vma_delete(m_hAllocator, m_Blocks[i]);
+ }
+}
+
+VkResult VmaBlockVector::CreateMinBlocks()
+{
+ for(size_t i = 0; i < m_MinBlockCount; ++i)
+ {
+ VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
+ if(res != VK_SUCCESS)
+ {
+ return res;
+ }
+ }
+ return VK_SUCCESS;
+}
+
+void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
+{
+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+ const size_t blockCount = m_Blocks.size();
+
+ pStats->size = 0;
+ pStats->unusedSize = 0;
+ pStats->allocationCount = 0;
+ pStats->unusedRangeCount = 0;
+ pStats->unusedRangeSizeMax = 0;
+ pStats->blockCount = blockCount;
+
+ for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+ {
+ const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+ VMA_ASSERT(pBlock);
+ VMA_HEAVY_ASSERT(pBlock->Validate());
+ pBlock->m_pMetadata->AddPoolStats(*pStats);
+ }
+}
+
+bool VmaBlockVector::IsEmpty()
+{
+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+ return m_Blocks.empty();
+}
+
+bool VmaBlockVector::IsCorruptionDetectionEnabled() const
+{
+ const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+ return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
+ (VMA_DEBUG_MARGIN > 0) &&
+ (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
+ (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
+}
+
+static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
+
+VkResult VmaBlockVector::Allocate(
+ uint32_t currentFrameIndex,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ const VmaAllocationCreateInfo& createInfo,
+ VmaSuballocationType suballocType,
+ size_t allocationCount,
+ VmaAllocation* pAllocations)
+{
+ size_t allocIndex;
+ VkResult res = VK_SUCCESS;
+
+ if(IsCorruptionDetectionEnabled())
+ {
+ size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+ alignment = VmaAlignUp<VkDeviceSize>(alignment,
sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+ }
+
+ {
+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+ for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+ {
+ res = AllocatePage(
+ currentFrameIndex,
+ size,
+ alignment,
+ createInfo,
+ suballocType,
+ pAllocations + allocIndex);
+ if(res != VK_SUCCESS)
+ {
+ break;
+ }
+ }
+ }
+
+ if(res != VK_SUCCESS)
+ {
+ // Free all already created allocations.
+ while(allocIndex--)
+ {
+ Free(pAllocations[allocIndex]);
+ }
+ memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+ }
+
+ return res;
+}
+
+VkResult VmaBlockVector::AllocatePage(
+ uint32_t currentFrameIndex,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ const VmaAllocationCreateInfo& createInfo,
+ VmaSuballocationType suballocType,
+ VmaAllocation* pAllocation)
+{
+ const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+ bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
+ const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
+ const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+
+ const bool withinBudget = (createInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0;
+ VkDeviceSize freeMemory;
+ {
+ const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+ VmaBudget heapBudget = {};
+ m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
+ freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
+ }
+
+ const bool canFallbackToDedicated = !IsCustomPool();
+ const bool canCreateNewBlock =
+ ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
+ (m_Blocks.size() < m_MaxBlockCount) &&
+ (freeMemory >= size || !canFallbackToDedicated);
+ uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
+
+ // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
+ // Which in turn is available only when maxBlockCount = 1.
+ if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
+ {
+ canMakeOtherLost = false;
+ }
+
+ // Upper address can only be used with linear allocator and within single memory block.
+ if(isUpperAddress &&
+ (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
+ {
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ // Validate strategy.
+ switch(strategy)
+ {
+ case 0:
+ strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
+ break;
+ case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
+ case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
+ case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
+ break;
+ default:
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ // Early reject: requested allocation size is larger than the maximum block size for this block vector.
+ if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
+ {
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+
+ /*
+ Under certain conditions, this whole section can be skipped for optimization, so
+ we move on directly to trying to allocate with canMakeOtherLost. That's the case
+ e.g. for custom pools with linear algorithm.
+ */
+ if(!canMakeOtherLost || canCreateNewBlock)
+ {
+ // 1. Search existing allocations. Try to allocate without making other allocations lost.
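+ // A caller-side sketch of how these paths are selected (flags from the
+ // public API; the combination is illustrative):
+ //
+ //     VmaAllocationCreateInfo ci = {};
+ //     ci.flags = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;   // scan blocks forward
+ //     // ci.flags |= VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT;  // would forbid step 2 below
+ //     // ci.flags |= VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;   // respect heap budget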
+ VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags; + allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT; + + if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { + // Use only last block. + if(!m_Blocks.empty()) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back(); + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId()); + return VK_SUCCESS; + } + } + } + else + { + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); + return VK_SUCCESS; + } + } + } + else // WORST_FIT, FIRST_FIT + { + // Backward order in m_Blocks - prefer blocks with largest amount of free space. + for(size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); + return VK_SUCCESS; + } + } + } + } + + // 2. Try to create new block. + if(canCreateNewBlock) + { + // Calculate optimal size for new block. + VkDeviceSize newBlockSize = m_PreferredBlockSize; + uint32_t newBlockSizeShift = 0; + const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3; + + if(!m_ExplicitBlockSize) + { + // Allocate 1/8, 1/4, 1/2 as first blocks. + const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize(); + for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + } + else + { + break; + } + } + } + + size_t newBlockIndex = 0; + VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? + CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. + if(!m_ExplicitBlockSize) + { + while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if(smallerNewBlockSize >= size) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? 
+ CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + else + { + break; + } + } + } + + if(res == VK_SUCCESS) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex]; + VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size); + + res = AllocateFromBlock( + pBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize); + return VK_SUCCESS; + } + else + { + // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment. + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + } + } + + // 3. Try to allocate from existing blocks with making other allocations lost. + if(canMakeOtherLost) + { + uint32_t tryIndex = 0; + for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex) + { + VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL; + VmaAllocationRequest bestRequest = {}; + VkDeviceSize bestRequestCost = VK_WHOLE_SIZE; + + // 1. Search existing allocations. + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VmaAllocationRequest currRequest = {}; + if(pCurrBlock->m_pMetadata->CreateAllocationRequest( + currentFrameIndex, + m_FrameInUseCount, + m_BufferImageGranularity, + size, + alignment, + (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, + suballocType, + canMakeOtherLost, + strategy, + &currRequest)) + { + const VkDeviceSize currRequestCost = currRequest.CalcCost(); + if(pBestRequestBlock == VMA_NULL || + currRequestCost < bestRequestCost) + { + pBestRequestBlock = pCurrBlock; + bestRequest = currRequest; + bestRequestCost = currRequestCost; + + if(bestRequestCost == 0) + { + break; + } + } + } + } + } + else // WORST_FIT, FIRST_FIT + { + // Backward order in m_Blocks - prefer blocks with largest amount of free space. + for(size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VmaAllocationRequest currRequest = {}; + if(pCurrBlock->m_pMetadata->CreateAllocationRequest( + currentFrameIndex, + m_FrameInUseCount, + m_BufferImageGranularity, + size, + alignment, + (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, + suballocType, + canMakeOtherLost, + strategy, + &currRequest)) + { + const VkDeviceSize currRequestCost = currRequest.CalcCost(); + if(pBestRequestBlock == VMA_NULL || + currRequestCost < bestRequestCost || + strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) + { + pBestRequestBlock = pCurrBlock; + bestRequest = currRequest; + bestRequestCost = currRequestCost; + + if(bestRequestCost == 0 || + strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) + { + break; + } + } + } + } + } + + if(pBestRequestBlock != VMA_NULL) + { + if(mapped) + { + VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL); + if(res != VK_SUCCESS) + { + return res; + } + } + + if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost( + currentFrameIndex, + m_FrameInUseCount, + &bestRequest)) + { + // Allocate from this pBlock. 
+ *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
+ (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
+ pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
+ UpdateHasEmptyBlock();
+ (*pAllocation)->InitBlockAllocation(
+ pBestRequestBlock,
+ bestRequest.offset,
+ alignment,
+ size,
+ m_MemoryTypeIndex,
+ suballocType,
+ mapped,
+ (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
+ VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
+ VMA_DEBUG_LOG("    Returned from existing block");
+ (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
+ m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
+ if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+ {
+ m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+ }
+ if(IsCorruptionDetectionEnabled())
+ {
+ VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
+ VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
+ }
+ return VK_SUCCESS;
+ }
+ // else: Some allocations must have been touched while we are here. Next try.
+ }
+ else
+ {
+ // Could not find place in any of the blocks - break outer loop.
+ break;
+ }
+ }
+ /* Maximum number of tries exceeded - a very unlikely event when many other
+ threads are simultaneously touching allocations, making it impossible to make
+ them lost at the same time as we try to allocate. */
+ if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
+ {
+ return VK_ERROR_TOO_MANY_OBJECTS;
+ }
+ }
+
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+}
+
+void VmaBlockVector::Free(
+ const VmaAllocation hAllocation)
+{
+ VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
+
+ bool budgetExceeded = false;
+ {
+ const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+ VmaBudget heapBudget = {};
+ m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
+ budgetExceeded = heapBudget.usage >= heapBudget.budget;
+ }
+
+ // Scope for lock.
+ {
+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+ VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+
+ if(IsCorruptionDetectionEnabled())
+ {
+ VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
+ VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
+ }
+
+ if(hAllocation->IsPersistentMap())
+ {
+ pBlock->Unmap(m_hAllocator, 1);
+ }
+
+ pBlock->m_pMetadata->Free(hAllocation);
+ VMA_HEAVY_ASSERT(pBlock->Validate());
+
+ VMA_DEBUG_LOG("  Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
+
+ const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
+ // pBlock became empty after this deallocation.
+ if(pBlock->m_pMetadata->IsEmpty())
+ {
+ // Already has empty block. We don't want to have two, so delete this one.
+ if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
+ {
+ pBlockToDelete = pBlock;
+ Remove(pBlock);
+ }
+ // else: We now have an empty block - leave it.
+ }
+ // pBlock didn't become empty, but we have another empty block - find and free that one.
+ // (This is optional, heuristics.)
+ else if(m_HasEmptyBlock && canDeleteBlock)
+ {
+ VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
+ if(pLastBlock->m_pMetadata->IsEmpty())
+ {
+ pBlockToDelete = pLastBlock;
+ m_Blocks.pop_back();
+ }
+ }
+
+ UpdateHasEmptyBlock();
+ IncrementallySortBlocks();
+ }
+
+ // Destruction of a free block.
Deferred until this point, outside of mutex
+ // lock, for performance reasons.
+ if(pBlockToDelete != VMA_NULL)
+ {
+ VMA_DEBUG_LOG("    Deleted empty block");
+ pBlockToDelete->Destroy(m_hAllocator);
+ vma_delete(m_hAllocator, pBlockToDelete);
+ }
+}
+
+VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
+{
+ VkDeviceSize result = 0;
+ for(size_t i = m_Blocks.size(); i--; )
+ {
+ result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
+ if(result >= m_PreferredBlockSize)
+ {
+ break;
+ }
+ }
+ return result;
+}
+
+void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
+{
+ for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+ {
+ if(m_Blocks[blockIndex] == pBlock)
+ {
+ VmaVectorRemove(m_Blocks, blockIndex);
+ return;
+ }
+ }
+ VMA_ASSERT(0);
+}
+
+void VmaBlockVector::IncrementallySortBlocks()
+{
+ if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
+ {
+ // Bubble sort only until first swap.
+ for(size_t i = 1; i < m_Blocks.size(); ++i)
+ {
+ if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
+ {
+ VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
+ return;
+ }
+ }
+ }
+}
+
+VkResult VmaBlockVector::AllocateFromBlock(
+ VmaDeviceMemoryBlock* pBlock,
+ uint32_t currentFrameIndex,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ VmaAllocationCreateFlags allocFlags,
+ void* pUserData,
+ VmaSuballocationType suballocType,
+ uint32_t strategy,
+ VmaAllocation* pAllocation)
+{
+ VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
+ const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+ const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
+ const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+
+ VmaAllocationRequest currRequest = {};
+ if(pBlock->m_pMetadata->CreateAllocationRequest(
+ currentFrameIndex,
+ m_FrameInUseCount,
+ m_BufferImageGranularity,
+ size,
+ alignment,
+ isUpperAddress,
+ suballocType,
+ false, // canMakeOtherLost
+ strategy,
+ &currRequest))
+ {
+ // Allocate from pCurrBlock.
+ VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
+
+ if(mapped)
+ {
+ VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
+ if(res != VK_SUCCESS)
+ {
+ return res;
+ }
+ }
+
+ *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
+ (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
+ pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
+ UpdateHasEmptyBlock();
+ (*pAllocation)->InitBlockAllocation(
+ pBlock,
+ currRequest.offset,
+ alignment,
+ size,
+ m_MemoryTypeIndex,
+ suballocType,
+ mapped,
+ (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
+ VMA_HEAVY_ASSERT(pBlock->Validate());
+ (*pAllocation)->SetUserData(m_hAllocator, pUserData);
+ m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
+ if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+ {
+ m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+ }
+ if(IsCorruptionDetectionEnabled())
+ {
+ VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
+ VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
+ }
+ return VK_SUCCESS;
+ }
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+}
+
+VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
+{
+ VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+ allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
+ allocInfo.allocationSize = blockSize;
+ VkDeviceMemory mem = VK_NULL_HANDLE;
+ VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
+ if(res < 0)
+ {
+ return res;
+ }
+
+ // New VkDeviceMemory successfully created.
+
+ // Create new Allocation for it.
+ VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
+ pBlock->Init(
+ m_hAllocator,
+ m_hParentPool,
+ m_MemoryTypeIndex,
+ mem,
+ allocInfo.allocationSize,
+ m_NextBlockId++,
+ m_Algorithm);
+
+ m_Blocks.push_back(pBlock);
+ if(pNewBlockIndex != VMA_NULL)
+ {
+ *pNewBlockIndex = m_Blocks.size() - 1;
+ }
+
+ return VK_SUCCESS;
+}
+
+void VmaBlockVector::ApplyDefragmentationMovesCpu(
+ class VmaBlockVectorDefragmentationContext* pDefragCtx,
+ const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
+{
+ const size_t blockCount = m_Blocks.size();
+ const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
+
+ enum BLOCK_FLAG
+ {
+ BLOCK_FLAG_USED = 0x00000001,
+ BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
+ };
+
+ struct BlockInfo
+ {
+ uint32_t flags;
+ void* pMappedData;
+ };
+ VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
+ blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
+ memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
+
+ // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
+ const size_t moveCount = moves.size();
+ for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+ {
+ const VmaDefragmentationMove& move = moves[moveIndex];
+ blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
+ blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
+ }
+
+ VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
+
+ // Go over all blocks. Get mapped pointer or map if necessary.
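+ // Map()/Unmap() are reference-counted per block (see VmaDeviceMemoryBlock
+ // above), so a rough trace for a block the user already mapped once is:
+ //
+ //     Map(hAllocator, 1, &p);  // m_MapCount 1 -> 2, reuses m_pMappedData
+ //     Unmap(hAllocator, 1);    // m_MapCount 2 -> 1, memory stays mapped
+ //
+ // Only blocks mapped here for the first time get
+ // BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION and are unmapped at the end.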
+ for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
+ {
+ BlockInfo& currBlockInfo = blockInfo[blockIndex];
+ VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+ if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
+ {
+ currBlockInfo.pMappedData = pBlock->GetMappedData();
+ // It is not originally mapped - map it.
+ if(currBlockInfo.pMappedData == VMA_NULL)
+ {
+ pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
+ if(pDefragCtx->res == VK_SUCCESS)
+ {
+ currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
+ }
+ }
+ }
+ }
+
+ // Go over all moves. Do actual data transfer.
+ if(pDefragCtx->res == VK_SUCCESS)
+ {
+ const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+ VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+
+ for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+ {
+ const VmaDefragmentationMove& move = moves[moveIndex];
+
+ const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
+ const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
+
+ VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
+
+ // Invalidate source.
+ if(isNonCoherent)
+ {
+ VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
+ memRange.memory = pSrcBlock->GetDeviceMemory();
+ memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
+ memRange.size = VMA_MIN(
+ VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
+ pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
+ (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
+ }
+
+ // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
+ memmove(
+ reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
+ reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
+ static_cast<size_t>(move.size));
+
+ if(IsCorruptionDetectionEnabled())
+ {
+ VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
+ VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
+ }
+
+ // Flush destination.
+ if(isNonCoherent)
+ {
+ VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
+ memRange.memory = pDstBlock->GetDeviceMemory();
+ memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
+ memRange.size = VMA_MIN(
+ VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
+ pDstBlock->m_pMetadata->GetSize() - memRange.offset);
+ (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
+ }
+ }
+ }
+
+ // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
+ // Regardless of pCtx->res == VK_SUCCESS.
+ for(size_t blockIndex = blockCount; blockIndex--; )
+ {
+ const BlockInfo& currBlockInfo = blockInfo[blockIndex];
+ if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
+ {
+ VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+ pBlock->Unmap(m_hAllocator, 1);
+ }
+ }
+}
+
+void VmaBlockVector::ApplyDefragmentationMovesGpu(
+ class VmaBlockVectorDefragmentationContext* pDefragCtx,
+ VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+ VkCommandBuffer commandBuffer)
+{
+ const size_t blockCount = m_Blocks.size();
+
+ pDefragCtx->blockContexts.resize(blockCount);
+ memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
+
+ // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
+ const size_t moveCount = moves.size();
+ for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+ {
+ const VmaDefragmentationMove& move = moves[moveIndex];
+
+ //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
+ {
+ // Old school moves still require us to map the whole block.
+ pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+ pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+ }
+ }
+
+ VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
+
+ // Go over all blocks. Create and bind buffer for whole block if necessary.
+ {
+ VkBufferCreateInfo bufCreateInfo;
+ VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
+
+ for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
+ {
+ VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
+ VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+ if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
+ {
+ bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
+ pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
+ m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
+ if(pDefragCtx->res == VK_SUCCESS)
+ {
+ pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
+ m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
+ }
+ }
+ }
+ }
+
+ // Go over all moves. Post data transfer commands to command buffer.
+ if(pDefragCtx->res == VK_SUCCESS)
+ {
+ for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+ {
+ const VmaDefragmentationMove& move = moves[moveIndex];
+
+ const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
+ const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
+
+ VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
+
+ VkBufferCopy region = {
+ move.srcOffset,
+ move.dstOffset,
+ move.size };
+ (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
+ commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
+ }
+ }
+
+ // Save buffers to defrag context for later destruction.
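+ // VK_NOT_READY below signals that copy commands were only recorded; a
+ // caller-side sketch with the public API (error handling omitted, variable
+ // names hypothetical):
+ //
+ //     vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
+ //     // ... submit the command buffer passed in info and wait for the GPU ...
+ //     vmaDefragmentationEnd(allocator, ctx); // destroys the staging buffers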
+ if(pDefragCtx->res == VK_SUCCESS && moveCount > 0) + { + pDefragCtx->res = VK_NOT_READY; + } +} + +void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats) +{ + for(size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; + if(pBlock->m_pMetadata->IsEmpty()) + { + if(m_Blocks.size() > m_MinBlockCount) + { + if(pDefragmentationStats != VMA_NULL) + { + ++pDefragmentationStats->deviceMemoryBlocksFreed; + pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize(); + } + + VmaVectorRemove(m_Blocks, blockIndex); + pBlock->Destroy(m_hAllocator); + vma_delete(m_hAllocator, pBlock); + } + else + { + break; + } + } + } + UpdateHasEmptyBlock(); +} + +void VmaBlockVector::UpdateHasEmptyBlock() +{ + m_HasEmptyBlock = false; + for(size_t index = 0, count = m_Blocks.size(); index < count; ++index) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[index]; + if(pBlock->m_pMetadata->IsEmpty()) + { + m_HasEmptyBlock = true; + break; + } + } +} + +#if VMA_STATS_STRING_ENABLED + +void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + json.BeginObject(); + + if(IsCustomPool()) + { + const char* poolName = m_hParentPool->GetName(); + if(poolName != VMA_NULL && poolName[0] != '\0') + { + json.WriteString("Name"); + json.WriteString(poolName); + } + + json.WriteString("MemoryTypeIndex"); + json.WriteNumber(m_MemoryTypeIndex); + + json.WriteString("BlockSize"); + json.WriteNumber(m_PreferredBlockSize); + + json.WriteString("BlockCount"); + json.BeginObject(true); + if(m_MinBlockCount > 0) + { + json.WriteString("Min"); + json.WriteNumber((uint64_t)m_MinBlockCount); + } + if(m_MaxBlockCount < SIZE_MAX) + { + json.WriteString("Max"); + json.WriteNumber((uint64_t)m_MaxBlockCount); + } + json.WriteString("Cur"); + json.WriteNumber((uint64_t)m_Blocks.size()); + json.EndObject(); + + if(m_FrameInUseCount > 0) + { + json.WriteString("FrameInUseCount"); + json.WriteNumber(m_FrameInUseCount); + } + + if(m_Algorithm != 0) + { + json.WriteString("Algorithm"); + json.WriteString(VmaAlgorithmToStr(m_Algorithm)); + } + } + else + { + json.WriteString("PreferredBlockSize"); + json.WriteNumber(m_PreferredBlockSize); + } + + json.WriteString("Blocks"); + json.BeginObject(); + for(size_t i = 0; i < m_Blocks.size(); ++i) + { + json.BeginString(); + json.ContinueString(m_Blocks[i]->GetId()); + json.EndString(); + + m_Blocks[i]->m_pMetadata->PrintDetailedMap(json); + } + json.EndObject(); + + json.EndObject(); +} + +#endif // #if VMA_STATS_STRING_ENABLED + +void VmaBlockVector::Defragment( + class VmaBlockVectorDefragmentationContext* pCtx, + VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags, + VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove, + VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove, + VkCommandBuffer commandBuffer) +{ + pCtx->res = VK_SUCCESS; + + const VkMemoryPropertyFlags memPropFlags = + m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags; + const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; + + const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 && + isHostVisible; + const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 && + !IsCorruptionDetectionEnabled() && + ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0; + + // There are options to defragment 
this memory type. + if(canDefragmentOnCpu || canDefragmentOnGpu) + { + bool defragmentOnGpu; + // There is only one option to defragment this memory type. + if(canDefragmentOnGpu != canDefragmentOnCpu) + { + defragmentOnGpu = canDefragmentOnGpu; + } + // Both options are available: Heuristics to choose the best one. + else + { + defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 || + m_hAllocator->IsIntegratedGpu(); + } + + bool overlappingMoveSupported = !defragmentOnGpu; + + if(m_hAllocator->m_UseMutex) + { + if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) + { + if(!m_Mutex.TryLockWrite()) + { + pCtx->res = VK_ERROR_INITIALIZATION_FAILED; + return; + } + } + else + { + m_Mutex.LockWrite(); + pCtx->mutexLocked = true; + } + } + + pCtx->Begin(overlappingMoveSupported, flags); + + // Defragment. + + const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove; + const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove; + pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags); + + // Accumulate statistics. + if(pStats != VMA_NULL) + { + const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved(); + const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved(); + pStats->bytesMoved += bytesMoved; + pStats->allocationsMoved += allocationsMoved; + VMA_ASSERT(bytesMoved <= maxBytesToMove); + VMA_ASSERT(allocationsMoved <= maxAllocationsToMove); + if(defragmentOnGpu) + { + maxGpuBytesToMove -= bytesMoved; + maxGpuAllocationsToMove -= allocationsMoved; + } + else + { + maxCpuBytesToMove -= bytesMoved; + maxCpuAllocationsToMove -= allocationsMoved; + } + } + + if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) + { + if(m_hAllocator->m_UseMutex) + m_Mutex.UnlockWrite(); + + if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty()) + pCtx->res = VK_NOT_READY; + + return; + } + + if(pCtx->res >= VK_SUCCESS) + { + if(defragmentOnGpu) + { + ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer); + } + else + { + ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves); + } + } + } +} + +void VmaBlockVector::DefragmentationEnd( + class VmaBlockVectorDefragmentationContext* pCtx, + VmaDefragmentationStats* pStats) +{ + // Destroy buffers. 
+ for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
+ {
+ VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
+ if(blockCtx.hBuffer)
+ {
+ (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
+ m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
+ }
+ }
+
+ if(pCtx->res >= VK_SUCCESS)
+ {
+ FreeEmptyBlocks(pStats);
+ }
+
+ if(pCtx->mutexLocked)
+ {
+ VMA_ASSERT(m_hAllocator->m_UseMutex);
+ m_Mutex.UnlockWrite();
+ }
+}
+
+uint32_t VmaBlockVector::ProcessDefragmentations(
+ class VmaBlockVectorDefragmentationContext *pCtx,
+ VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
+{
+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+ const uint32_t moveCount = std::min(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
+
+ for(uint32_t i = pCtx->defragmentationMovesProcessed; i < pCtx->defragmentationMovesProcessed + moveCount; ++ i)
+ {
+ VmaDefragmentationMove& move = pCtx->defragmentationMoves[i];
+
+ pMove->allocation = move.hAllocation;
+ pMove->memory = move.pDstBlock->GetDeviceMemory();
+ pMove->offset = move.dstOffset;
+
+ ++ pMove;
+ }
+
+ pCtx->defragmentationMovesProcessed += moveCount;
+
+ return moveCount;
+}
+
+void VmaBlockVector::CommitDefragmentations(
+ class VmaBlockVectorDefragmentationContext *pCtx,
+ VmaDefragmentationStats* pStats)
+{
+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+ for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
+ {
+ const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
+
+ move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
+ move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
+ }
+
+ pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
+ FreeEmptyBlocks(pStats);
+}
+
+size_t VmaBlockVector::CalcAllocationCount() const
+{
+ size_t result = 0;
+ for(size_t i = 0; i < m_Blocks.size(); ++i)
+ {
+ result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
+ }
+ return result;
+}
+
+bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
+{
+ if(m_BufferImageGranularity == 1)
+ {
+ return false;
+ }
+ VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
+ for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
+ {
+ VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
+ VMA_ASSERT(m_Algorithm == 0);
+ VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
+ if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+void VmaBlockVector::MakePoolAllocationsLost(
+ uint32_t currentFrameIndex,
+ size_t* pLostAllocationCount)
+{
+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+ size_t lostAllocationCount = 0;
+ for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+ {
+ VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+ VMA_ASSERT(pBlock);
+ lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
+ }
+ if(pLostAllocationCount != VMA_NULL)
+ {
+ *pLostAllocationCount = lostAllocationCount;
+ }
+}
+
+VkResult VmaBlockVector::CheckCorruption()
+{
+ if(!IsCorruptionDetectionEnabled())
+ {
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+ for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+ {
+
VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VkResult res = pBlock->CheckCorruption(m_hAllocator); + if(res != VK_SUCCESS) + { + return res; + } + } + return VK_SUCCESS; +} + +void VmaBlockVector::AddStats(VmaStats* pStats) +{ + const uint32_t memTypeIndex = m_MemoryTypeIndex; + const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex); + + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VMA_HEAVY_ASSERT(pBlock->Validate()); + VmaStatInfo allocationStatInfo; + pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo); + VmaAddStatInfo(pStats->total, allocationStatInfo); + VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo); + VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo); + } +} + +//////////////////////////////////////////////////////////////////////////////// +// VmaDefragmentationAlgorithm_Generic members definition + +VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic( + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported) : + VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex), + m_AllocationCount(0), + m_AllAllocations(false), + m_BytesMoved(0), + m_AllocationsMoved(0), + m_Blocks(VmaStlAllocator(hAllocator->GetAllocationCallbacks())) +{ + // Create block info for each block. + const size_t blockCount = m_pBlockVector->m_Blocks.size(); + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks()); + pBlockInfo->m_OriginalBlockIndex = blockIndex; + pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex]; + m_Blocks.push_back(pBlockInfo); + } + + // Sort them by m_pBlock pointer value. + VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess()); +} + +VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic() +{ + for(size_t i = m_Blocks.size(); i--; ) + { + vma_delete(m_hAllocator, m_Blocks[i]); + } +} + +void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) +{ + // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost. + if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST) + { + VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock(); + BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess()); + if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock) + { + AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged); + (*it)->m_Allocations.push_back(allocInfo); + } + else + { + VMA_ASSERT(0); + } + + ++m_AllocationCount; + } +} + +VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + bool freeOldAllocations) +{ + if(m_Blocks.empty()) + { + return VK_SUCCESS; + } + + // This is a choice based on research. 
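+    // MIN_TIME takes the first suitable free range; the alternatives below trade
+    // search time for tighter packing or lower fragmentation.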
+ // Option 1: + uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT; + // Option 2: + //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT; + // Option 3: + //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT; + + size_t srcBlockMinIndex = 0; + // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations. + /* + if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT) + { + const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount(); + if(blocksWithNonMovableCount > 0) + { + srcBlockMinIndex = blocksWithNonMovableCount - 1; + } + } + */ + + size_t srcBlockIndex = m_Blocks.size() - 1; + size_t srcAllocIndex = SIZE_MAX; + for(;;) + { + // 1. Find next allocation to move. + // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source". + // 1.2. Then start from last to first m_Allocations. + while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size()) + { + if(m_Blocks[srcBlockIndex]->m_Allocations.empty()) + { + // Finished: no more allocations to process. + if(srcBlockIndex == srcBlockMinIndex) + { + return VK_SUCCESS; + } + else + { + --srcBlockIndex; + srcAllocIndex = SIZE_MAX; + } + } + else + { + srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1; + } + } + + BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex]; + AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex]; + + const VkDeviceSize size = allocInfo.m_hAllocation->GetSize(); + const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset(); + const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment(); + const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType(); + + // 2. Try to find new place for this allocation in preceding or current block. + for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex) + { + BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex]; + VmaAllocationRequest dstAllocRequest; + if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest( + m_CurrentFrameIndex, + m_pBlockVector->GetFrameInUseCount(), + m_pBlockVector->GetBufferImageGranularity(), + size, + alignment, + false, // upperAddress + suballocType, + false, // canMakeOtherLost + strategy, + &dstAllocRequest) && + MoveMakesSense( + dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset)) + { + VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0); + + // Reached limit on number of allocations or bytes to move. 
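+                // Hitting the budget is not an error: return success and leave
+                // the remaining allocations where they are.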
+                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
+                    (m_BytesMoved + size > maxBytesToMove))
+                {
+                    return VK_SUCCESS;
+                }
+
+                VmaDefragmentationMove move = {};
+                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
+                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
+                move.srcOffset = srcOffset;
+                move.dstOffset = dstAllocRequest.offset;
+                move.size = size;
+                move.hAllocation = allocInfo.m_hAllocation;
+                move.pSrcBlock = pSrcBlockInfo->m_pBlock;
+                move.pDstBlock = pDstBlockInfo->m_pBlock;
+
+                moves.push_back(move);
+
+                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
+                    dstAllocRequest,
+                    suballocType,
+                    size,
+                    allocInfo.m_hAllocation);
+
+                if(freeOldAllocations)
+                {
+                    pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
+                    allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
+                }
+
+                if(allocInfo.m_pChanged != VMA_NULL)
+                {
+                    *allocInfo.m_pChanged = VK_TRUE;
+                }
+
+                ++m_AllocationsMoved;
+                m_BytesMoved += size;
+
+                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
+
+                break;
+            }
+        }
+
+        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for the next round.
+
+        if(srcAllocIndex > 0)
+        {
+            --srcAllocIndex;
+        }
+        else
+        {
+            if(srcBlockIndex > 0)
+            {
+                --srcBlockIndex;
+                srcAllocIndex = SIZE_MAX;
+            }
+            else
+            {
+                return VK_SUCCESS;
+            }
+        }
+    }
+}
+
+size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
+{
+    size_t result = 0;
+    for(size_t i = 0; i < m_Blocks.size(); ++i)
+    {
+        if(m_Blocks[i]->m_HasNonMovableAllocations)
+        {
+            ++result;
+        }
+    }
+    return result;
+}
+
+VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
+    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+    VkDeviceSize maxBytesToMove,
+    uint32_t maxAllocationsToMove,
+    VmaDefragmentationFlags flags)
+{
+    if(!m_AllAllocations && m_AllocationCount == 0)
+    {
+        return VK_SUCCESS;
+    }
+
+    const size_t blockCount = m_Blocks.size();
+    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+    {
+        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
+
+        if(m_AllAllocations)
+        {
+            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
+            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
+                it != pMetadata->m_Suballocations.end();
+                ++it)
+            {
+                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
+                {
+                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
+                    pBlockInfo->m_Allocations.push_back(allocInfo);
+                }
+            }
+        }
+
+        pBlockInfo->CalcHasNonMovableAllocations();
+
+        // This is a choice based on research.
+        // Option 1:
+        pBlockInfo->SortAllocationsByOffsetDescending();
+        // Option 2:
+        //pBlockInfo->SortAllocationsBySizeDescending();
+    }
+
+    // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
+    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
+
+    // This is a choice based on research.
+    const uint32_t roundCount = 2;
+
+    // Execute defragmentation rounds (the main part).
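+    // Presumably two rounds so that moves made in the first round can free up
+    // space that the second round then fills.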
+    VkResult result = VK_SUCCESS;
+    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
+    {
+        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
+    }
+
+    return result;
+}
+
+bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
+    size_t dstBlockIndex, VkDeviceSize dstOffset,
+    size_t srcBlockIndex, VkDeviceSize srcOffset)
+{
+    if(dstBlockIndex < srcBlockIndex)
+    {
+        return true;
+    }
+    if(dstBlockIndex > srcBlockIndex)
+    {
+        return false;
+    }
+    if(dstOffset < srcOffset)
+    {
+        return true;
+    }
+    return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaDefragmentationAlgorithm_Fast
+
+VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
+    VmaAllocator hAllocator,
+    VmaBlockVector* pBlockVector,
+    uint32_t currentFrameIndex,
+    bool overlappingMoveSupported) :
+    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
+    m_OverlappingMoveSupported(overlappingMoveSupported),
+    m_AllocationCount(0),
+    m_AllAllocations(false),
+    m_BytesMoved(0),
+    m_AllocationsMoved(0),
+    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
+{
+    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
+}
+
+VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
+{
+}
+
+VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
+    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+    VkDeviceSize maxBytesToMove,
+    uint32_t maxAllocationsToMove,
+    VmaDefragmentationFlags flags)
+{
+    VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
+
+    const size_t blockCount = m_pBlockVector->GetBlockCount();
+    if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
+    {
+        return VK_SUCCESS;
+    }
+
+    PreprocessMetadata();
+
+    // Sort blocks in order from most "destination" to most "source".
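+    // Ascending by total free size: the fullest blocks come first and serve as
+    // destinations, while the emptiest blocks at the end are drained.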
+ + m_BlockInfos.resize(blockCount); + for(size_t i = 0; i < blockCount; ++i) + { + m_BlockInfos[i].origBlockIndex = i; + } + + VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool { + return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() < + m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize(); + }); + + // THE MAIN ALGORITHM + + FreeSpaceDatabase freeSpaceDb; + + size_t dstBlockInfoIndex = 0; + size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex; + VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); + VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata; + VkDeviceSize dstBlockSize = pDstMetadata->GetSize(); + VkDeviceSize dstOffset = 0; + + bool end = false; + for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex) + { + const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex; + VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex); + VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata; + for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin(); + !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); ) + { + VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation; + const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment(); + const VkDeviceSize srcAllocSize = srcSuballocIt->size; + if(m_AllocationsMoved == maxAllocationsToMove || + m_BytesMoved + srcAllocSize > maxBytesToMove) + { + end = true; + break; + } + const VkDeviceSize srcAllocOffset = srcSuballocIt->offset; + + VmaDefragmentationMove move = {}; + // Try to place it in one of free spaces from the database. + size_t freeSpaceInfoIndex; + VkDeviceSize dstAllocOffset; + if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize, + freeSpaceInfoIndex, dstAllocOffset)) + { + size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex; + VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex); + VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata; + + // Same block + if(freeSpaceInfoIndex == srcBlockInfoIndex) + { + VMA_ASSERT(dstAllocOffset <= srcAllocOffset); + + // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset. + + VmaSuballocation suballoc = *srcSuballocIt; + suballoc.offset = dstAllocOffset; + suballoc.hAllocation->ChangeOffset(dstAllocOffset); + m_BytesMoved += srcAllocSize; + ++m_AllocationsMoved; + + VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; + ++nextSuballocIt; + pSrcMetadata->m_Suballocations.erase(srcSuballocIt); + srcSuballocIt = nextSuballocIt; + + InsertSuballoc(pFreeSpaceMetadata, suballoc); + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = freeSpaceOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + + moves.push_back(move); + } + // Different block + else + { + // MOVE OPTION 2: Move the allocation to a different block. 
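+                    // The destination is a free-space region registered earlier in
+                    // this pass, always in a block that precedes the source one.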
+ + VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex); + + VmaSuballocation suballoc = *srcSuballocIt; + suballoc.offset = dstAllocOffset; + suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset); + m_BytesMoved += srcAllocSize; + ++m_AllocationsMoved; + + VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; + ++nextSuballocIt; + pSrcMetadata->m_Suballocations.erase(srcSuballocIt); + srcSuballocIt = nextSuballocIt; + + InsertSuballoc(pFreeSpaceMetadata, suballoc); + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = freeSpaceOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + + moves.push_back(move); + } + } + else + { + dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment); + + // If the allocation doesn't fit before the end of dstBlock, forward to next block. + while(dstBlockInfoIndex < srcBlockInfoIndex && + dstAllocOffset + srcAllocSize > dstBlockSize) + { + // But before that, register remaining free space at the end of dst block. + freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset); + + ++dstBlockInfoIndex; + dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex; + pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); + pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata; + dstBlockSize = pDstMetadata->GetSize(); + dstOffset = 0; + dstAllocOffset = 0; + } + + // Same block + if(dstBlockInfoIndex == srcBlockInfoIndex) + { + VMA_ASSERT(dstAllocOffset <= srcAllocOffset); + + const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset; + + bool skipOver = overlap; + if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset) + { + // If destination and source place overlap, skip if it would move it + // by only < 1/64 of its size. + skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize; + } + + if(skipOver) + { + freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset); + + dstOffset = srcAllocOffset + srcAllocSize; + ++srcSuballocIt; + } + // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset. + else + { + srcSuballocIt->offset = dstAllocOffset; + srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset); + dstOffset = dstAllocOffset + srcAllocSize; + m_BytesMoved += srcAllocSize; + ++m_AllocationsMoved; + ++srcSuballocIt; + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = dstOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + + moves.push_back(move); + } + } + // Different block + else + { + // MOVE OPTION 2: Move the allocation to a different block. 
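+                // Unlike the free-space case above, this appends at the current
+                // write cursor (dstOffset) of the active destination block.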
+ + VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex); + VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize); + + VmaSuballocation suballoc = *srcSuballocIt; + suballoc.offset = dstAllocOffset; + suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset); + dstOffset = dstAllocOffset + srcAllocSize; + m_BytesMoved += srcAllocSize; + ++m_AllocationsMoved; + + VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; + ++nextSuballocIt; + pSrcMetadata->m_Suballocations.erase(srcSuballocIt); + srcSuballocIt = nextSuballocIt; + + pDstMetadata->m_Suballocations.push_back(suballoc); + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = dstOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + + moves.push_back(move); + } + } + } + } + + m_BlockInfos.clear(); + + PostprocessMetadata(); + + return VK_SUCCESS; +} + +void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata() +{ + const size_t blockCount = m_pBlockVector->GetBlockCount(); + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + VmaBlockMetadata_Generic* const pMetadata = + (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata; + pMetadata->m_FreeCount = 0; + pMetadata->m_SumFreeSize = pMetadata->GetSize(); + pMetadata->m_FreeSuballocationsBySize.clear(); + for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin(); + it != pMetadata->m_Suballocations.end(); ) + { + if(it->type == VMA_SUBALLOCATION_TYPE_FREE) + { + VmaSuballocationList::iterator nextIt = it; + ++nextIt; + pMetadata->m_Suballocations.erase(it); + it = nextIt; + } + else + { + ++it; + } + } + } +} + +void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata() +{ + const size_t blockCount = m_pBlockVector->GetBlockCount(); + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + VmaBlockMetadata_Generic* const pMetadata = + (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata; + const VkDeviceSize blockSize = pMetadata->GetSize(); + + // No allocations in this block - entire area is free. + if(pMetadata->m_Suballocations.empty()) + { + pMetadata->m_FreeCount = 1; + //pMetadata->m_SumFreeSize is already set to blockSize. + VmaSuballocation suballoc = { + 0, // offset + blockSize, // size + VMA_NULL, // hAllocation + VMA_SUBALLOCATION_TYPE_FREE }; + pMetadata->m_Suballocations.push_back(suballoc); + pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin()); + } + // There are some allocations in this block. + else + { + VkDeviceSize offset = 0; + VmaSuballocationList::iterator it; + for(it = pMetadata->m_Suballocations.begin(); + it != pMetadata->m_Suballocations.end(); + ++it) + { + VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(it->offset >= offset); + + // Need to insert preceding free space. + if(it->offset > offset) + { + ++pMetadata->m_FreeCount; + const VkDeviceSize freeSize = it->offset - offset; + VmaSuballocation suballoc = { + offset, // offset + freeSize, // size + VMA_NULL, // hAllocation + VMA_SUBALLOCATION_TYPE_FREE }; + VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc); + if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt); + } + } + + pMetadata->m_SumFreeSize -= it->size; + offset = it->offset + it->size; + } + + // Need to insert trailing free space. 
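+            // (covers the gap between the last allocation and the end of the block)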
+            if(offset < blockSize)
+            {
+                ++pMetadata->m_FreeCount;
+                const VkDeviceSize freeSize = blockSize - offset;
+                VmaSuballocation suballoc = {
+                    offset, // offset
+                    freeSize, // size
+                    VMA_NULL, // hAllocation
+                    VMA_SUBALLOCATION_TYPE_FREE };
+                VMA_ASSERT(it == pMetadata->m_Suballocations.end());
+                VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
+                if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+                {
+                    pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
+                }
+            }
+
+            VMA_SORT(
+                pMetadata->m_FreeSuballocationsBySize.begin(),
+                pMetadata->m_FreeSuballocationsBySize.end(),
+                VmaSuballocationItemSizeLess());
+        }
+
+        VMA_HEAVY_ASSERT(pMetadata->Validate());
+    }
+}
+
+void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
+{
+    // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
+    VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
+    while(it != pMetadata->m_Suballocations.end() &&
+        it->offset < suballoc.offset)
+    {
+        ++it;
+    }
+    pMetadata->m_Suballocations.insert(it, suballoc);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaBlockVectorDefragmentationContext
+
+VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
+    VmaAllocator hAllocator,
+    VmaPool hCustomPool,
+    VmaBlockVector* pBlockVector,
+    uint32_t currFrameIndex) :
+    res(VK_SUCCESS),
+    mutexLocked(false),
+    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
+    defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
+    defragmentationMovesProcessed(0),
+    defragmentationMovesCommitted(0),
+    hasDefragmentationPlan(0),
+    m_hAllocator(hAllocator),
+    m_hCustomPool(hCustomPool),
+    m_pBlockVector(pBlockVector),
+    m_CurrFrameIndex(currFrameIndex),
+    m_pAlgorithm(VMA_NULL),
+    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
+    m_AllAllocations(false)
+{
+}
+
+VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
+{
+    vma_delete(m_hAllocator, m_pAlgorithm);
+}
+
+void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
+{
+    AllocInfo info = { hAlloc, pChanged };
+    m_Allocations.push_back(info);
+}
+
+void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
+{
+    const bool allAllocations = m_AllAllocations ||
+        m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
+
+    /********************************
+    HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
+    ********************************/
+
+    /*
+    Fast algorithm is supported only when certain criteria are met:
+    - VMA_DEBUG_MARGIN is 0.
+    - All allocations in this block vector are movable.
+    - There is no possibility of image/buffer granularity conflict.
+    - The defragmentation is not incremental.
+    */
+    if(VMA_DEBUG_MARGIN == 0 &&
+        allAllocations &&
+        !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
+        !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
+    {
+        m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
+            m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
+    }
+    else
+    {
+        m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
+            m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
+    }
+
+    if(allAllocations)
+    {
+        m_pAlgorithm->AddAll();
+    }
+    else
+    {
+        for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
+        {
+            m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
+        }
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaDefragmentationContext
+
+VmaDefragmentationContext_T::VmaDefragmentationContext_T(
+    VmaAllocator hAllocator,
+    uint32_t currFrameIndex,
+    uint32_t flags,
+    VmaDefragmentationStats* pStats) :
+    m_hAllocator(hAllocator),
+    m_CurrFrameIndex(currFrameIndex),
+    m_Flags(flags),
+    m_pStats(pStats),
+    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
+{
+    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
+}
+
+VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
+{
+    for(size_t i = m_CustomPoolContexts.size(); i--; )
+    {
+        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
+        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+        vma_delete(m_hAllocator, pBlockVectorCtx);
+    }
+    for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
+    {
+        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
+        if(pBlockVectorCtx)
+        {
+            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
+            vma_delete(m_hAllocator, pBlockVectorCtx);
+        }
+    }
+}
+
+void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
+{
+    for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
+    {
+        VmaPool pool = pPools[poolIndex];
+        VMA_ASSERT(pool);
+        // Pools with algorithm other than default are not defragmented.
+        if(pool->m_BlockVector.GetAlgorithm() == 0)
+        {
+            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
+
+            for(size_t i = m_CustomPoolContexts.size(); i--; )
+            {
+                if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
+                {
+                    pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+                    break;
+                }
+            }
+
+            if(!pBlockVectorDefragCtx)
+            {
+                pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+                    m_hAllocator,
+                    pool,
+                    &pool->m_BlockVector,
+                    m_CurrFrameIndex);
+                m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+            }
+
+            pBlockVectorDefragCtx->AddAll();
+        }
+    }
+}
+
+void VmaDefragmentationContext_T::AddAllocations(
+    uint32_t allocationCount,
+    VmaAllocation* pAllocations,
+    VkBool32* pAllocationsChanged)
+{
+    // Dispatch pAllocations among defragmentation contexts. Create them when necessary.
+    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+    {
+        const VmaAllocation hAlloc = pAllocations[allocIndex];
+        VMA_ASSERT(hAlloc);
+        // DedicatedAlloc cannot be defragmented.
+        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
+            // Lost allocation cannot be defragmented.
+            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
+        {
+            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
+
+            const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
+            // This allocation belongs to a custom pool.
+            if(hAllocPool != VK_NULL_HANDLE)
+            {
+                // Pools with algorithm other than default are not defragmented.
+                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
+                {
+                    for(size_t i = m_CustomPoolContexts.size(); i--; )
+                    {
+                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
+                        {
+                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+                            break;
+                        }
+                    }
+                    if(!pBlockVectorDefragCtx)
+                    {
+                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+                            m_hAllocator,
+                            hAllocPool,
+                            &hAllocPool->m_BlockVector,
+                            m_CurrFrameIndex);
+                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+                    }
+                }
+            }
+            // This allocation belongs to a default pool.
+            else
+            {
+                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
+                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
+                if(!pBlockVectorDefragCtx)
+                {
+                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+                        m_hAllocator,
+                        VMA_NULL, // hCustomPool
+                        m_hAllocator->m_pBlockVectors[memTypeIndex],
+                        m_CurrFrameIndex);
+                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
+                }
+            }
+
+            if(pBlockVectorDefragCtx)
+            {
+                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
+                    &pAllocationsChanged[allocIndex] : VMA_NULL;
+                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
+            }
+        }
+    }
+}
+
+VkResult VmaDefragmentationContext_T::Defragment(
+    VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
+    VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
+    VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
+{
+    if(pStats)
+    {
+        memset(pStats, 0, sizeof(VmaDefragmentationStats));
+    }
+
+    if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
+    {
+        // For incremental defragmentation, we just earmark how much we can move.
+        // The real meat is in the defragmentation passes.
+        m_MaxCpuBytesToMove = maxCpuBytesToMove;
+        m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
+
+        m_MaxGpuBytesToMove = maxGpuBytesToMove;
+        m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
+
+        if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
+            m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
+            return VK_SUCCESS;
+
+        return VK_NOT_READY;
+    }
+
+    if(commandBuffer == VK_NULL_HANDLE)
+    {
+        maxGpuBytesToMove = 0;
+        maxGpuAllocationsToMove = 0;
+    }
+
+    VkResult res = VK_SUCCESS;
+
+    // Process default pools.
+    for(uint32_t memTypeIndex = 0;
+        memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
+        ++memTypeIndex)
+    {
+        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
+        if(pBlockVectorCtx)
+        {
+            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
+            pBlockVectorCtx->GetBlockVector()->Defragment(
+                pBlockVectorCtx,
+                pStats, flags,
+                maxCpuBytesToMove, maxCpuAllocationsToMove,
+                maxGpuBytesToMove, maxGpuAllocationsToMove,
+                commandBuffer);
+            if(pBlockVectorCtx->res != VK_SUCCESS)
+            {
+                res = pBlockVectorCtx->res;
+            }
+        }
+    }
+
+    // Process custom pools.
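+    // Same handling as the default pools above; the res >= VK_SUCCESS loop
+    // condition stops iteration as soon as any block vector reports an error.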
+ for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); + customCtxIndex < customCtxCount && res >= VK_SUCCESS; + ++customCtxIndex) + { + VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; + VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); + pBlockVectorCtx->GetBlockVector()->Defragment( + pBlockVectorCtx, + pStats, flags, + maxCpuBytesToMove, maxCpuAllocationsToMove, + maxGpuBytesToMove, maxGpuAllocationsToMove, + commandBuffer); + if(pBlockVectorCtx->res != VK_SUCCESS) + { + res = pBlockVectorCtx->res; + } + } + + return res; +} + +VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo) +{ + VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves; + uint32_t movesLeft = pInfo->moveCount; + + // Process default pools. + for(uint32_t memTypeIndex = 0; + memTypeIndex < m_hAllocator->GetMemoryTypeCount(); + ++memTypeIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; + if(pBlockVectorCtx) + { + VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + pBlockVectorCtx->GetBlockVector()->Defragment( + pBlockVectorCtx, + m_pStats, m_Flags, + m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove, + m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove, + VK_NULL_HANDLE); + + if(pBlockVectorCtx->res < VK_SUCCESS) + continue; + + pBlockVectorCtx->hasDefragmentationPlan = true; + } + + const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations( + pBlockVectorCtx, + pCurrentMove, movesLeft); + + movesLeft -= processed; + pCurrentMove += processed; + } + } + + // Process custom pools. + for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); + customCtxIndex < customCtxCount; + ++customCtxIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; + VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + pBlockVectorCtx->GetBlockVector()->Defragment( + pBlockVectorCtx, + m_pStats, m_Flags, + m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove, + m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove, + VK_NULL_HANDLE); + + if(pBlockVectorCtx->res < VK_SUCCESS) + continue; + + pBlockVectorCtx->hasDefragmentationPlan = true; + } + + const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations( + pBlockVectorCtx, + pCurrentMove, movesLeft); + + movesLeft -= processed; + pCurrentMove += processed; + } + + pInfo->moveCount = pInfo->moveCount - movesLeft; + + return VK_SUCCESS; +} +VkResult VmaDefragmentationContext_T::DefragmentPassEnd() +{ + VkResult res = VK_SUCCESS; + + // Process default pools. + for(uint32_t memTypeIndex = 0; + memTypeIndex < m_hAllocator->GetMemoryTypeCount(); + ++memTypeIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; + if(pBlockVectorCtx) + { + VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + res = VK_NOT_READY; + continue; + } + + pBlockVectorCtx->GetBlockVector()->CommitDefragmentations( + pBlockVectorCtx, m_pStats); + + if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted) + res = VK_NOT_READY; + } + } + + // Process custom pools. 
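+    // As with the default pools, a context that never produced a defragmentation
+    // plan yields VK_NOT_READY instead of committing partial results.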
+ for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); + customCtxIndex < customCtxCount; + ++customCtxIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; + VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + res = VK_NOT_READY; + continue; + } + + pBlockVectorCtx->GetBlockVector()->CommitDefragmentations( + pBlockVectorCtx, m_pStats); + + if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted) + res = VK_NOT_READY; + } + + return res; +} + +//////////////////////////////////////////////////////////////////////////////// +// VmaRecorder + +#if VMA_RECORDING_ENABLED + +VmaRecorder::VmaRecorder() : + m_UseMutex(true), + m_Flags(0), + m_File(VMA_NULL), + m_Freq(INT64_MAX), + m_StartCounter(INT64_MAX) +{ +} + +VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex) +{ + m_UseMutex = useMutex; + m_Flags = settings.flags; + + QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq); + QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter); + + // Open file for writing. + errno_t err = fopen_s(&m_File, settings.pFilePath, "wb"); + if(err != 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + // Write header. + fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording"); + fprintf(m_File, "%s\n", "1,8"); + + return VK_SUCCESS; +} + +VmaRecorder::~VmaRecorder() +{ + if(m_File != VMA_NULL) + { + fclose(m_File); + } +} + +void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex); + Flush(); +} + +void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex); + Flush(); +} + +void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex, + createInfo.memoryTypeIndex, + createInfo.flags, + createInfo.blockSize, + (uint64_t)createInfo.minBlockCount, + (uint64_t)createInfo.maxBlockCount, + createInfo.frameInUseCount, + pool); + Flush(); +} + +void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex, + pool); + Flush(); +} + +void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + createInfo.flags, + 
createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + uint64_t allocationCount, + const VmaAllocation* pAllocations) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex, + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool); + PrintPointerList(allocationCount, pAllocations); + fprintf(m_File, ",%s\n", userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + requiresDedicatedAllocation ? 1 : 0, + prefersDedicatedAllocation ? 1 : 0, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + requiresDedicatedAllocation ? 1 : 0, + prefersDedicatedAllocation ? 
1 : 0, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordFreeMemory(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex, + uint64_t allocationCount, + const VmaAllocation* pAllocations) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex); + PrintPointerList(allocationCount, pAllocations); + fprintf(m_File, "\n"); + Flush(); +} + +void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex, + VmaAllocation allocation, + const void* pUserData) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr( + allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0, + pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordMapMemory(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex, + VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex, + allocation, + offset, + size); + Flush(); +} + +void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex, + VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex, + allocation, + offset, + size); + Flush(); +} + +void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex, + const VkBufferCreateInfo& bufCreateInfo, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock 
lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + bufCreateInfo.flags, + bufCreateInfo.size, + bufCreateInfo.usage, + bufCreateInfo.sharingMode, + allocCreateInfo.flags, + allocCreateInfo.usage, + allocCreateInfo.requiredFlags, + allocCreateInfo.preferredFlags, + allocCreateInfo.memoryTypeBits, + allocCreateInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordCreateImage(uint32_t frameIndex, + const VkImageCreateInfo& imageCreateInfo, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + imageCreateInfo.flags, + imageCreateInfo.imageType, + imageCreateInfo.format, + imageCreateInfo.extent.width, + imageCreateInfo.extent.height, + imageCreateInfo.extent.depth, + imageCreateInfo.mipLevels, + imageCreateInfo.arrayLayers, + imageCreateInfo.samples, + imageCreateInfo.tiling, + imageCreateInfo.usage, + imageCreateInfo.sharingMode, + imageCreateInfo.initialLayout, + allocCreateInfo.flags, + allocCreateInfo.usage, + allocCreateInfo.requiredFlags, + allocCreateInfo.preferredFlags, + allocCreateInfo.memoryTypeBits, + allocCreateInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordDestroyImage(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex, + VmaPool pool) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex, + pool); + Flush(); +} + +void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex, + const VmaDefragmentationInfo2& info, + VmaDefragmentationContext ctx) +{ + CallParams callParams; + 
GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex, + info.flags); + PrintPointerList(info.allocationCount, info.pAllocations); + fprintf(m_File, ","); + PrintPointerList(info.poolCount, info.pPools); + fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n", + info.maxCpuBytesToMove, + info.maxCpuAllocationsToMove, + info.maxGpuBytesToMove, + info.maxGpuAllocationsToMove, + info.commandBuffer, + ctx); + Flush(); +} + +void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex, + VmaDefragmentationContext ctx) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex, + ctx); + Flush(); +} + +void VmaRecorder::RecordSetPoolName(uint32_t frameIndex, + VmaPool pool, + const char* name) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + pool, name != VMA_NULL ? name : ""); + Flush(); +} + +VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData) +{ + if(pUserData != VMA_NULL) + { + if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0) + { + m_Str = (const char*)pUserData; + } + else + { + sprintf_s(m_PtrStr, "%p", pUserData); + m_Str = m_PtrStr; + } + } + else + { + m_Str = ""; + } +} + +void VmaRecorder::WriteConfiguration( + const VkPhysicalDeviceProperties& devProps, + const VkPhysicalDeviceMemoryProperties& memProps, + uint32_t vulkanApiVersion, + bool dedicatedAllocationExtensionEnabled, + bool bindMemory2ExtensionEnabled, + bool memoryBudgetExtensionEnabled) +{ + fprintf(m_File, "Config,Begin\n"); + + fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion)); + + fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion); + fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion); + fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID); + fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID); + fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType); + fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName); + + fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount); + fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity); + fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize); + + fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount); + for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i) + { + fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size); + fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags); + } + fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount); + for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i) + { + fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex); + fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags); + } + + 
fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0); + fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0); + fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0); + + fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0); + fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT); + fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN); + fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0); + fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0); + fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0); + fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY); + fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE); + fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); + + fprintf(m_File, "Config,End\n"); +} + +void VmaRecorder::GetBasicParams(CallParams& outParams) +{ + outParams.threadId = GetCurrentThreadId(); + + LARGE_INTEGER counter; + QueryPerformanceCounter(&counter); + outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq; +} + +void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems) +{ + if(count) + { + fprintf(m_File, "%p", pItems[0]); + for(uint64_t i = 1; i < count; ++i) + { + fprintf(m_File, " %p", pItems[i]); + } + } +} + +void VmaRecorder::Flush() +{ + if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0) + { + fflush(m_File); + } +} + +#endif // #if VMA_RECORDING_ENABLED + +//////////////////////////////////////////////////////////////////////////////// +// VmaAllocationObjectAllocator + +VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) : + m_Allocator(pAllocationCallbacks, 1024) +{ +} + +VmaAllocation VmaAllocationObjectAllocator::Allocate() +{ + VmaMutexLock mutexLock(m_Mutex); + return m_Allocator.Alloc(); +} + +void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc) +{ + VmaMutexLock mutexLock(m_Mutex); + m_Allocator.Free(hAlloc); +} + +//////////////////////////////////////////////////////////////////////////////// +// VmaAllocator_T + +VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : + m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0), + m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0), + m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0), + m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0), + m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0), + m_hDevice(pCreateInfo->device), + m_hInstance(pCreateInfo->instance), + m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL), + m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ? 
+        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
+    m_AllocationObjectAllocator(&m_AllocationCallbacks),
+    m_HeapSizeLimitMask(0),
+    m_PreferredLargeHeapBlockSize(0),
+    m_PhysicalDevice(pCreateInfo->physicalDevice),
+    m_CurrentFrameIndex(0),
+    m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
+    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
+    m_NextPoolId(0)
+#if VMA_RECORDING_ENABLED
+    ,m_pRecorder(VMA_NULL)
+#endif
+{
+    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+    {
+        m_UseKhrDedicatedAllocation = false;
+        m_UseKhrBindMemory2 = false;
+    }
+
+    if(VMA_DEBUG_DETECT_CORRUPTION)
+    {
+        // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
+        VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
+    }
+
+    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
+
+    if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
+    {
+#if !(VMA_DEDICATED_ALLOCATION)
+        if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
+        {
+            VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
+        }
+#endif
+#if !(VMA_BIND_MEMORY2)
+        if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
+        {
+            VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
+        }
+#endif
+    }
+#if !(VMA_MEMORY_BUDGET)
+    if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
+    {
+        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
+    }
+#endif
+#if VMA_VULKAN_VERSION < 1001000
+    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+    {
+        VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
+    }
+#endif
+
+    memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
+    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
+    memset(&m_MemProps, 0, sizeof(m_MemProps));
+
+    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
+    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
+    memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
+
+    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
+    {
+        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
+        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
+    }
+
+    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
+
+    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
+    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
+
+    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
+    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
+    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
+    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
+
+    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
+        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
+
+    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
+    {
+        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
+        {
+            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
+            if(limit != VK_WHOLE_SIZE)
+            {
+                m_HeapSizeLimitMask |= 1u << heapIndex;
+                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
+                {
+                    m_MemProps.memoryHeaps[heapIndex].size = limit;
+                }
+            }
+        }
+    }
+
+    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+    {
+        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
+
+        m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
+            this,
+            VK_NULL_HANDLE, // hParentPool
+            memTypeIndex,
+            preferredBlockSize,
+            0,
+            SIZE_MAX,
+            GetBufferImageGranularity(),
+            pCreateInfo->frameInUseCount,
+            false, // explicitBlockSize
+            false); // linearAlgorithm
+        // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
+        // because minBlockCount is 0.
+        m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
+    }
+}
+
+VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
+{
+    VkResult res = VK_SUCCESS;
+
+    if(pCreateInfo->pRecordSettings != VMA_NULL &&
+        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
+    {
+#if VMA_RECORDING_ENABLED
+        m_pRecorder = vma_new(this, VmaRecorder)();
+        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
+        if(res != VK_SUCCESS)
+        {
+            return res;
+        }
+        m_pRecorder->WriteConfiguration(
+            m_PhysicalDeviceProperties,
+            m_MemProps,
+            m_VulkanApiVersion,
+            m_UseKhrDedicatedAllocation,
+            m_UseKhrBindMemory2,
+            m_UseExtMemoryBudget);
+        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
+#else
+        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
+        return VK_ERROR_FEATURE_NOT_PRESENT;
+#endif
+    }
+
+#if VMA_MEMORY_BUDGET
+    if(m_UseExtMemoryBudget)
+    {
+        UpdateVulkanBudget();
+    }
+#endif // #if VMA_MEMORY_BUDGET
+
+    return res;
+}
+
+VmaAllocator_T::~VmaAllocator_T()
+{
+#if VMA_RECORDING_ENABLED
+    if(m_pRecorder != VMA_NULL)
+    {
+        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
+        vma_delete(this, m_pRecorder);
+    }
+#endif
+
+    VMA_ASSERT(m_Pools.empty());
+
+    for(size_t i = GetMemoryTypeCount(); i--; )
+    {
+        if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
+        {
+            VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
+        }
+
+        vma_delete(this, m_pDedicatedAllocations[i]);
+        vma_delete(this, m_pBlockVectors[i]);
+    }
+}
+
+void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
+{
+#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
+    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
+    m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
+    m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
+    m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
+    m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
+    m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
+    m_VulkanFunctions.vkInvalidateMappedMemoryRanges 
= (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges; + m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory; + m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory; + m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements; + m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements; + m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer; + m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer; + m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage; + m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage; + m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer; +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_hInstance != VK_NULL_HANDLE); + m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = + (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2"); + m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = + (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2"); + m_VulkanFunctions.vkBindBufferMemory2KHR = + (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2"); + m_VulkanFunctions.vkBindImageMemory2KHR = + (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2"); + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = + (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2"); + } +#endif +#if VMA_DEDICATED_ALLOCATION + if(m_UseKhrDedicatedAllocation) + { + m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = + (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR"); + m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = + (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR"); + } +#endif +#if VMA_BIND_MEMORY2 + if(m_UseKhrBindMemory2) + { + m_VulkanFunctions.vkBindBufferMemory2KHR = + (PFN_vkBindBufferMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindBufferMemory2KHR"); + m_VulkanFunctions.vkBindImageMemory2KHR = + (PFN_vkBindImageMemory2KHR)vkGetDeviceProcAddr(m_hDevice, "vkBindImageMemory2KHR"); + } +#endif // #if VMA_BIND_MEMORY2 +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget && m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_hInstance != VK_NULL_HANDLE); + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = + (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vkGetInstanceProcAddr(m_hInstance, "vkGetPhysicalDeviceMemoryProperties2KHR"); + } +#endif // #if VMA_MEMORY_BUDGET +#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1 + +#define VMA_COPY_IF_NOT_NULL(funcName) \ + if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; + + if(pVulkanFunctions != VMA_NULL) + { + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties); + VMA_COPY_IF_NOT_NULL(vkAllocateMemory); + VMA_COPY_IF_NOT_NULL(vkFreeMemory); + VMA_COPY_IF_NOT_NULL(vkMapMemory); + VMA_COPY_IF_NOT_NULL(vkUnmapMemory); + VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory); + 
VMA_COPY_IF_NOT_NULL(vkBindImageMemory); + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkCreateBuffer); + VMA_COPY_IF_NOT_NULL(vkDestroyBuffer); + VMA_COPY_IF_NOT_NULL(vkCreateImage); + VMA_COPY_IF_NOT_NULL(vkDestroyImage); + VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer); +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR); +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR); +#endif +#if VMA_MEMORY_BUDGET + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR); +#endif + } + +#undef VMA_COPY_IF_NOT_NULL + + // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1 + // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions. + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) + { + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); + } +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) + { + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); + } +#endif +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); + } +#endif +} + +VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) +{ + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE; + return VmaAlignUp(isSmallHeap ? 
(heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
+}
+
+VkResult VmaAllocator_T::AllocateMemoryOfType(
+    VkDeviceSize size,
+    VkDeviceSize alignment,
+    bool dedicatedAllocation,
+    VkBuffer dedicatedBuffer,
+    VkImage dedicatedImage,
+    const VmaAllocationCreateInfo& createInfo,
+    uint32_t memTypeIndex,
+    VmaSuballocationType suballocType,
+    size_t allocationCount,
+    VmaAllocation* pAllocations)
+{
+    VMA_ASSERT(pAllocations != VMA_NULL);
+    VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
+
+    VmaAllocationCreateInfo finalCreateInfo = createInfo;
+
+    // If memory type is not HOST_VISIBLE, disable MAPPED.
+    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+        (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+    {
+        finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
+    }
+    // If memory is lazily allocated, it should always be dedicated.
+    if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
+    {
+        finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+    }
+
+    VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
+    VMA_ASSERT(blockVector);
+
+    const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
+    bool preferDedicatedMemory =
+        VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
+        dedicatedAllocation ||
+        // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
+        size > preferredBlockSize / 2;
+
+    if(preferDedicatedMemory &&
+        (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
+        finalCreateInfo.pool == VK_NULL_HANDLE)
+    {
+        finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+    }
+
+    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
+    {
+        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+        {
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+        else
+        {
+            return AllocateDedicatedMemory(
+                size,
+                suballocType,
+                memTypeIndex,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+                finalCreateInfo.pUserData,
+                dedicatedBuffer,
+                dedicatedImage,
+                allocationCount,
+                pAllocations);
+        }
+    }
+    else
+    {
+        VkResult res = blockVector->Allocate(
+            m_CurrentFrameIndex.load(),
+            size,
+            alignment,
+            finalCreateInfo,
+            suballocType,
+            allocationCount,
+            pAllocations);
+        if(res == VK_SUCCESS)
+        {
+            return res;
+        }
+
+        // Block vector allocation failed: try dedicated memory.
+        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+        {
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+        else
+        {
+            res = AllocateDedicatedMemory(
+                size,
+                suballocType,
+                memTypeIndex,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+                finalCreateInfo.pUserData,
+                dedicatedBuffer,
+                dedicatedImage,
+                allocationCount,
+                pAllocations);
+            if(res == VK_SUCCESS)
+            {
+                // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
+                VMA_DEBUG_LOG("    Allocated as DedicatedMemory");
+                return VK_SUCCESS;
+            }
+            else
+            {
+                // Everything failed: Return error code.
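+                // At this point every strategy has been exhausted: the block vector
+                // (unless dedicated memory was forced by request, by
+                // VMA_DEBUG_ALWAYS_DEDICATED_MEMORY, or by the size > preferredBlockSize / 2
+                // heuristic above) and the dedicated-memory fallback, so the last
+                // error is what the caller gets.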
+                VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
+                return res;
+            }
+        }
+    }
+}
+
+VkResult VmaAllocator_T::AllocateDedicatedMemory(
+    VkDeviceSize size,
+    VmaSuballocationType suballocType,
+    uint32_t memTypeIndex,
+    bool withinBudget,
+    bool map,
+    bool isUserDataString,
+    void* pUserData,
+    VkBuffer dedicatedBuffer,
+    VkImage dedicatedImage,
+    size_t allocationCount,
+    VmaAllocation* pAllocations)
+{
+    VMA_ASSERT(allocationCount > 0 && pAllocations);
+
+    if(withinBudget)
+    {
+        const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+        VmaBudget heapBudget = {};
+        GetBudget(&heapBudget, heapIndex, 1);
+        if(heapBudget.usage + size * allocationCount > heapBudget.budget)
+        {
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+    }
+
+    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+    allocInfo.memoryTypeIndex = memTypeIndex;
+    allocInfo.allocationSize = size;
+
+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
+    if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+    {
+        if(dedicatedBuffer != VK_NULL_HANDLE)
+        {
+            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
+            dedicatedAllocInfo.buffer = dedicatedBuffer;
+            allocInfo.pNext = &dedicatedAllocInfo;
+        }
+        else if(dedicatedImage != VK_NULL_HANDLE)
+        {
+            dedicatedAllocInfo.image = dedicatedImage;
+            allocInfo.pNext = &dedicatedAllocInfo;
+        }
+    }
+#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+
+    size_t allocIndex;
+    VkResult res = VK_SUCCESS;
+    for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+    {
+        res = AllocateDedicatedMemoryPage(
+            size,
+            suballocType,
+            memTypeIndex,
+            allocInfo,
+            map,
+            isUserDataString,
+            pUserData,
+            pAllocations + allocIndex);
+        if(res != VK_SUCCESS)
+        {
+            break;
+        }
+    }
+
+    if(res == VK_SUCCESS)
+    {
+        // Register them in m_pDedicatedAllocations.
+        {
+            VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+            AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
+            VMA_ASSERT(pDedicatedAllocations);
+            for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+            {
+                VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
+            }
+        }
+
+        VMA_DEBUG_LOG("    Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
+    }
+    else
+    {
+        // Free all already created allocations.
+        while(allocIndex--)
+        {
+            VmaAllocation currAlloc = pAllocations[allocIndex];
+            VkDeviceMemory hMemory = currAlloc->GetMemory();
+
+            /*
+            There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
+            before vkFreeMemory.
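+            (The specification guarantees that memory still mapped when
+            vkFreeMemory is called is unmapped implicitly.)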
+ + if(currAlloc->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + + FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory); + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize()); + currAlloc->SetUserData(this, VMA_NULL); + currAlloc->Dtor(); + m_AllocationObjectAllocator.Free(currAlloc); + } + + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + void* pUserData, + VmaAllocation* pAllocation) +{ + VkDeviceMemory hMemory = VK_NULL_HANDLE; + VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory); + if(res < 0) + { + VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; + } + + void* pMappedData = VMA_NULL; + if(map) + { + res = (*m_VulkanFunctions.vkMapMemory)( + m_hDevice, + hMemory, + 0, + VK_WHOLE_SIZE, + 0, + &pMappedData); + if(res < 0) + { + VMA_DEBUG_LOG(" vkMapMemory FAILED"); + FreeVulkanMemory(memTypeIndex, size, hMemory); + return res; + } + } + + *pAllocation = m_AllocationObjectAllocator.Allocate(); + (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString); + (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size); + (*pAllocation)->SetUserData(this, pUserData); + m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); + } + + return VK_SUCCESS; +} + +void VmaAllocator_T::GetBufferMemoryRequirements( + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.buffer = hBuffer; + + VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + memReq2.pNext = &memDedicatedReq; + + (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +void VmaAllocator_T::GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.image = hImage; + + VkMemoryDedicatedRequirementsKHR memDedicatedReq = { 
VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+
+        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+        memReq2.pNext = &memDedicatedReq;
+
+        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+
+        memReq = memReq2.memoryRequirements;
+        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
+        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+    }
+    else
+#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+    {
+        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
+        requiresDedicatedAllocation = false;
+        prefersDedicatedAllocation = false;
+    }
+}
+
+VkResult VmaAllocator_T::AllocateMemory(
+    const VkMemoryRequirements& vkMemReq,
+    bool requiresDedicatedAllocation,
+    bool prefersDedicatedAllocation,
+    VkBuffer dedicatedBuffer,
+    VkImage dedicatedImage,
+    const VmaAllocationCreateInfo& createInfo,
+    VmaSuballocationType suballocType,
+    size_t allocationCount,
+    VmaAllocation* pAllocations)
+{
+    memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+
+    VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
+
+    if(vkMemReq.size == 0)
+    {
+        return VK_ERROR_VALIDATION_FAILED_EXT;
+    }
+    if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
+        (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+    {
+        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
+        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+    }
+    if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+        (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
+    {
+        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
+        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+    }
+    if(requiresDedicatedAllocation)
+    {
+        if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+        {
+            VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+        if(createInfo.pool != VK_NULL_HANDLE)
+        {
+            VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+    }
+    if((createInfo.pool != VK_NULL_HANDLE) &&
+        ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
+    {
+        VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
+        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+    }
+
+    if(createInfo.pool != VK_NULL_HANDLE)
+    {
+        const VkDeviceSize alignmentForPool = VMA_MAX(
+            vkMemReq.alignment,
+            GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
+
+        VmaAllocationCreateInfo createInfoForPool = createInfo;
+        // If memory type is not HOST_VISIBLE, disable MAPPED.
+        if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+            (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+        {
+            createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
+        }
+
+        return createInfo.pool->m_BlockVector.Allocate(
+            m_CurrentFrameIndex.load(),
+            vkMemReq.size,
+            alignmentForPool,
+            createInfoForPool,
+            suballocType,
+            allocationCount,
+            pAllocations);
+    }
+    else
+    {
+        // Bit mask of Vulkan memory types acceptable for this allocation.
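+        // The search below starts from the cheapest matching memory type; each time
+        // allocation from the chosen type fails, that type's bit is cleared from the
+        // mask and the next-best candidate is tried, until the mask runs out.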
+        uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
+        uint32_t memTypeIndex = UINT32_MAX;
+        VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
+        if(res == VK_SUCCESS)
+        {
+            VkDeviceSize alignmentForMemType = VMA_MAX(
+                vkMemReq.alignment,
+                GetMemoryTypeMinAlignment(memTypeIndex));
+
+            res = AllocateMemoryOfType(
+                vkMemReq.size,
+                alignmentForMemType,
+                requiresDedicatedAllocation || prefersDedicatedAllocation,
+                dedicatedBuffer,
+                dedicatedImage,
+                createInfo,
+                memTypeIndex,
+                suballocType,
+                allocationCount,
+                pAllocations);
+            // Succeeded on first try.
+            if(res == VK_SUCCESS)
+            {
+                return res;
+            }
+            // Allocation from this memory type failed. Try other compatible memory types.
+            else
+            {
+                for(;;)
+                {
+                    // Remove old memTypeIndex from list of possibilities.
+                    memoryTypeBits &= ~(1u << memTypeIndex);
+                    // Find alternative memTypeIndex.
+                    res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
+                    if(res == VK_SUCCESS)
+                    {
+                        alignmentForMemType = VMA_MAX(
+                            vkMemReq.alignment,
+                            GetMemoryTypeMinAlignment(memTypeIndex));
+
+                        res = AllocateMemoryOfType(
+                            vkMemReq.size,
+                            alignmentForMemType,
+                            requiresDedicatedAllocation || prefersDedicatedAllocation,
+                            dedicatedBuffer,
+                            dedicatedImage,
+                            createInfo,
+                            memTypeIndex,
+                            suballocType,
+                            allocationCount,
+                            pAllocations);
+                        // Allocation from this alternative memory type succeeded.
+                        if(res == VK_SUCCESS)
+                        {
+                            return res;
+                        }
+                        // else: Allocation from this memory type failed. Try next one - next loop iteration.
+                    }
+                    // No other matching memory type index could be found.
+                    else
+                    {
+                        // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
+                        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+                    }
+                }
+            }
+        }
+        // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
+        else
+            return res;
+    }
+}
+
+void VmaAllocator_T::FreeMemory(
+    size_t allocationCount,
+    const VmaAllocation* pAllocations)
+{
+    VMA_ASSERT(pAllocations);
+
+    for(size_t allocIndex = allocationCount; allocIndex--; )
+    {
+        VmaAllocation allocation = pAllocations[allocIndex];
+
+        if(allocation != VK_NULL_HANDLE)
+        {
+            if(TouchAllocation(allocation))
+            {
+                if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+                {
+                    FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
+                }
+
+                switch(allocation->GetType())
+                {
+                case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+                    {
+                        VmaBlockVector* pBlockVector = VMA_NULL;
+                        VmaPool hPool = allocation->GetBlock()->GetParentPool();
+                        if(hPool != VK_NULL_HANDLE)
+                        {
+                            pBlockVector = &hPool->m_BlockVector;
+                        }
+                        else
+                        {
+                            const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+                            pBlockVector = m_pBlockVectors[memTypeIndex];
+                        }
+                        pBlockVector->Free(allocation);
+                    }
+                    break;
+                case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+                    FreeDedicatedMemory(allocation);
+                    break;
+                default:
+                    VMA_ASSERT(0);
+                }
+            }
+
+            // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
+            m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
+            allocation->SetUserData(this, VMA_NULL);
+            allocation->Dtor();
+            m_AllocationObjectAllocator.Free(allocation);
+        }
+    }
+}
+
+VkResult VmaAllocator_T::ResizeAllocation(
+    const VmaAllocation alloc,
+    VkDeviceSize newSize)
+{
+    // This function is deprecated, so it does nothing. It's left for backward compatibility.
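+    // It only validates the request: VK_SUCCESS is returned when newSize equals the
+    // current size, lost or zero-sized requests fail validation, and any actual
+    // resize attempt returns VK_ERROR_OUT_OF_POOL_MEMORY.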
+ if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST) + { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if(newSize == alloc->GetSize()) + { + return VK_SUCCESS; + } + return VK_ERROR_OUT_OF_POOL_MEMORY; +} + +void VmaAllocator_T::CalculateStats(VmaStats* pStats) +{ + // Initialize. + InitStatInfo(pStats->total); + for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) + InitStatInfo(pStats->memoryType[i]); + for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) + InitStatInfo(pStats->memoryHeap[i]); + + // Process default pools. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(pBlockVector); + pBlockVector->AddStats(pStats); + } + + // Process custom pools. + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex) + { + m_Pools[poolIndex]->m_BlockVector.AddStats(pStats); + } + } + + // Process dedicated allocations. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); + AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex]; + VMA_ASSERT(pDedicatedAllocVector); + for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex) + { + VmaStatInfo allocationStatInfo; + (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo); + VmaAddStatInfo(pStats->total, allocationStatInfo); + VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo); + VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo); + } + } + + // Postprocess. + VmaPostprocessCalcStatInfo(pStats->total); + for(size_t i = 0; i < GetMemoryTypeCount(); ++i) + VmaPostprocessCalcStatInfo(pStats->memoryType[i]); + for(size_t i = 0; i < GetMemoryHeapCount(); ++i) + VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]); +} + +void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount) +{ +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + if(m_Budget.m_OperationsSinceBudgetFetch < 30) + { + VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex); + for(uint32_t i = 0; i < heapCount; ++i, ++outBudget) + { + const uint32_t heapIndex = firstHeap + i; + + outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) + { + outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] + + outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + else + { + outBudget->usage = 0; + } + + // Have to take MIN with heap size because explicit HeapSizeLimit is included in it. 
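+                // Worked example: if the last fetched Vulkan usage was 96 MB when
+                // blockBytes was 64 MB, and blockBytes has since grown to 80 MB,
+                // the estimate is usage = 96 + 80 - 64 = 112 MB until the next fetch.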
+ outBudget->budget = VMA_MIN( + m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size); + } + } + else + { + UpdateVulkanBudget(); // Outside of mutex lock + GetBudget(outBudget, firstHeap, heapCount); // Recursion + } + } + else +#endif + { + for(uint32_t i = 0; i < heapCount; ++i, ++outBudget) + { + const uint32_t heapIndex = firstHeap + i; + + outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + outBudget->usage = outBudget->blockBytes; + outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. + } + } +} + +static const uint32_t VMA_VENDOR_ID_AMD = 4098; + +VkResult VmaAllocator_T::DefragmentationBegin( + const VmaDefragmentationInfo2& info, + VmaDefragmentationStats* pStats, + VmaDefragmentationContext* pContext) +{ + if(info.pAllocationsChanged != VMA_NULL) + { + memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32)); + } + + *pContext = vma_new(this, VmaDefragmentationContext_T)( + this, m_CurrentFrameIndex.load(), info.flags, pStats); + + (*pContext)->AddPools(info.poolCount, info.pPools); + (*pContext)->AddAllocations( + info.allocationCount, info.pAllocations, info.pAllocationsChanged); + + VkResult res = (*pContext)->Defragment( + info.maxCpuBytesToMove, info.maxCpuAllocationsToMove, + info.maxGpuBytesToMove, info.maxGpuAllocationsToMove, + info.commandBuffer, pStats, info.flags); + + if(res != VK_NOT_READY) + { + vma_delete(this, *pContext); + *pContext = VMA_NULL; + } + + return res; +} + +VkResult VmaAllocator_T::DefragmentationEnd( + VmaDefragmentationContext context) +{ + vma_delete(this, context); + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::DefragmentationPassBegin( + VmaDefragmentationPassInfo* pInfo, + VmaDefragmentationContext context) +{ + return context->DefragmentPassBegin(pInfo); +} +VkResult VmaAllocator_T::DefragmentationPassEnd( + VmaDefragmentationContext context) +{ + return context->DefragmentPassEnd(); + +} + +void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) +{ + if(hAllocation->CanBecomeLost()) + { + /* + Warning: This is a carefully designed algorithm. + Do not modify unless you really know what you're doing :) + */ + const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); + uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); + for(;;) + { + if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) + { + pAllocationInfo->memoryType = UINT32_MAX; + pAllocationInfo->deviceMemory = VK_NULL_HANDLE; + pAllocationInfo->offset = 0; + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = VMA_NULL; + pAllocationInfo->pUserData = hAllocation->GetUserData(); + return; + } + else if(localLastUseFrameIndex == localCurrFrameIndex) + { + pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); + pAllocationInfo->deviceMemory = hAllocation->GetMemory(); + pAllocationInfo->offset = hAllocation->GetOffset(); + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = VMA_NULL; + pAllocationInfo->pUserData = hAllocation->GetUserData(); + return; + } + else // Last use time earlier than current time. 
+            {
+                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+                {
+                    localLastUseFrameIndex = localCurrFrameIndex;
+                }
+            }
+        }
+    }
+    else
+    {
+#if VMA_STATS_STRING_ENABLED
+        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+        for(;;)
+        {
+            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
+            if(localLastUseFrameIndex == localCurrFrameIndex)
+            {
+                break;
+            }
+            else // Last use time earlier than current time.
+            {
+                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+                {
+                    localLastUseFrameIndex = localCurrFrameIndex;
+                }
+            }
+        }
+#endif
+
+        pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
+        pAllocationInfo->deviceMemory = hAllocation->GetMemory();
+        pAllocationInfo->offset = hAllocation->GetOffset();
+        pAllocationInfo->size = hAllocation->GetSize();
+        pAllocationInfo->pMappedData = hAllocation->GetMappedData();
+        pAllocationInfo->pUserData = hAllocation->GetUserData();
+    }
+}
+
+bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
+{
+    // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
+    if(hAllocation->CanBecomeLost())
+    {
+        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+        for(;;)
+        {
+            if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
+            {
+                return false;
+            }
+            else if(localLastUseFrameIndex == localCurrFrameIndex)
+            {
+                return true;
+            }
+            else // Last use time earlier than current time.
+            {
+                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+                {
+                    localLastUseFrameIndex = localCurrFrameIndex;
+                }
+            }
+        }
+    }
+    else
+    {
+#if VMA_STATS_STRING_ENABLED
+        uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
+        uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
+        for(;;)
+        {
+            VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
+            if(localLastUseFrameIndex == localCurrFrameIndex)
+            {
+                break;
+            }
+            else // Last use time earlier than current time.
+            {
+                if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+                {
+                    localLastUseFrameIndex = localCurrFrameIndex;
+                }
+            }
+        }
+#endif
+
+        return true;
+    }
+}
+
+VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
+{
+    VMA_DEBUG_LOG("  CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
+
+    VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
+
+    if(newCreateInfo.maxBlockCount == 0)
+    {
+        newCreateInfo.maxBlockCount = SIZE_MAX;
+    }
+    if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
+    {
+        return VK_ERROR_INITIALIZATION_FAILED;
+    }
+
+    const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
+
+    *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
+
+    VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
+    if(res != VK_SUCCESS)
+    {
+        vma_delete(this, *pPool);
+        *pPool = VMA_NULL;
+        return res;
+    }
+
+    // Add to m_Pools.
+    {
+        VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+        (*pPool)->SetId(m_NextPoolId++);
+        VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
+    }
+
+    return VK_SUCCESS;
+}
+
+void VmaAllocator_T::DestroyPool(VmaPool pool)
+{
+    // Remove from m_Pools.
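+    // m_Pools is kept sorted by pointer value (see VmaVectorInsertSorted in
+    // CreatePool), so the sorted-vector helper can locate the pool by binary search.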
+    {
+        VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+        bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
+        VMA_ASSERT(success && "Pool not found in Allocator.");
+    }
+
+    vma_delete(this, pool);
+}
+
+void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
+{
+    pool->m_BlockVector.GetPoolStats(pPoolStats);
+}
+
+void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
+{
+    m_CurrentFrameIndex.store(frameIndex);
+
+#if VMA_MEMORY_BUDGET
+    if(m_UseExtMemoryBudget)
+    {
+        UpdateVulkanBudget();
+    }
+#endif // #if VMA_MEMORY_BUDGET
+}
+
+void VmaAllocator_T::MakePoolAllocationsLost(
+    VmaPool hPool,
+    size_t* pLostAllocationCount)
+{
+    hPool->m_BlockVector.MakePoolAllocationsLost(
+        m_CurrentFrameIndex.load(),
+        pLostAllocationCount);
+}
+
+VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
+{
+    return hPool->m_BlockVector.CheckCorruption();
+}
+
+VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
+{
+    VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
+
+    // Process default pools.
+    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+    {
+        if(((1u << memTypeIndex) & memoryTypeBits) != 0)
+        {
+            VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+            VMA_ASSERT(pBlockVector);
+            VkResult localRes = pBlockVector->CheckCorruption();
+            switch(localRes)
+            {
+            case VK_ERROR_FEATURE_NOT_PRESENT:
+                break;
+            case VK_SUCCESS:
+                finalRes = VK_SUCCESS;
+                break;
+            default:
+                return localRes;
+            }
+        }
+    }
+
+    // Process custom pools.
+    {
+        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
+        {
+            if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
+            {
+                VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
+                switch(localRes)
+                {
+                case VK_ERROR_FEATURE_NOT_PRESENT:
+                    break;
+                case VK_SUCCESS:
+                    finalRes = VK_SUCCESS;
+                    break;
+                default:
+                    return localRes;
+                }
+            }
+        }
+    }
+
+    return finalRes;
+}
+
+void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
+{
+    *pAllocation = m_AllocationObjectAllocator.Allocate();
+    (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
+    (*pAllocation)->InitLost();
+}
+
+VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
+{
+    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
+
+    // HeapSizeLimit is in effect for this heap.
+    if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
+    {
+        const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+        VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
+        for(;;)
+        {
+            const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
+            if(blockBytesAfterAllocation > heapSize)
+            {
+                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+            }
+            if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
+            {
+                break;
+            }
+        }
+    }
+    else
+    {
+        m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
+    }
+
+    // VULKAN CALL vkAllocateMemory.
+    VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
+
+    if(res == VK_SUCCESS)
+    {
+#if VMA_MEMORY_BUDGET
+        ++m_Budget.m_OperationsSinceBudgetFetch;
+#endif
+
+        // Informative callback.
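+        // pfnAllocate/pfnFree let the application observe every VkDeviceMemory block
+        // that VMA allocates or frees, e.g. for logging or external budget tracking.
+        // They are purely informative and cannot veto the call.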
+        if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
+        {
+            (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
+        }
+    }
+    else
+    {
+        m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
+    }
+
+    return res;
+}
+
+void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
+{
+    // Informative callback.
+    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
+    {
+        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
+    }
+
+    // VULKAN CALL vkFreeMemory.
+    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
+
+    m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
+}
+
+VkResult VmaAllocator_T::BindVulkanBuffer(
+    VkDeviceMemory memory,
+    VkDeviceSize memoryOffset,
+    VkBuffer buffer,
+    const void* pNext)
+{
+    if(pNext != VMA_NULL)
+    {
+#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+        if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
+            m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
+        {
+            VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
+            bindBufferMemoryInfo.pNext = pNext;
+            bindBufferMemoryInfo.buffer = buffer;
+            bindBufferMemoryInfo.memory = memory;
+            bindBufferMemoryInfo.memoryOffset = memoryOffset;
+            return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
+        }
+        else
+#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+        {
+            return VK_ERROR_EXTENSION_NOT_PRESENT;
+        }
+    }
+    else
+    {
+        return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
+    }
+}
+
+VkResult VmaAllocator_T::BindVulkanImage(
+    VkDeviceMemory memory,
+    VkDeviceSize memoryOffset,
+    VkImage image,
+    const void* pNext)
+{
+    if(pNext != VMA_NULL)
+    {
+#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+        if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
+            m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
+        {
+            VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
+            bindImageMemoryInfo.pNext = pNext;
+            bindImageMemoryInfo.image = image;
+            bindImageMemoryInfo.memory = memory;
+            bindImageMemoryInfo.memoryOffset = memoryOffset;
+            return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
+        }
+        else
+#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+        {
+            return VK_ERROR_EXTENSION_NOT_PRESENT;
+        }
+    }
+    else
+    {
+        return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
+    }
+}
+
+VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
+{
+    if(hAllocation->CanBecomeLost())
+    {
+        return VK_ERROR_MEMORY_MAP_FAILED;
+    }
+
+    switch(hAllocation->GetType())
+    {
+    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+        {
+            VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+            char *pBytes = VMA_NULL;
+            VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
+            if(res == VK_SUCCESS)
+            {
+                *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
+                hAllocation->BlockAllocMap();
+            }
+            return res;
+        }
+    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+        return hAllocation->DedicatedAllocMap(this, ppData);
+    default:
+        VMA_ASSERT(0);
+        return VK_ERROR_MEMORY_MAP_FAILED;
+    }
+}
+
+void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
+{
+    switch(hAllocation->GetType())
+    {
+    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+        {
+            VmaDeviceMemoryBlock* const pBlock =
hAllocation->GetBlock(); + hAllocation->BlockAllocUnmap(); + pBlock->Unmap(this, 1); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + hAllocation->DedicatedAllocUnmap(this); + break; + default: + VMA_ASSERT(0); + } +} + +VkResult VmaAllocator_T::BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VkResult res = VK_SUCCESS; + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext); + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?"); + res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext); + break; + } + default: + VMA_ASSERT(0); + } + return res; +} + +VkResult VmaAllocator_T::BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ + VkResult res = VK_SUCCESS; + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext); + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); + VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?"); + res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext); + break; + } + default: + VMA_ASSERT(0); + } + return res; +} + +void VmaAllocator_T::FlushOrInvalidateAllocation( + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op) +{ + const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex(); + if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) + { + const VkDeviceSize allocationSize = hAllocation->GetSize(); + VMA_ASSERT(offset <= allocationSize); + + const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; + + VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE }; + memRange.memory = hAllocation->GetMemory(); + + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + memRange.size = allocationSize - memRange.offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + memRange.size = VMA_MIN( + VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize), + allocationSize - memRange.offset); + } + break; + + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + // 1. Still within this allocation. + memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + size = allocationSize - offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + } + memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize); + + // 2. Adjust to whole block. 
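+            // Example with nonCoherentAtomSize = 64: offset 100, size 8 becomes
+            // memRange.offset = VmaAlignDown(100, 64) = 64 and
+            // memRange.size = VmaAlignUp(8 + 36, 64) = 64, still relative to the
+            // allocation; step 2 then shifts by the allocation's offset within the
+            // block and clamps against the block size.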
+            const VkDeviceSize allocationOffset = hAllocation->GetOffset();
+            VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
+            const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
+            memRange.offset += allocationOffset;
+            memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
+
+            break;
+        }
+
+        default:
+            VMA_ASSERT(0);
+        }
+
+        switch(op)
+        {
+        case VMA_CACHE_FLUSH:
+            (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
+            break;
+        case VMA_CACHE_INVALIDATE:
+            (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
+            break;
+        default:
+            VMA_ASSERT(0);
+        }
+    }
+    // else: Just ignore this call.
+}
+
+void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
+{
+    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+
+    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+    {
+        VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+        AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
+        VMA_ASSERT(pDedicatedAllocations);
+        bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
+        VMA_ASSERT(success);
+    }
+
+    VkDeviceMemory hMemory = allocation->GetMemory();
+
+    /*
+    There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
+    before vkFreeMemory.
+
+    if(allocation->GetMappedData() != VMA_NULL)
+    {
+        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+    }
+    */
+
+    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
+
+    VMA_DEBUG_LOG("    Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
+}
+
+uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
+{
+    VkBufferCreateInfo dummyBufCreateInfo;
+    VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
+
+    uint32_t memoryTypeBits = 0;
+
+    // Create buffer.
+    VkBuffer buf = VK_NULL_HANDLE;
+    VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
+        m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
+    if(res == VK_SUCCESS)
+    {
+        // Query for supported memory types.
+        VkMemoryRequirements memReq;
+        (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
+        memoryTypeBits = memReq.memoryTypeBits;
+
+        // Destroy buffer.
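+        // The dummy buffer is never bound to any memory; it exists only so that
+        // vkGetBufferMemoryRequirements can report which memory types a buffer
+        // used for GPU defragmentation copies could live in.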
+ (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks()); + } + + return memoryTypeBits; +} + +#if VMA_MEMORY_BUDGET + +void VmaAllocator_T::UpdateVulkanBudget() +{ + VMA_ASSERT(m_UseExtMemoryBudget); + + VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR }; + + VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT }; + memProps.pNext = &budgetProps; + + GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps); + + { + VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex); + + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex]; + m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex]; + m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load(); + } + m_Budget.m_OperationsSinceBudgetFetch = 0; + } +} + +#endif // #if VMA_MEMORY_BUDGET + +void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) +{ + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && + !hAllocation->CanBecomeLost() && + (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { + void* pData = VMA_NULL; + VkResult res = Map(hAllocation, &pData); + if(res == VK_SUCCESS) + { + memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); + FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH); + Unmap(hAllocation); + } + else + { + VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation."); + } + } +} + +uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() +{ + uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); + if(memoryTypeBits == UINT32_MAX) + { + memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); + m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); + } + return memoryTypeBits; +} + +#if VMA_STATS_STRING_ENABLED + +void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) +{ + bool dedicatedAllocationsStarted = false; + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); + AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex]; + VMA_ASSERT(pDedicatedAllocVector); + if(pDedicatedAllocVector->empty() == false) + { + if(dedicatedAllocationsStarted == false) + { + dedicatedAllocationsStarted = true; + json.WriteString("DedicatedAllocations"); + json.BeginObject(); + } + + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + + json.BeginArray(); + + for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i) + { + json.BeginObject(true); + const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i]; + hAlloc->PrintParameters(json); + json.EndObject(); + } + + json.EndArray(); + } + } + if(dedicatedAllocationsStarted) + { + json.EndObject(); + } + + { + bool allocationsStarted = false; + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false) + { + if(allocationsStarted == false) + { + allocationsStarted = true; + json.WriteString("DefaultPools"); + json.BeginObject(); + } + + json.BeginString("Type "); + 
json.ContinueString(memTypeIndex); + json.EndString(); + + m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json); + } + } + if(allocationsStarted) + { + json.EndObject(); + } + } + + // Custom pools + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + const size_t poolCount = m_Pools.size(); + if(poolCount > 0) + { + json.WriteString("Pools"); + json.BeginObject(); + for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex) + { + json.BeginString(); + json.ContinueString(m_Pools[poolIndex]->GetId()); + json.EndString(); + + m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json); + } + json.EndObject(); + } + } +} + +#endif // #if VMA_STATS_STRING_ENABLED + +//////////////////////////////////////////////////////////////////////////////// +// Public interface + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* pCreateInfo, + VmaAllocator* pAllocator) +{ + VMA_ASSERT(pCreateInfo && pAllocator); + VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 || + (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 1)); + VMA_DEBUG_LOG("vmaCreateAllocator"); + *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo); + return (*pAllocator)->Init(pCreateInfo); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator allocator) +{ + if(allocator != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyAllocator"); + VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; + vma_delete(&allocationCallbacks, allocator); + } +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator allocator, + const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceProperties); + *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator allocator, + const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties); + *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* pFlags) +{ + VMA_ASSERT(allocator && pFlags); + VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount()); + *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator allocator, + uint32_t frameIndex) +{ + VMA_ASSERT(allocator); + VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->SetCurrentFrameIndex(frameIndex); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats( + VmaAllocator allocator, + VmaStats* pStats) +{ + VMA_ASSERT(allocator && pStats); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->CalculateStats(pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget( + VmaAllocator allocator, + VmaBudget* pBudget) +{ + VMA_ASSERT(allocator && pBudget); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount()); +} + +#if VMA_STATS_STRING_ENABLED + +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator allocator, + char** ppStatsString, + VkBool32 detailedMap) +{ + VMA_ASSERT(allocator && ppStatsString); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VmaStringBuilder sb(allocator); + { + VmaJsonWriter 
json(allocator->GetAllocationCallbacks(), sb); + json.BeginObject(); + + VmaBudget budget[VK_MAX_MEMORY_HEAPS]; + allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount()); + + VmaStats stats; + allocator->CalculateStats(&stats); + + json.WriteString("Total"); + VmaPrintStatInfo(json, stats.total); + + for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) + { + json.BeginString("Heap "); + json.ContinueString(heapIndex); + json.EndString(); + json.BeginObject(); + + json.WriteString("Size"); + json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size); + + json.WriteString("Flags"); + json.BeginArray(true); + if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0) + { + json.WriteString("DEVICE_LOCAL"); + } + json.EndArray(); + + json.WriteString("Budget"); + json.BeginObject(); + { + json.WriteString("BlockBytes"); + json.WriteNumber(budget[heapIndex].blockBytes); + json.WriteString("AllocationBytes"); + json.WriteNumber(budget[heapIndex].allocationBytes); + json.WriteString("Usage"); + json.WriteNumber(budget[heapIndex].usage); + json.WriteString("Budget"); + json.WriteNumber(budget[heapIndex].budget); + } + json.EndObject(); + + if(stats.memoryHeap[heapIndex].blockCount > 0) + { + json.WriteString("Stats"); + VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]); + } + + for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) + { + if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) + { + json.BeginString("Type "); + json.ContinueString(typeIndex); + json.EndString(); + + json.BeginObject(); + + json.WriteString("Flags"); + json.BeginArray(true); + VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags; + if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0) + { + json.WriteString("DEVICE_LOCAL"); + } + if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { + json.WriteString("HOST_VISIBLE"); + } + if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0) + { + json.WriteString("HOST_COHERENT"); + } + if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0) + { + json.WriteString("HOST_CACHED"); + } + if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0) + { + json.WriteString("LAZILY_ALLOCATED"); + } + json.EndArray(); + + if(stats.memoryType[typeIndex].blockCount > 0) + { + json.WriteString("Stats"); + VmaPrintStatInfo(json, stats.memoryType[typeIndex]); + } + + json.EndObject(); + } + } + + json.EndObject(); + } + if(detailedMap == VK_TRUE) + { + allocator->PrintDetailedMap(json); + } + + json.EndObject(); + } + + const size_t len = sb.GetLength(); + char* const pChars = vma_new_array(allocator, char, len + 1); + if(len > 0) + { + memcpy(pChars, sb.GetData(), len); + } + pChars[len] = '\0'; + *ppStatsString = pChars; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator allocator, + char* pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(allocator); + size_t len = strlen(pStatsString); + vma_delete_array(allocator, pStatsString, len + 1); + } +} + +#endif // #if VMA_STATS_STRING_ENABLED + +/* +This function is not protected by any mutex because it just reads immutable data. 
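+(The allocator's memory properties are filled once at creation time and never
+change afterwards, so concurrent readers need no synchronization.)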
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + if(pAllocationCreateInfo->memoryTypeBits != 0) + { + memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; + } + + uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags; + uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags; + uint32_t notPreferredFlags = 0; + + // Convert usage to requiredFlags and preferredFlags. + switch(pAllocationCreateInfo->usage) + { + case VMA_MEMORY_USAGE_UNKNOWN: + break; + case VMA_MEMORY_USAGE_GPU_ONLY: + if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_CPU_ONLY: + requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + break; + case VMA_MEMORY_USAGE_CPU_TO_GPU: + requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_GPU_TO_CPU: + requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + break; + case VMA_MEMORY_USAGE_CPU_COPY: + notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + break; + case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: + requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; + break; + default: + VMA_ASSERT(0); + break; + } + + *pMemoryTypeIndex = UINT32_MAX; + uint32_t minCost = UINT32_MAX; + for(uint32_t memTypeIndex = 0, memTypeBit = 1; + memTypeIndex < allocator->GetMemoryTypeCount(); + ++memTypeIndex, memTypeBit <<= 1) + { + // This memory type is acceptable according to memoryTypeBits bitmask. + if((memTypeBit & memoryTypeBits) != 0) + { + const VkMemoryPropertyFlags currFlags = + allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; + // This memory type contains requiredFlags. + if((requiredFlags & ~currFlags) == 0) + { + // Calculate cost as number of bits from preferredFlags not present in this memory type. + uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) + + VmaCountBitsSet(currFlags & notPreferredFlags); + // Remember memory type with lowest cost. + if(currCost < minCost) + { + *pMemoryTypeIndex = memTypeIndex; + if(currCost == 0) + { + return VK_SUCCESS; + } + minCost = currCost; + } + } + } + } + return (*pMemoryTypeIndex != UINT32_MAX) ? 
VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pBufferCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + VkBuffer hBuffer = VK_NULL_HANDLE; + VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer( + hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements( + hDev, hBuffer, &memReq); + + res = vmaFindMemoryTypeIndex( + allocator, + memReq.memoryTypeBits, + pAllocationCreateInfo, + pMemoryTypeIndex); + + allocator->GetVulkanFunctions().vkDestroyBuffer( + hDev, hBuffer, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pImageCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + VkImage hImage = VK_NULL_HANDLE; + VkResult res = allocator->GetVulkanFunctions().vkCreateImage( + hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + allocator->GetVulkanFunctions().vkGetImageMemoryRequirements( + hDev, hImage, &memReq); + + res = vmaFindMemoryTypeIndex( + allocator, + memReq.memoryTypeBits, + pAllocationCreateInfo, + pMemoryTypeIndex); + + allocator->GetVulkanFunctions().vkDestroyImage( + hDev, hImage, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator allocator, + const VmaPoolCreateInfo* pCreateInfo, + VmaPool* pPool) +{ + VMA_ASSERT(allocator && pCreateInfo && pPool); + + VMA_DEBUG_LOG("vmaCreatePool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult res = allocator->CreatePool(pCreateInfo, pPool); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool); + } +#endif + + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator allocator, + VmaPool pool) +{ + VMA_ASSERT(allocator); + + if(pool == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyPool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool); + } +#endif + + allocator->DestroyPool(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats( + VmaAllocator allocator, + VmaPool pool, + VmaPoolStats* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetPoolStats(pool, pPoolStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost( + VmaAllocator allocator, + VmaPool pool, + size_t* pLostAllocationCount) +{ + VMA_ASSERT(allocator && pool); + + 
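+    // Note: marks all allocations belonging to the pool as lost, as if they
+    // were not used in the current frame; pLostAllocationCount is optional.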
VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool); + } +#endif + + allocator->MakePoolAllocationsLost(pool, pLostAllocationCount); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VMA_DEBUG_LOG("vmaCheckPoolCorruption"); + + return allocator->CheckPoolCorruption(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char** ppName) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_LOG("vmaGetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *ppName = pool->GetName(); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char* pName) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_LOG("vmaSetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + pool->SetName(pName); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName); + } +#endif +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemory( + allocator->GetCurrentFrameIndex(), + *pVkMemoryRequirements, + *pCreateInfo, + *pAllocation); + } +#endif + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + size_t allocationCount, + VmaAllocation* pAllocations, + VmaAllocationInfo* pAllocationInfo) +{ + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations); + + VMA_DEBUG_LOG("vmaAllocateMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + allocationCount, + pAllocations); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemoryPages( + allocator->GetCurrentFrameIndex(), + *pVkMemoryRequirements, + *pCreateInfo, + (uint64_t)allocationCount, + pAllocations); + } +#endif + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + for(size_t i = 0; i < allocationCount; ++i) + { + 
allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); + } + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( + VmaAllocator allocator, + VkBuffer buffer, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(buffer, vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + buffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemoryForBuffer( + allocator->GetCurrentFrameIndex(), + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pCreateInfo, + *pAllocation); + } +#endif + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( + VmaAllocator allocator, + VkImage image, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(image, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + image, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemoryForImage( + allocator->GetCurrentFrameIndex(), + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pCreateInfo, + *pAllocation); + } +#endif + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaFreeMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordFreeMemory( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + allocator->FreeMemory( + 1, // allocationCount + &allocation); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator allocator, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + if(allocationCount == 0) + { + return; + } + + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaFreeMemoryPages"); 
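+    // Counterpart of vmaAllocateMemoryPages: frees allocationCount
+    // allocations in a single call.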
+ + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordFreeMemoryPages( + allocator->GetCurrentFrameIndex(), + (uint64_t)allocationCount, + pAllocations); + } +#endif + + allocator->FreeMemory(allocationCount, pAllocations); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize newSize) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaResizeAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->ResizeAllocation(allocation, newSize); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( + VmaAllocator allocator, + VmaAllocation allocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && allocation && pAllocationInfo); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordGetAllocationInfo( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + allocator->GetAllocationInfo(allocation, pAllocationInfo); +} + +VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordTouchAllocation( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + return allocator->TouchAllocation(allocation); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator allocator, + VmaAllocation allocation, + void* pUserData) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocation->SetUserData(allocator, pUserData); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordSetAllocationUserData( + allocator->GetCurrentFrameIndex(), + allocation, + pUserData); + } +#endif +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation( + VmaAllocator allocator, + VmaAllocation* pAllocation) +{ + VMA_ASSERT(allocator && pAllocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + + allocator->CreateLostAllocation(pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordCreateLostAllocation( + allocator->GetCurrentFrameIndex(), + *pAllocation); + } +#endif +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( + VmaAllocator allocator, + VmaAllocation allocation, + void** ppData) +{ + VMA_ASSERT(allocator && allocation && ppData); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult res = allocator->Map(allocation, ppData); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordMapMemory( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordUnmapMemory( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + allocator->Unmap(allocation); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + 
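+    // offset and size are relative to the beginning of the allocation;
+    // size may be VK_WHOLE_SIZE to flush up to the end of the allocation.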
VMA_DEBUG_LOG("vmaFlushAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordFlushAllocation( + allocator->GetCurrentFrameIndex(), + allocation, offset, size); + } +#endif +} + +VMA_CALL_PRE void VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaInvalidateAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordInvalidateAllocation( + allocator->GetCurrentFrameIndex(), + allocation, offset, size); + } +#endif +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaCheckCorruption"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CheckCorruption(memoryTypeBits); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment( + VmaAllocator allocator, + VmaAllocation* pAllocations, + size_t allocationCount, + VkBool32* pAllocationsChanged, + const VmaDefragmentationInfo *pDefragmentationInfo, + VmaDefragmentationStats* pDefragmentationStats) +{ + // Deprecated interface, reimplemented using new one. + + VmaDefragmentationInfo2 info2 = {}; + info2.allocationCount = (uint32_t)allocationCount; + info2.pAllocations = pAllocations; + info2.pAllocationsChanged = pAllocationsChanged; + if(pDefragmentationInfo != VMA_NULL) + { + info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove; + info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove; + } + else + { + info2.maxCpuAllocationsToMove = UINT32_MAX; + info2.maxCpuBytesToMove = VK_WHOLE_SIZE; + } + // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero. + + VmaDefragmentationContext ctx; + VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx); + if(res == VK_NOT_READY) + { + res = vmaDefragmentationEnd( allocator, ctx); + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin( + VmaAllocator allocator, + const VmaDefragmentationInfo2* pInfo, + VmaDefragmentationStats* pStats, + VmaDefragmentationContext *pContext) +{ + VMA_ASSERT(allocator && pInfo && pContext); + + // Degenerate case: Nothing to defragment. 
+ if(pInfo->allocationCount == 0 && pInfo->poolCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL); + VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL); + VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations)); + VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools)); + + VMA_DEBUG_LOG("vmaDefragmentationBegin"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDefragmentationBegin( + allocator->GetCurrentFrameIndex(), *pInfo, *pContext); + } +#endif + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd( + VmaAllocator allocator, + VmaDefragmentationContext context) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaDefragmentationEnd"); + + if(context != VK_NULL_HANDLE) + { + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDefragmentationEnd( + allocator->GetCurrentFrameIndex(), context); + } +#endif + + return allocator->DefragmentationEnd(context); + } + else + { + return VK_SUCCESS; + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator allocator, + VmaDefragmentationContext context, + VmaDefragmentationPassInfo* pInfo + ) +{ + VMA_ASSERT(allocator); + VMA_ASSERT(pInfo); + VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->moveCount, pInfo->pMoves)); + + VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(context == VK_NULL_HANDLE) + { + pInfo->moveCount = 0; + return VK_SUCCESS; + } + + return allocator->DefragmentationPassBegin(pInfo, context); +} +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator allocator, + VmaDefragmentationContext context) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaEndDefragmentationPass"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(context == VK_NULL_HANDLE) + return VK_SUCCESS; + + return allocator->DefragmentationPassEnd(context); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkBuffer buffer) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer buffer, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkImage image) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, 0, image, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkImage image, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && image); + + 
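+    // allocationLocalOffset is an additional offset inside the allocation;
+    // pNext, if not null, is chained to the VkBindImageMemoryInfoKHR struct.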
VMA_DEBUG_LOG("vmaBindImageMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); + + if(pBufferCreateInfo->size == 0) + { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + + VMA_DEBUG_LOG("vmaCreateBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pBuffer = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if(res >= 0) + { + // 2. vkGetBufferMemoryRequirements. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + // Make sure alignment requirements for specific buffer usages reported + // in Physical Device Properties are included in alignment reported by memory requirements. + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0) + { + VMA_ASSERT(vkMemReq.alignment % + allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0); + } + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0) + { + VMA_ASSERT(vkMemReq.alignment % + allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0); + } + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0) + { + VMA_ASSERT(vkMemReq.alignment % + allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0); + } + + // 3. Allocate memory using allocator. + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordCreateBuffer( + allocator->GetCurrentFrameIndex(), + *pBufferCreateInfo, + *pAllocationCreateInfo, + *pAllocation); + } +#endif + + if(res >= 0) + { + // 3. Bind buffer with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. 
+ #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator allocator, + VkBuffer buffer, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDestroyBuffer( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + if(buffer != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); + } + + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); + + if(pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + + VMA_DEBUG_LOG("vmaCreateImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pImage = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkImage. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if(res >= 0) + { + VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; + + // 2. Allocate memory using allocator. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(*pImage, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + *pImage, // dedicatedImage + *pAllocationCreateInfo, + suballocType, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordCreateImage( + allocator->GetCurrentFrameIndex(), + *pImageCreateInfo, + *pAllocationCreateInfo, + *pAllocation); + } +#endif + + if(res >= 0) + { + // 3. Bind image with memory. 
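+            // Skipped when VMA_ALLOCATION_CREATE_DONT_BIND_BIT is set, in
+            // which case the caller is expected to bind manually later
+            // (e.g. with vmaBindImageMemory2).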
+ if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. + #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( + VmaAllocator allocator, + VkImage image, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDestroyImage( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + if(image != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks()); + } + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +#endif // #ifdef VMA_IMPLEMENTATION diff --git a/3rdparty/MoltenVK/.gitignore b/3rdparty/MoltenVK/.gitignore deleted file mode 100644 index b67fced9fd..0000000000 --- a/3rdparty/MoltenVK/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -.ninja_log -build.ninja -cmake_install.cmake -CMakeCache.txt -CMakeFiles -MoltenVK -moltenvk-prefix diff --git a/3rdparty/MoltenVK/CMakeLists.txt b/3rdparty/MoltenVK/CMakeLists.txt deleted file mode 100644 index ec9c2b802b..0000000000 --- a/3rdparty/MoltenVK/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -project(moltenvk NONE) -include(ExternalProject) - -ExternalProject_Add(moltenvk - GIT_REPOSITORY https://github.com/KhronosGroup/MoltenVK.git - GIT_TAG 49b97f2 - BUILD_IN_SOURCE 1 - SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK - CONFIGURE_COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK/fetchDependencies" --macos - BUILD_COMMAND xcodebuild build -quiet -project "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK/MoltenVKPackaging.xcodeproj" -scheme "MoltenVK Package \(macOS only\)" -configuration "Release" -arch "${CMAKE_HOST_SYSTEM_PROCESSOR}" - COMMAND ln -f "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK/MoltenVK/dylib/macOS/libMoltenVK.dylib" "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK/Build/Products/Release/dynamic/libMoltenVK.dylib" - INSTALL_COMMAND "" - BUILD_BYPRODUCTS "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK/Build/Products/Release/dynamic/libMoltenVK.dylib" -) diff --git a/3rdparty/OpenAL/CMakeLists.txt b/3rdparty/OpenAL/CMakeLists.txt index b9fee23ce5..715534e5bf 100644 --- a/3rdparty/OpenAL/CMakeLists.txt +++ b/3rdparty/OpenAL/CMakeLists.txt @@ -1,18 +1,11 @@ # OpenAL -if(USE_SYSTEM_OPENAL) - if(WIN32) - find_package(OpenAL CONFIG REQUIRED) - else() - find_package(OpenAL REQUIRED) - endif() - add_library(3rdparty_openal INTERFACE) - target_link_libraries(3rdparty_openal INTERFACE OpenAL::OpenAL) - set_target_properties(OpenAL::OpenAL PROPERTIES IMPORTED_GLOBAL 
ON) +if (MSVC) + find_path(OPENAL_INCLUDE_DIR al.h PATHS OpenAL/include) + find_library(OPENAL_LIBRARY OpenAL32 PATHS OpenAL/libs/Win64/) else() - option(ALSOFT_UTILS "Build utility programs" OFF) - option(ALSOFT_EXAMPLES "Build example programs" OFF) - set(LIBTYPE "STATIC") - add_subdirectory(openal-soft EXCLUDE_FROM_ALL) - add_library(3rdparty_openal INTERFACE) - target_link_libraries(3rdparty_openal INTERFACE OpenAL::OpenAL) + find_package(OpenAL REQUIRED) endif() + +add_library(3rdparty_openal INTERFACE) +target_include_directories(3rdparty_openal INTERFACE ${OPENAL_INCLUDE_DIR}) +target_link_libraries(3rdparty_openal INTERFACE ${OPENAL_LIBRARY}) diff --git a/3rdparty/OpenAL/include/al.h b/3rdparty/OpenAL/include/al.h new file mode 100644 index 0000000000..8749e1b77e --- /dev/null +++ b/3rdparty/OpenAL/include/al.h @@ -0,0 +1,655 @@ +#ifndef AL_AL_H +#define AL_AL_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#ifndef AL_API + #if defined(AL_LIBTYPE_STATIC) + #define AL_API + #elif defined(_WIN32) + #define AL_API __declspec(dllimport) + #else + #define AL_API extern + #endif +#endif + +#if defined(_WIN32) + #define AL_APIENTRY __cdecl +#else + #define AL_APIENTRY +#endif + + +/* Deprecated macros. */ +#define OPENAL +#define ALAPI AL_API +#define ALAPIENTRY AL_APIENTRY +#define AL_INVALID (-1) +#define AL_ILLEGAL_ENUM AL_INVALID_ENUM +#define AL_ILLEGAL_COMMAND AL_INVALID_OPERATION + +/* Supported AL versions. */ +#define AL_VERSION_1_0 +#define AL_VERSION_1_1 + +/** 8-bit boolean */ +typedef char ALboolean; + +/** character */ +typedef char ALchar; + +/** signed 8-bit 2's complement integer */ +typedef signed char ALbyte; + +/** unsigned 8-bit integer */ +typedef unsigned char ALubyte; + +/** signed 16-bit 2's complement integer */ +typedef short ALshort; + +/** unsigned 16-bit integer */ +typedef unsigned short ALushort; + +/** signed 32-bit 2's complement integer */ +typedef int ALint; + +/** unsigned 32-bit integer */ +typedef unsigned int ALuint; + +/** non-negative 32-bit binary integer size */ +typedef int ALsizei; + +/** enumerated 32-bit value */ +typedef int ALenum; + +/** 32-bit IEEE754 floating-point */ +typedef float ALfloat; + +/** 64-bit IEEE754 floating-point */ +typedef double ALdouble; + +/** void type (for opaque pointers only) */ +typedef void ALvoid; + + +/* Enumerant values begin at column 50. No tabs. */ + +/** "no distance model" or "no buffer" */ +#define AL_NONE 0 + +/** Boolean False. */ +#define AL_FALSE 0 + +/** Boolean True. */ +#define AL_TRUE 1 + + +/** + * Relative source. + * Type: ALboolean + * Range: [AL_TRUE, AL_FALSE] + * Default: AL_FALSE + * + * Specifies if the Source has relative coordinates. + */ +#define AL_SOURCE_RELATIVE 0x202 + + +/** + * Inner cone angle, in degrees. + * Type: ALint, ALfloat + * Range: [0 - 360] + * Default: 360 + * + * The angle covered by the inner cone, where the source will not attenuate. + */ +#define AL_CONE_INNER_ANGLE 0x1001 + +/** + * Outer cone angle, in degrees. + * Range: [0 - 360] + * Default: 360 + * + * The angle covered by the outer cone, where the source will be fully + * attenuated. + */ +#define AL_CONE_OUTER_ANGLE 0x1002 + +/** + * Source pitch. + * Type: ALfloat + * Range: [0.5 - 2.0] + * Default: 1.0 + * + * A multiplier for the frequency (sample rate) of the source's buffer. + */ +#define AL_PITCH 0x1003 + +/** + * Source or listener position. + * Type: ALfloat[3], ALint[3] + * Default: {0, 0, 0} + * + * The source or listener location in three dimensional space. 
+ *
+ * OpenAL, like OpenGL, uses a right handed coordinate system, where in a
+ * frontal default view X (thumb) points right, Y points up (index finger), and
+ * Z points towards the viewer/camera (middle finger).
+ *
+ * To switch from a left handed coordinate system, flip the sign on the Z
+ * coordinate.
+ */
+#define AL_POSITION 0x1004
+
+/**
+ * Source direction.
+ * Type: ALfloat[3], ALint[3]
+ * Default: {0, 0, 0}
+ *
+ * Specifies the current direction in local space.
+ * A zero-length vector specifies an omni-directional source (cone is ignored).
+ */
+#define AL_DIRECTION 0x1005
+
+/**
+ * Source or listener velocity.
+ * Type: ALfloat[3], ALint[3]
+ * Default: {0, 0, 0}
+ *
+ * Specifies the current velocity in local space.
+ */
+#define AL_VELOCITY 0x1006
+
+/**
+ * Source looping.
+ * Type: ALboolean
+ * Range: [AL_TRUE, AL_FALSE]
+ * Default: AL_FALSE
+ *
+ * Specifies whether source is looping.
+ */
+#define AL_LOOPING 0x1007
+
+/**
+ * Source buffer.
+ * Type: ALuint
+ * Range: any valid Buffer.
+ *
+ * Specifies the buffer to provide sound samples.
+ */
+#define AL_BUFFER 0x1009
+
+/**
+ * Source or listener gain.
+ * Type: ALfloat
+ * Range: [0.0 - ]
+ *
+ * A value of 1.0 means unattenuated. Each division by 2 equals an attenuation
+ * of about -6dB. Each multiplication by 2 equals an amplification of about
+ * +6dB.
+ *
+ * A value of 0.0 is meaningless with respect to a logarithmic scale; it is
+ * silent.
+ */
+#define AL_GAIN 0x100A
+
+/**
+ * Minimum source gain.
+ * Type: ALfloat
+ * Range: [0.0 - 1.0]
+ *
+ * The minimum gain allowed for a source, after distance and cone attenuation is
+ * applied (if applicable).
+ */
+#define AL_MIN_GAIN 0x100D
+
+/**
+ * Maximum source gain.
+ * Type: ALfloat
+ * Range: [0.0 - 1.0]
+ *
+ * The maximum gain allowed for a source, after distance and cone attenuation is
+ * applied (if applicable).
+ */
+#define AL_MAX_GAIN 0x100E
+
+/**
+ * Listener orientation.
+ * Type: ALfloat[6]
+ * Default: {0.0, 0.0, -1.0, 0.0, 1.0, 0.0}
+ *
+ * Effectively two three dimensional vectors. The first vector is the front (or
+ * "at") and the second is the top (or "up").
+ *
+ * Both vectors are in local space.
+ */
+#define AL_ORIENTATION 0x100F
+
+/**
+ * Source state (query only).
+ * Type: ALint
+ * Range: [AL_INITIAL, AL_PLAYING, AL_PAUSED, AL_STOPPED]
+ */
+#define AL_SOURCE_STATE 0x1010
+
+/* Source state values. */
+#define AL_INITIAL 0x1011
+#define AL_PLAYING 0x1012
+#define AL_PAUSED 0x1013
+#define AL_STOPPED 0x1014
+
+/**
+ * Source Buffer Queue size (query only).
+ * Type: ALint
+ *
+ * The number of buffers queued using alSourceQueueBuffers, minus the buffers
+ * removed with alSourceUnqueueBuffers.
+ */
+#define AL_BUFFERS_QUEUED 0x1015
+
+/**
+ * Source Buffer Queue processed count (query only).
+ * Type: ALint
+ *
+ * The number of queued buffers that have been fully processed, and can be
+ * removed with alSourceUnqueueBuffers.
+ *
+ * Looping sources will never fully process buffers because they will be set to
+ * play again for when the source loops.
+ */
+#define AL_BUFFERS_PROCESSED 0x1016
+
+/**
+ * Source reference distance.
+ * Type: ALfloat
+ * Range: [0.0 - ]
+ * Default: 1.0
+ *
+ * The distance in units at which no attenuation occurs.
+ *
+ * At 0.0, no distance attenuation ever occurs on non-linear attenuation models.
+ */
+#define AL_REFERENCE_DISTANCE 0x1020
+
+/**
+ * Source rolloff factor.
+ * Type: ALfloat
+ * Range: [0.0 - ]
+ * Default: 1.0
+ *
+ * Multiplier to exaggerate or diminish distance attenuation.
+ *
+ * At 0.0, no distance attenuation ever occurs.
+ */
+#define AL_ROLLOFF_FACTOR 0x1021
+
+/**
+ * Outer cone gain.
+ * Type: ALfloat
+ * Range: [0.0 - 1.0]
+ * Default: 0.0
+ *
+ * The gain attenuation applied when the listener is outside of the source's
+ * outer cone.
+ */
+#define AL_CONE_OUTER_GAIN 0x1022
+
+/**
+ * Source maximum distance.
+ * Type: ALfloat
+ * Range: [0.0 - ]
+ * Default: FLT_MAX
+ *
+ * The distance above which the source is not attenuated any further with a
+ * clamped distance model, or where attenuation reaches 0.0 gain for linear
+ * distance models with a default rolloff factor.
+ */
+#define AL_MAX_DISTANCE 0x1023
+
+/** Source buffer position, in seconds */
+#define AL_SEC_OFFSET 0x1024
+/** Source buffer position, in sample frames */
+#define AL_SAMPLE_OFFSET 0x1025
+/** Source buffer position, in bytes */
+#define AL_BYTE_OFFSET 0x1026
+
+/**
+ * Source type (query only).
+ * Type: ALint
+ * Range: [AL_STATIC, AL_STREAMING, AL_UNDETERMINED]
+ *
+ * A Source is Static if a Buffer has been attached using AL_BUFFER.
+ *
+ * A Source is Streaming if one or more Buffers have been attached using
+ * alSourceQueueBuffers.
+ *
+ * A Source is Undetermined when it has the NULL buffer attached using
+ * AL_BUFFER.
+ */
+#define AL_SOURCE_TYPE 0x1027
+
+/* Source type values. */
+#define AL_STATIC 0x1028
+#define AL_STREAMING 0x1029
+#define AL_UNDETERMINED 0x1030
+
+/** Unsigned 8-bit mono buffer format. */
+#define AL_FORMAT_MONO8 0x1100
+/** Signed 16-bit mono buffer format. */
+#define AL_FORMAT_MONO16 0x1101
+/** Unsigned 8-bit stereo buffer format. */
+#define AL_FORMAT_STEREO8 0x1102
+/** Signed 16-bit stereo buffer format. */
+#define AL_FORMAT_STEREO16 0x1103
+
+/** Buffer frequency (query only). */
+#define AL_FREQUENCY 0x2001
+/** Buffer bits per sample (query only). */
+#define AL_BITS 0x2002
+/** Buffer channel count (query only). */
+#define AL_CHANNELS 0x2003
+/** Buffer data size (query only). */
+#define AL_SIZE 0x2004
+
+/* Buffer state. Not for public use. */
+#define AL_UNUSED 0x2010
+#define AL_PENDING 0x2011
+#define AL_PROCESSED 0x2012
+
+
+/** No error. */
+#define AL_NO_ERROR 0
+
+/** Invalid name parameter passed to AL call. */
+#define AL_INVALID_NAME 0xA001
+
+/** Invalid enum parameter passed to AL call. */
+#define AL_INVALID_ENUM 0xA002
+
+/** Invalid value parameter passed to AL call. */
+#define AL_INVALID_VALUE 0xA003
+
+/** Illegal AL call. */
+#define AL_INVALID_OPERATION 0xA004
+
+/** Not enough memory. */
+#define AL_OUT_OF_MEMORY 0xA005
+
+
+/** Context string: Vendor ID. */
+#define AL_VENDOR 0xB001
+/** Context string: Version. */
+#define AL_VERSION 0xB002
+/** Context string: Renderer ID. */
+#define AL_RENDERER 0xB003
+/** Context string: Space-separated extension list. */
+#define AL_EXTENSIONS 0xB004
+
+
+/**
+ * Doppler scale.
+ * Type: ALfloat
+ * Range: [0.0 - ]
+ * Default: 1.0
+ *
+ * Scale for source and listener velocities.
+ */
+#define AL_DOPPLER_FACTOR 0xC000
+AL_API void AL_APIENTRY alDopplerFactor(ALfloat value);
+
+/**
+ * Doppler velocity (deprecated).
+ *
+ * A multiplier applied to the Speed of Sound.
+ */
+#define AL_DOPPLER_VELOCITY 0xC001
+AL_API void AL_APIENTRY alDopplerVelocity(ALfloat value);
+
+/**
+ * Speed of Sound, in units per second.
+ * Type: ALfloat
+ * Range: [0.0001 - ]
+ * Default: 343.3
+ *
+ * The speed at which sound waves are assumed to travel, when calculating the
+ * doppler effect.
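+ *
+ * For example, an application whose distance units are centimeters rather
+ * than meters would pass roughly 34330.0 here to keep doppler calculations
+ * consistent (an illustrative figure, assuming sound at ~343.3 m/s).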
+ */ +#define AL_SPEED_OF_SOUND 0xC003 +AL_API void AL_APIENTRY alSpeedOfSound(ALfloat value); + +/** + * Distance attenuation model. + * Type: ALint + * Range: [AL_NONE, AL_INVERSE_DISTANCE, AL_INVERSE_DISTANCE_CLAMPED, + * AL_LINEAR_DISTANCE, AL_LINEAR_DISTANCE_CLAMPED, + * AL_EXPONENT_DISTANCE, AL_EXPONENT_DISTANCE_CLAMPED] + * Default: AL_INVERSE_DISTANCE_CLAMPED + * + * The model by which sources attenuate with distance. + * + * None - No distance attenuation. + * Inverse - Doubling the distance halves the source gain. + * Linear - Linear gain scaling between the reference and max distances. + * Exponent - Exponential gain dropoff. + * + * Clamped variations work like the non-clamped counterparts, except the + * distance calculated is clamped between the reference and max distances. + */ +#define AL_DISTANCE_MODEL 0xD000 +AL_API void AL_APIENTRY alDistanceModel(ALenum distanceModel); + +/* Distance model values. */ +#define AL_INVERSE_DISTANCE 0xD001 +#define AL_INVERSE_DISTANCE_CLAMPED 0xD002 +#define AL_LINEAR_DISTANCE 0xD003 +#define AL_LINEAR_DISTANCE_CLAMPED 0xD004 +#define AL_EXPONENT_DISTANCE 0xD005 +#define AL_EXPONENT_DISTANCE_CLAMPED 0xD006 + +/* Renderer State management. */ +AL_API void AL_APIENTRY alEnable(ALenum capability); +AL_API void AL_APIENTRY alDisable(ALenum capability); +AL_API ALboolean AL_APIENTRY alIsEnabled(ALenum capability); + +/* State retrieval. */ +AL_API const ALchar* AL_APIENTRY alGetString(ALenum param); +AL_API void AL_APIENTRY alGetBooleanv(ALenum param, ALboolean *values); +AL_API void AL_APIENTRY alGetIntegerv(ALenum param, ALint *values); +AL_API void AL_APIENTRY alGetFloatv(ALenum param, ALfloat *values); +AL_API void AL_APIENTRY alGetDoublev(ALenum param, ALdouble *values); +AL_API ALboolean AL_APIENTRY alGetBoolean(ALenum param); +AL_API ALint AL_APIENTRY alGetInteger(ALenum param); +AL_API ALfloat AL_APIENTRY alGetFloat(ALenum param); +AL_API ALdouble AL_APIENTRY alGetDouble(ALenum param); + +/* Error retrieval. */ + +/** Obtain the first error generated in the AL context since the last check. */ +AL_API ALenum AL_APIENTRY alGetError(void); + +/** Query for the presence of an extension on the AL context. */ +AL_API ALboolean AL_APIENTRY alIsExtensionPresent(const ALchar *extname); +/** + * Retrieve the address of a function. The returned function may be context- + * specific. + */ +AL_API void* AL_APIENTRY alGetProcAddress(const ALchar *fname); +/** + * Retrieve the value of an enum. The returned value may be context-specific. 
+ */ +AL_API ALenum AL_APIENTRY alGetEnumValue(const ALchar *ename); + + +/* Set Listener parameters */ +AL_API void AL_APIENTRY alListenerf(ALenum param, ALfloat value); +AL_API void AL_APIENTRY alListener3f(ALenum param, ALfloat value1, ALfloat value2, ALfloat value3); +AL_API void AL_APIENTRY alListenerfv(ALenum param, const ALfloat *values); +AL_API void AL_APIENTRY alListeneri(ALenum param, ALint value); +AL_API void AL_APIENTRY alListener3i(ALenum param, ALint value1, ALint value2, ALint value3); +AL_API void AL_APIENTRY alListeneriv(ALenum param, const ALint *values); + +/* Get Listener parameters */ +AL_API void AL_APIENTRY alGetListenerf(ALenum param, ALfloat *value); +AL_API void AL_APIENTRY alGetListener3f(ALenum param, ALfloat *value1, ALfloat *value2, ALfloat *value3); +AL_API void AL_APIENTRY alGetListenerfv(ALenum param, ALfloat *values); +AL_API void AL_APIENTRY alGetListeneri(ALenum param, ALint *value); +AL_API void AL_APIENTRY alGetListener3i(ALenum param, ALint *value1, ALint *value2, ALint *value3); +AL_API void AL_APIENTRY alGetListeneriv(ALenum param, ALint *values); + + +/** Create Source objects. */ +AL_API void AL_APIENTRY alGenSources(ALsizei n, ALuint *sources); +/** Delete Source objects. */ +AL_API void AL_APIENTRY alDeleteSources(ALsizei n, const ALuint *sources); +/** Verify a handle is a valid Source. */ +AL_API ALboolean AL_APIENTRY alIsSource(ALuint source); + +/* Set Source parameters. */ +AL_API void AL_APIENTRY alSourcef(ALuint source, ALenum param, ALfloat value); +AL_API void AL_APIENTRY alSource3f(ALuint source, ALenum param, ALfloat value1, ALfloat value2, ALfloat value3); +AL_API void AL_APIENTRY alSourcefv(ALuint source, ALenum param, const ALfloat *values); +AL_API void AL_APIENTRY alSourcei(ALuint source, ALenum param, ALint value); +AL_API void AL_APIENTRY alSource3i(ALuint source, ALenum param, ALint value1, ALint value2, ALint value3); +AL_API void AL_APIENTRY alSourceiv(ALuint source, ALenum param, const ALint *values); + +/* Get Source parameters. 
*/
+AL_API void AL_APIENTRY alGetSourcef(ALuint source, ALenum param, ALfloat *value);
+AL_API void AL_APIENTRY alGetSource3f(ALuint source, ALenum param, ALfloat *value1, ALfloat *value2, ALfloat *value3);
+AL_API void AL_APIENTRY alGetSourcefv(ALuint source, ALenum param, ALfloat *values);
+AL_API void AL_APIENTRY alGetSourcei(ALuint source, ALenum param, ALint *value);
+AL_API void AL_APIENTRY alGetSource3i(ALuint source, ALenum param, ALint *value1, ALint *value2, ALint *value3);
+AL_API void AL_APIENTRY alGetSourceiv(ALuint source, ALenum param, ALint *values);
+
+
+/** Play, replay, or resume (if paused) a list of Sources */
+AL_API void AL_APIENTRY alSourcePlayv(ALsizei n, const ALuint *sources);
+/** Stop a list of Sources */
+AL_API void AL_APIENTRY alSourceStopv(ALsizei n, const ALuint *sources);
+/** Rewind a list of Sources */
+AL_API void AL_APIENTRY alSourceRewindv(ALsizei n, const ALuint *sources);
+/** Pause a list of Sources */
+AL_API void AL_APIENTRY alSourcePausev(ALsizei n, const ALuint *sources);
+
+/** Play, replay, or resume a Source */
+AL_API void AL_APIENTRY alSourcePlay(ALuint source);
+/** Stop a Source */
+AL_API void AL_APIENTRY alSourceStop(ALuint source);
+/** Rewind a Source (set playback position to beginning) */
+AL_API void AL_APIENTRY alSourceRewind(ALuint source);
+/** Pause a Source */
+AL_API void AL_APIENTRY alSourcePause(ALuint source);
+
+/** Queue buffers onto a source */
+AL_API void AL_APIENTRY alSourceQueueBuffers(ALuint source, ALsizei nb, const ALuint *buffers);
+/** Unqueue processed buffers from a source */
+AL_API void AL_APIENTRY alSourceUnqueueBuffers(ALuint source, ALsizei nb, ALuint *buffers);
+
+
+/** Create Buffer objects */
+AL_API void AL_APIENTRY alGenBuffers(ALsizei n, ALuint *buffers);
+/** Delete Buffer objects */
+AL_API void AL_APIENTRY alDeleteBuffers(ALsizei n, const ALuint *buffers);
+/** Verify a handle is a valid Buffer */
+AL_API ALboolean AL_APIENTRY alIsBuffer(ALuint buffer);
+
+/** Specifies the data to be copied into a buffer */
+AL_API void AL_APIENTRY alBufferData(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq);
+
+/* Set Buffer parameters. */
+AL_API void AL_APIENTRY alBufferf(ALuint buffer, ALenum param, ALfloat value);
+AL_API void AL_APIENTRY alBuffer3f(ALuint buffer, ALenum param, ALfloat value1, ALfloat value2, ALfloat value3);
+AL_API void AL_APIENTRY alBufferfv(ALuint buffer, ALenum param, const ALfloat *values);
+AL_API void AL_APIENTRY alBufferi(ALuint buffer, ALenum param, ALint value);
+AL_API void AL_APIENTRY alBuffer3i(ALuint buffer, ALenum param, ALint value1, ALint value2, ALint value3);
+AL_API void AL_APIENTRY alBufferiv(ALuint buffer, ALenum param, const ALint *values);
+
+/* Get Buffer parameters. */
+AL_API void AL_APIENTRY alGetBufferf(ALuint buffer, ALenum param, ALfloat *value);
+AL_API void AL_APIENTRY alGetBuffer3f(ALuint buffer, ALenum param, ALfloat *value1, ALfloat *value2, ALfloat *value3);
+AL_API void AL_APIENTRY alGetBufferfv(ALuint buffer, ALenum param, ALfloat *values);
+AL_API void AL_APIENTRY alGetBufferi(ALuint buffer, ALenum param, ALint *value);
+AL_API void AL_APIENTRY alGetBuffer3i(ALuint buffer, ALenum param, ALint *value1, ALint *value2, ALint *value3);
+AL_API void AL_APIENTRY alGetBufferiv(ALuint buffer, ALenum param, ALint *values);
+
+/* Pointer-to-function type, useful for dynamically getting AL entry points.
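+
+   For example (an illustrative sketch), an entry point can be fetched at run
+   time and called through the matching typedef:
+
+       LPALGETSTRING p_alGetString =
+           (LPALGETSTRING)alGetProcAddress("alGetString");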
*/ +typedef void (AL_APIENTRY *LPALENABLE)(ALenum capability); +typedef void (AL_APIENTRY *LPALDISABLE)(ALenum capability); +typedef ALboolean (AL_APIENTRY *LPALISENABLED)(ALenum capability); +typedef const ALchar* (AL_APIENTRY *LPALGETSTRING)(ALenum param); +typedef void (AL_APIENTRY *LPALGETBOOLEANV)(ALenum param, ALboolean *values); +typedef void (AL_APIENTRY *LPALGETINTEGERV)(ALenum param, ALint *values); +typedef void (AL_APIENTRY *LPALGETFLOATV)(ALenum param, ALfloat *values); +typedef void (AL_APIENTRY *LPALGETDOUBLEV)(ALenum param, ALdouble *values); +typedef ALboolean (AL_APIENTRY *LPALGETBOOLEAN)(ALenum param); +typedef ALint (AL_APIENTRY *LPALGETINTEGER)(ALenum param); +typedef ALfloat (AL_APIENTRY *LPALGETFLOAT)(ALenum param); +typedef ALdouble (AL_APIENTRY *LPALGETDOUBLE)(ALenum param); +typedef ALenum (AL_APIENTRY *LPALGETERROR)(void); +typedef ALboolean (AL_APIENTRY *LPALISEXTENSIONPRESENT)(const ALchar *extname); +typedef void* (AL_APIENTRY *LPALGETPROCADDRESS)(const ALchar *fname); +typedef ALenum (AL_APIENTRY *LPALGETENUMVALUE)(const ALchar *ename); +typedef void (AL_APIENTRY *LPALLISTENERF)(ALenum param, ALfloat value); +typedef void (AL_APIENTRY *LPALLISTENER3F)(ALenum param, ALfloat value1, ALfloat value2, ALfloat value3); +typedef void (AL_APIENTRY *LPALLISTENERFV)(ALenum param, const ALfloat *values); +typedef void (AL_APIENTRY *LPALLISTENERI)(ALenum param, ALint value); +typedef void (AL_APIENTRY *LPALLISTENER3I)(ALenum param, ALint value1, ALint value2, ALint value3); +typedef void (AL_APIENTRY *LPALLISTENERIV)(ALenum param, const ALint *values); +typedef void (AL_APIENTRY *LPALGETLISTENERF)(ALenum param, ALfloat *value); +typedef void (AL_APIENTRY *LPALGETLISTENER3F)(ALenum param, ALfloat *value1, ALfloat *value2, ALfloat *value3); +typedef void (AL_APIENTRY *LPALGETLISTENERFV)(ALenum param, ALfloat *values); +typedef void (AL_APIENTRY *LPALGETLISTENERI)(ALenum param, ALint *value); +typedef void (AL_APIENTRY *LPALGETLISTENER3I)(ALenum param, ALint *value1, ALint *value2, ALint *value3); +typedef void (AL_APIENTRY *LPALGETLISTENERIV)(ALenum param, ALint *values); +typedef void (AL_APIENTRY *LPALGENSOURCES)(ALsizei n, ALuint *sources); +typedef void (AL_APIENTRY *LPALDELETESOURCES)(ALsizei n, const ALuint *sources); +typedef ALboolean (AL_APIENTRY *LPALISSOURCE)(ALuint source); +typedef void (AL_APIENTRY *LPALSOURCEF)(ALuint source, ALenum param, ALfloat value); +typedef void (AL_APIENTRY *LPALSOURCE3F)(ALuint source, ALenum param, ALfloat value1, ALfloat value2, ALfloat value3); +typedef void (AL_APIENTRY *LPALSOURCEFV)(ALuint source, ALenum param, const ALfloat *values); +typedef void (AL_APIENTRY *LPALSOURCEI)(ALuint source, ALenum param, ALint value); +typedef void (AL_APIENTRY *LPALSOURCE3I)(ALuint source, ALenum param, ALint value1, ALint value2, ALint value3); +typedef void (AL_APIENTRY *LPALSOURCEIV)(ALuint source, ALenum param, const ALint *values); +typedef void (AL_APIENTRY *LPALGETSOURCEF)(ALuint source, ALenum param, ALfloat *value); +typedef void (AL_APIENTRY *LPALGETSOURCE3F)(ALuint source, ALenum param, ALfloat *value1, ALfloat *value2, ALfloat *value3); +typedef void (AL_APIENTRY *LPALGETSOURCEFV)(ALuint source, ALenum param, ALfloat *values); +typedef void (AL_APIENTRY *LPALGETSOURCEI)(ALuint source, ALenum param, ALint *value); +typedef void (AL_APIENTRY *LPALGETSOURCE3I)(ALuint source, ALenum param, ALint *value1, ALint *value2, ALint *value3); +typedef void (AL_APIENTRY *LPALGETSOURCEIV)(ALuint source, ALenum param, ALint *values); +typedef 
void (AL_APIENTRY *LPALSOURCEPLAYV)(ALsizei n, const ALuint *sources); +typedef void (AL_APIENTRY *LPALSOURCESTOPV)(ALsizei n, const ALuint *sources); +typedef void (AL_APIENTRY *LPALSOURCEREWINDV)(ALsizei n, const ALuint *sources); +typedef void (AL_APIENTRY *LPALSOURCEPAUSEV)(ALsizei n, const ALuint *sources); +typedef void (AL_APIENTRY *LPALSOURCEPLAY)(ALuint source); +typedef void (AL_APIENTRY *LPALSOURCESTOP)(ALuint source); +typedef void (AL_APIENTRY *LPALSOURCEREWIND)(ALuint source); +typedef void (AL_APIENTRY *LPALSOURCEPAUSE)(ALuint source); +typedef void (AL_APIENTRY *LPALSOURCEQUEUEBUFFERS)(ALuint source, ALsizei nb, const ALuint *buffers); +typedef void (AL_APIENTRY *LPALSOURCEUNQUEUEBUFFERS)(ALuint source, ALsizei nb, ALuint *buffers); +typedef void (AL_APIENTRY *LPALGENBUFFERS)(ALsizei n, ALuint *buffers); +typedef void (AL_APIENTRY *LPALDELETEBUFFERS)(ALsizei n, const ALuint *buffers); +typedef ALboolean (AL_APIENTRY *LPALISBUFFER)(ALuint buffer); +typedef void (AL_APIENTRY *LPALBUFFERDATA)(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq); +typedef void (AL_APIENTRY *LPALBUFFERF)(ALuint buffer, ALenum param, ALfloat value); +typedef void (AL_APIENTRY *LPALBUFFER3F)(ALuint buffer, ALenum param, ALfloat value1, ALfloat value2, ALfloat value3); +typedef void (AL_APIENTRY *LPALBUFFERFV)(ALuint buffer, ALenum param, const ALfloat *values); +typedef void (AL_APIENTRY *LPALBUFFERI)(ALuint buffer, ALenum param, ALint value); +typedef void (AL_APIENTRY *LPALBUFFER3I)(ALuint buffer, ALenum param, ALint value1, ALint value2, ALint value3); +typedef void (AL_APIENTRY *LPALBUFFERIV)(ALuint buffer, ALenum param, const ALint *values); +typedef void (AL_APIENTRY *LPALGETBUFFERF)(ALuint buffer, ALenum param, ALfloat *value); +typedef void (AL_APIENTRY *LPALGETBUFFER3F)(ALuint buffer, ALenum param, ALfloat *value1, ALfloat *value2, ALfloat *value3); +typedef void (AL_APIENTRY *LPALGETBUFFERFV)(ALuint buffer, ALenum param, ALfloat *values); +typedef void (AL_APIENTRY *LPALGETBUFFERI)(ALuint buffer, ALenum param, ALint *value); +typedef void (AL_APIENTRY *LPALGETBUFFER3I)(ALuint buffer, ALenum param, ALint *value1, ALint *value2, ALint *value3); +typedef void (AL_APIENTRY *LPALGETBUFFERIV)(ALuint buffer, ALenum param, ALint *values); +typedef void (AL_APIENTRY *LPALDOPPLERFACTOR)(ALfloat value); +typedef void (AL_APIENTRY *LPALDOPPLERVELOCITY)(ALfloat value); +typedef void (AL_APIENTRY *LPALSPEEDOFSOUND)(ALfloat value); +typedef void (AL_APIENTRY *LPALDISTANCEMODEL)(ALenum distanceModel); + +#if defined(__cplusplus) +} /* extern "C" */ +#endif + +#endif /* AL_AL_H */ diff --git a/3rdparty/OpenAL/include/alc.h b/3rdparty/OpenAL/include/alc.h new file mode 100644 index 0000000000..c73b6e9179 --- /dev/null +++ b/3rdparty/OpenAL/include/alc.h @@ -0,0 +1,270 @@ +#ifndef AL_ALC_H +#define AL_ALC_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#ifndef ALC_API + #if defined(AL_LIBTYPE_STATIC) + #define ALC_API + #elif defined(_WIN32) + #define ALC_API __declspec(dllimport) + #else + #define ALC_API extern + #endif +#endif + +#if defined(_WIN32) + #define ALC_APIENTRY __cdecl +#else + #define ALC_APIENTRY +#endif + + +/* Deprecated macros. */ +#define ALCAPI ALC_API +#define ALCAPIENTRY ALC_APIENTRY +#define ALC_INVALID 0 + +/** Supported ALC version? 
*/
+#define ALC_VERSION_0_1 1
+
+/** Opaque device handle */
+typedef struct ALCdevice ALCdevice;
+/** Opaque context handle */
+typedef struct ALCcontext ALCcontext;
+
+/** 8-bit boolean */
+typedef char ALCboolean;
+
+/** character */
+typedef char ALCchar;
+
+/** signed 8-bit 2's complement integer */
+typedef signed char ALCbyte;
+
+/** unsigned 8-bit integer */
+typedef unsigned char ALCubyte;
+
+/** signed 16-bit 2's complement integer */
+typedef short ALCshort;
+
+/** unsigned 16-bit integer */
+typedef unsigned short ALCushort;
+
+/** signed 32-bit 2's complement integer */
+typedef int ALCint;
+
+/** unsigned 32-bit integer */
+typedef unsigned int ALCuint;
+
+/** non-negative 32-bit binary integer size */
+typedef int ALCsizei;
+
+/** enumerated 32-bit value */
+typedef int ALCenum;
+
+/** 32-bit IEEE754 floating-point */
+typedef float ALCfloat;
+
+/** 64-bit IEEE754 floating-point */
+typedef double ALCdouble;
+
+/** void type (for opaque pointers only) */
+typedef void ALCvoid;
+
+
+/* Enumerant values begin at column 50. No tabs. */
+
+/** Boolean False. */
+#define ALC_FALSE 0
+
+/** Boolean True. */
+#define ALC_TRUE 1
+
+/** Context attribute: <int> Hz. */
+#define ALC_FREQUENCY 0x1007
+
+/** Context attribute: <int> Hz. */
+#define ALC_REFRESH 0x1008
+
+/** Context attribute: <int> AL_TRUE or AL_FALSE synchronous context? */
+#define ALC_SYNC 0x1009
+
+/** Context attribute: <int> requested Mono (3D) Sources. */
+#define ALC_MONO_SOURCES 0x1010
+
+/** Context attribute: <int> requested Stereo Sources. */
+#define ALC_STEREO_SOURCES 0x1011
+
+/** No error. */
+#define ALC_NO_ERROR 0
+
+/** Invalid device handle. */
+#define ALC_INVALID_DEVICE 0xA001
+
+/** Invalid context handle. */
+#define ALC_INVALID_CONTEXT 0xA002
+
+/** Invalid enum parameter passed to an ALC call. */
+#define ALC_INVALID_ENUM 0xA003
+
+/** Invalid value parameter passed to an ALC call. */
+#define ALC_INVALID_VALUE 0xA004
+
+/** Out of memory. */
+#define ALC_OUT_OF_MEMORY 0xA005
+
+
+/** Runtime ALC major version. */
+#define ALC_MAJOR_VERSION 0x1000
+/** Runtime ALC minor version. */
+#define ALC_MINOR_VERSION 0x1001
+
+/** Context attribute list size. */
+#define ALC_ATTRIBUTES_SIZE 0x1002
+/** Context attribute list properties. */
+#define ALC_ALL_ATTRIBUTES 0x1003
+
+/** String for the default device specifier. */
+#define ALC_DEFAULT_DEVICE_SPECIFIER 0x1004
+/**
+ * String for the given device's specifier.
+ *
+ * If device handle is NULL, it is instead a null-char separated list of
+ * strings of known device specifiers (list ends with an empty string).
+ */
+#define ALC_DEVICE_SPECIFIER 0x1005
+/** String for space-separated list of ALC extensions. */
+#define ALC_EXTENSIONS 0x1006
+
+
+/** Capture extension */
+#define ALC_EXT_CAPTURE 1
+/**
+ * String for the given capture device's specifier.
+ *
+ * If device handle is NULL, it is instead a null-char separated list of
+ * strings of known capture device specifiers (list ends with an empty string).
+ */
+#define ALC_CAPTURE_DEVICE_SPECIFIER 0x310
+/** String for the default capture device specifier. */
+#define ALC_CAPTURE_DEFAULT_DEVICE_SPECIFIER 0x311
+/** Number of sample frames available for capture. */
+#define ALC_CAPTURE_SAMPLES 0x312
+
+
+/** Enumerate All extension */
+#define ALC_ENUMERATE_ALL_EXT 1
+/** String for the default extended device specifier. */
+#define ALC_DEFAULT_ALL_DEVICES_SPECIFIER 0x1012
+/**
+ * String for the given extended device's specifier.
+ * + * If device handle is NULL, it is instead a null-char separated list of + * strings of known extended device specifiers (list ends with an empty string). + */ +#define ALC_ALL_DEVICES_SPECIFIER 0x1013 + + +/* Context management. */ + +/** Create and attach a context to the given device. */ +ALC_API ALCcontext* ALC_APIENTRY alcCreateContext(ALCdevice *device, const ALCint *attrlist); +/** + * Makes the given context the active process-wide context. Passing NULL clears + * the active context. + */ +ALC_API ALCboolean ALC_APIENTRY alcMakeContextCurrent(ALCcontext *context); +/** Resumes processing updates for the given context. */ +ALC_API void ALC_APIENTRY alcProcessContext(ALCcontext *context); +/** Suspends updates for the given context. */ +ALC_API void ALC_APIENTRY alcSuspendContext(ALCcontext *context); +/** Removes a context from its device and destroys it. */ +ALC_API void ALC_APIENTRY alcDestroyContext(ALCcontext *context); +/** Returns the currently active context. */ +ALC_API ALCcontext* ALC_APIENTRY alcGetCurrentContext(void); +/** Returns the device that a particular context is attached to. */ +ALC_API ALCdevice* ALC_APIENTRY alcGetContextsDevice(ALCcontext *context); + +/* Device management. */ + +/** Opens the named playback device. */ +ALC_API ALCdevice* ALC_APIENTRY alcOpenDevice(const ALCchar *devicename); +/** Closes the given playback device. */ +ALC_API ALCboolean ALC_APIENTRY alcCloseDevice(ALCdevice *device); + +/* Error support. */ + +/** Obtain the most recent Device error. */ +ALC_API ALCenum ALC_APIENTRY alcGetError(ALCdevice *device); + +/* Extension support. */ + +/** + * Query for the presence of an extension on the device. Pass a NULL device to + * query a device-independent extension. + */ +ALC_API ALCboolean ALC_APIENTRY alcIsExtensionPresent(ALCdevice *device, const ALCchar *extname); +/** + * Retrieve the address of a function. Given a non-NULL device, the returned + * function may be device-specific. + */ +ALC_API ALCvoid* ALC_APIENTRY alcGetProcAddress(ALCdevice *device, const ALCchar *funcname); +/** + * Retrieve the value of an enum. Given a non-NULL device, the returned value + * may be device-specific. + */ +ALC_API ALCenum ALC_APIENTRY alcGetEnumValue(ALCdevice *device, const ALCchar *enumname); + +/* Query functions. */ + +/** Returns information about the device, and error strings. */ +ALC_API const ALCchar* ALC_APIENTRY alcGetString(ALCdevice *device, ALCenum param); +/** Returns information about the device and the version of OpenAL. */ +ALC_API void ALC_APIENTRY alcGetIntegerv(ALCdevice *device, ALCenum param, ALCsizei size, ALCint *values); + +/* Capture functions. */ + +/** + * Opens the named capture device with the given frequency, format, and buffer + * size. + */ +ALC_API ALCdevice* ALC_APIENTRY alcCaptureOpenDevice(const ALCchar *devicename, ALCuint frequency, ALCenum format, ALCsizei buffersize); +/** Closes the given capture device. */ +ALC_API ALCboolean ALC_APIENTRY alcCaptureCloseDevice(ALCdevice *device); +/** Starts capturing samples into the device buffer. */ +ALC_API void ALC_APIENTRY alcCaptureStart(ALCdevice *device); +/** Stops capturing samples. Samples in the device buffer remain available. */ +ALC_API void ALC_APIENTRY alcCaptureStop(ALCdevice *device); +/** Reads samples from the device buffer. */ +ALC_API void ALC_APIENTRY alcCaptureSamples(ALCdevice *device, ALCvoid *buffer, ALCsizei samples); + +/* Pointer-to-function type, useful for dynamically getting ALC entry points.
*/ +typedef ALCcontext* (ALC_APIENTRY *LPALCCREATECONTEXT)(ALCdevice *device, const ALCint *attrlist); +typedef ALCboolean (ALC_APIENTRY *LPALCMAKECONTEXTCURRENT)(ALCcontext *context); +typedef void (ALC_APIENTRY *LPALCPROCESSCONTEXT)(ALCcontext *context); +typedef void (ALC_APIENTRY *LPALCSUSPENDCONTEXT)(ALCcontext *context); +typedef void (ALC_APIENTRY *LPALCDESTROYCONTEXT)(ALCcontext *context); +typedef ALCcontext* (ALC_APIENTRY *LPALCGETCURRENTCONTEXT)(void); +typedef ALCdevice* (ALC_APIENTRY *LPALCGETCONTEXTSDEVICE)(ALCcontext *context); +typedef ALCdevice* (ALC_APIENTRY *LPALCOPENDEVICE)(const ALCchar *devicename); +typedef ALCboolean (ALC_APIENTRY *LPALCCLOSEDEVICE)(ALCdevice *device); +typedef ALCenum (ALC_APIENTRY *LPALCGETERROR)(ALCdevice *device); +typedef ALCboolean (ALC_APIENTRY *LPALCISEXTENSIONPRESENT)(ALCdevice *device, const ALCchar *extname); +typedef ALCvoid* (ALC_APIENTRY *LPALCGETPROCADDRESS)(ALCdevice *device, const ALCchar *funcname); +typedef ALCenum (ALC_APIENTRY *LPALCGETENUMVALUE)(ALCdevice *device, const ALCchar *enumname); +typedef const ALCchar* (ALC_APIENTRY *LPALCGETSTRING)(ALCdevice *device, ALCenum param); +typedef void (ALC_APIENTRY *LPALCGETINTEGERV)(ALCdevice *device, ALCenum param, ALCsizei size, ALCint *values); +typedef ALCdevice* (ALC_APIENTRY *LPALCCAPTUREOPENDEVICE)(const ALCchar *devicename, ALCuint frequency, ALCenum format, ALCsizei buffersize); +typedef ALCboolean (ALC_APIENTRY *LPALCCAPTURECLOSEDEVICE)(ALCdevice *device); +typedef void (ALC_APIENTRY *LPALCCAPTURESTART)(ALCdevice *device); +typedef void (ALC_APIENTRY *LPALCCAPTURESTOP)(ALCdevice *device); +typedef void (ALC_APIENTRY *LPALCCAPTURESAMPLES)(ALCdevice *device, ALCvoid *buffer, ALCsizei samples); + +#if defined(__cplusplus) +} +#endif + +#endif /* AL_ALC_H */ diff --git a/3rdparty/OpenAL/include/alext.h b/3rdparty/OpenAL/include/alext.h new file mode 100644 index 0000000000..f80b0708ab --- /dev/null +++ b/3rdparty/OpenAL/include/alext.h @@ -0,0 +1,586 @@ +/** + * OpenAL cross platform audio library + * Copyright (C) 2008 by authors. + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * Or go to http://www.gnu.org/copyleft/lgpl.html + */ + +#ifndef AL_ALEXT_H +#define AL_ALEXT_H + +#include <stddef.h> +/* Define int64 and uint64 types */ +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \ + (defined(__cplusplus) && __cplusplus >= 201103L) +#include <stdint.h> +typedef int64_t _alsoft_int64_t; +typedef uint64_t _alsoft_uint64_t; +#elif defined(_WIN32) +typedef __int64 _alsoft_int64_t; +typedef unsigned __int64 _alsoft_uint64_t; +#else +/* Fallback if nothing above works */ +#include <stdint.h> +typedef int64_t _alsoft_int64_t; +typedef uint64_t _alsoft_uint64_t; +#endif + +#include "alc.h" +#include "al.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef AL_LOKI_IMA_ADPCM_format +#define AL_LOKI_IMA_ADPCM_format 1 +#define AL_FORMAT_IMA_ADPCM_MONO16_EXT 0x10000 +#define AL_FORMAT_IMA_ADPCM_STEREO16_EXT 0x10001 +#endif + +#ifndef AL_LOKI_WAVE_format +#define AL_LOKI_WAVE_format 1 +#define AL_FORMAT_WAVE_EXT 0x10002 +#endif + +#ifndef AL_EXT_vorbis +#define AL_EXT_vorbis 1 +#define AL_FORMAT_VORBIS_EXT 0x10003 +#endif + +#ifndef AL_LOKI_quadriphonic +#define AL_LOKI_quadriphonic 1 +#define AL_FORMAT_QUAD8_LOKI 0x10004 +#define AL_FORMAT_QUAD16_LOKI 0x10005 +#endif + +#ifndef AL_EXT_float32 +#define AL_EXT_float32 1 +#define AL_FORMAT_MONO_FLOAT32 0x10010 +#define AL_FORMAT_STEREO_FLOAT32 0x10011 +#endif + +#ifndef AL_EXT_double +#define AL_EXT_double 1 +#define AL_FORMAT_MONO_DOUBLE_EXT 0x10012 +#define AL_FORMAT_STEREO_DOUBLE_EXT 0x10013 +#endif + +#ifndef AL_EXT_MULAW +#define AL_EXT_MULAW 1 +#define AL_FORMAT_MONO_MULAW_EXT 0x10014 +#define AL_FORMAT_STEREO_MULAW_EXT 0x10015 +#endif + +#ifndef AL_EXT_ALAW +#define AL_EXT_ALAW 1 +#define AL_FORMAT_MONO_ALAW_EXT 0x10016 +#define AL_FORMAT_STEREO_ALAW_EXT 0x10017 +#endif + +#ifndef ALC_LOKI_audio_channel +#define ALC_LOKI_audio_channel 1 +#define ALC_CHAN_MAIN_LOKI 0x500001 +#define ALC_CHAN_PCM_LOKI 0x500002 +#define ALC_CHAN_CD_LOKI 0x500003 +#endif + +#ifndef AL_EXT_MCFORMATS +#define AL_EXT_MCFORMATS 1 +/* Provides support for surround sound buffer formats with 8, 16, and 32-bit + * samples. + * + * QUAD8: Unsigned 8-bit, Quadraphonic (Front Left, Front Right, Rear Left, + * Rear Right). + * QUAD16: Signed 16-bit, Quadraphonic. + * QUAD32: 32-bit float, Quadraphonic. + * REAR8: Unsigned 8-bit, Rear Stereo (Rear Left, Rear Right). + * REAR16: Signed 16-bit, Rear Stereo. + * REAR32: 32-bit float, Rear Stereo. + * 51CHN8: Unsigned 8-bit, 5.1 Surround (Front Left, Front Right, Front Center, + * LFE, Side Left, Side Right). Note that some audio systems may label + * 5.1's Side channels as Rear or Surround; they are equivalent for the + * purposes of this extension. + * 51CHN16: Signed 16-bit, 5.1 Surround. + * 51CHN32: 32-bit float, 5.1 Surround. + * 61CHN8: Unsigned 8-bit, 6.1 Surround (Front Left, Front Right, Front Center, + * LFE, Rear Center, Side Left, Side Right). + * 61CHN16: Signed 16-bit, 6.1 Surround. + * 61CHN32: 32-bit float, 6.1 Surround. + * 71CHN8: Unsigned 8-bit, 7.1 Surround (Front Left, Front Right, Front Center, + * LFE, Rear Left, Rear Right, Side Left, Side Right). + * 71CHN16: Signed 16-bit, 7.1 Surround. + * 71CHN32: 32-bit float, 7.1 Surround.
+ */ +#define AL_FORMAT_QUAD8 0x1204 +#define AL_FORMAT_QUAD16 0x1205 +#define AL_FORMAT_QUAD32 0x1206 +#define AL_FORMAT_REAR8 0x1207 +#define AL_FORMAT_REAR16 0x1208 +#define AL_FORMAT_REAR32 0x1209 +#define AL_FORMAT_51CHN8 0x120A +#define AL_FORMAT_51CHN16 0x120B +#define AL_FORMAT_51CHN32 0x120C +#define AL_FORMAT_61CHN8 0x120D +#define AL_FORMAT_61CHN16 0x120E +#define AL_FORMAT_61CHN32 0x120F +#define AL_FORMAT_71CHN8 0x1210 +#define AL_FORMAT_71CHN16 0x1211 +#define AL_FORMAT_71CHN32 0x1212 +#endif + +#ifndef AL_EXT_MULAW_MCFORMATS +#define AL_EXT_MULAW_MCFORMATS 1 +#define AL_FORMAT_MONO_MULAW 0x10014 +#define AL_FORMAT_STEREO_MULAW 0x10015 +#define AL_FORMAT_QUAD_MULAW 0x10021 +#define AL_FORMAT_REAR_MULAW 0x10022 +#define AL_FORMAT_51CHN_MULAW 0x10023 +#define AL_FORMAT_61CHN_MULAW 0x10024 +#define AL_FORMAT_71CHN_MULAW 0x10025 +#endif + +#ifndef AL_EXT_IMA4 +#define AL_EXT_IMA4 1 +#define AL_FORMAT_MONO_IMA4 0x1300 +#define AL_FORMAT_STEREO_IMA4 0x1301 +#endif + +#ifndef AL_EXT_STATIC_BUFFER +#define AL_EXT_STATIC_BUFFER 1 +typedef void (AL_APIENTRY*PFNALBUFFERDATASTATICPROC)(const ALint,ALenum,ALvoid*,ALsizei,ALsizei); +#ifdef AL_ALEXT_PROTOTYPES +AL_API void AL_APIENTRY alBufferDataStatic(const ALint buffer, ALenum format, ALvoid *data, ALsizei len, ALsizei freq); +#endif +#endif + +#ifndef ALC_EXT_EFX +#define ALC_EXT_EFX 1 +#include "efx.h" +#endif + +#ifndef ALC_EXT_disconnect +#define ALC_EXT_disconnect 1 +#define ALC_CONNECTED 0x313 +#endif + +#ifndef ALC_EXT_thread_local_context +#define ALC_EXT_thread_local_context 1 +typedef ALCboolean (ALC_APIENTRY*PFNALCSETTHREADCONTEXTPROC)(ALCcontext *context); +typedef ALCcontext* (ALC_APIENTRY*PFNALCGETTHREADCONTEXTPROC)(void); +#ifdef AL_ALEXT_PROTOTYPES +ALC_API ALCboolean ALC_APIENTRY alcSetThreadContext(ALCcontext *context); +ALC_API ALCcontext* ALC_APIENTRY alcGetThreadContext(void); +#endif +#endif + +#ifndef AL_EXT_source_distance_model +#define AL_EXT_source_distance_model 1 +#define AL_SOURCE_DISTANCE_MODEL 0x200 +#endif + +#ifndef AL_SOFT_buffer_sub_data +#define AL_SOFT_buffer_sub_data 1 +#define AL_BYTE_RW_OFFSETS_SOFT 0x1031 +#define AL_SAMPLE_RW_OFFSETS_SOFT 0x1032 +typedef void (AL_APIENTRY*PFNALBUFFERSUBDATASOFTPROC)(ALuint,ALenum,const ALvoid*,ALsizei,ALsizei); +#ifdef AL_ALEXT_PROTOTYPES +AL_API void AL_APIENTRY alBufferSubDataSOFT(ALuint buffer,ALenum format,const ALvoid *data,ALsizei offset,ALsizei length); +#endif +#endif + +#ifndef AL_SOFT_loop_points +#define AL_SOFT_loop_points 1 +#define AL_LOOP_POINTS_SOFT 0x2015 +#endif + +#ifndef AL_EXT_FOLDBACK +#define AL_EXT_FOLDBACK 1 +#define AL_EXT_FOLDBACK_NAME "AL_EXT_FOLDBACK" +#define AL_FOLDBACK_EVENT_BLOCK 0x4112 +#define AL_FOLDBACK_EVENT_START 0x4111 +#define AL_FOLDBACK_EVENT_STOP 0x4113 +#define AL_FOLDBACK_MODE_MONO 0x4101 +#define AL_FOLDBACK_MODE_STEREO 0x4102 +typedef void (AL_APIENTRY*LPALFOLDBACKCALLBACK)(ALenum,ALsizei); +typedef void (AL_APIENTRY*LPALREQUESTFOLDBACKSTART)(ALenum,ALsizei,ALsizei,ALfloat*,LPALFOLDBACKCALLBACK); +typedef void (AL_APIENTRY*LPALREQUESTFOLDBACKSTOP)(void); +#ifdef AL_ALEXT_PROTOTYPES +AL_API void AL_APIENTRY alRequestFoldbackStart(ALenum mode,ALsizei count,ALsizei length,ALfloat *mem,LPALFOLDBACKCALLBACK callback); +AL_API void AL_APIENTRY alRequestFoldbackStop(void); +#endif +#endif + +#ifndef ALC_EXT_DEDICATED +#define ALC_EXT_DEDICATED 1 +#define AL_DEDICATED_GAIN 0x0001 +#define AL_EFFECT_DEDICATED_DIALOGUE 0x9001 +#define AL_EFFECT_DEDICATED_LOW_FREQUENCY_EFFECT 0x9000 +#endif + +#ifndef AL_SOFT_buffer_samples 
+#define AL_SOFT_buffer_samples 1 +/* Channel configurations */ +#define AL_MONO_SOFT 0x1500 +#define AL_STEREO_SOFT 0x1501 +#define AL_REAR_SOFT 0x1502 +#define AL_QUAD_SOFT 0x1503 +#define AL_5POINT1_SOFT 0x1504 +#define AL_6POINT1_SOFT 0x1505 +#define AL_7POINT1_SOFT 0x1506 + +/* Sample types */ +#define AL_BYTE_SOFT 0x1400 +#define AL_UNSIGNED_BYTE_SOFT 0x1401 +#define AL_SHORT_SOFT 0x1402 +#define AL_UNSIGNED_SHORT_SOFT 0x1403 +#define AL_INT_SOFT 0x1404 +#define AL_UNSIGNED_INT_SOFT 0x1405 +#define AL_FLOAT_SOFT 0x1406 +#define AL_DOUBLE_SOFT 0x1407 +#define AL_BYTE3_SOFT 0x1408 +#define AL_UNSIGNED_BYTE3_SOFT 0x1409 + +/* Storage formats */ +#define AL_MONO8_SOFT 0x1100 +#define AL_MONO16_SOFT 0x1101 +#define AL_MONO32F_SOFT 0x10010 +#define AL_STEREO8_SOFT 0x1102 +#define AL_STEREO16_SOFT 0x1103 +#define AL_STEREO32F_SOFT 0x10011 +#define AL_QUAD8_SOFT 0x1204 +#define AL_QUAD16_SOFT 0x1205 +#define AL_QUAD32F_SOFT 0x1206 +#define AL_REAR8_SOFT 0x1207 +#define AL_REAR16_SOFT 0x1208 +#define AL_REAR32F_SOFT 0x1209 +#define AL_5POINT1_8_SOFT 0x120A +#define AL_5POINT1_16_SOFT 0x120B +#define AL_5POINT1_32F_SOFT 0x120C +#define AL_6POINT1_8_SOFT 0x120D +#define AL_6POINT1_16_SOFT 0x120E +#define AL_6POINT1_32F_SOFT 0x120F +#define AL_7POINT1_8_SOFT 0x1210 +#define AL_7POINT1_16_SOFT 0x1211 +#define AL_7POINT1_32F_SOFT 0x1212 + +/* Buffer attributes */ +#define AL_INTERNAL_FORMAT_SOFT 0x2008 +#define AL_BYTE_LENGTH_SOFT 0x2009 +#define AL_SAMPLE_LENGTH_SOFT 0x200A +#define AL_SEC_LENGTH_SOFT 0x200B + +typedef void (AL_APIENTRY*LPALBUFFERSAMPLESSOFT)(ALuint,ALuint,ALenum,ALsizei,ALenum,ALenum,const ALvoid*); +typedef void (AL_APIENTRY*LPALBUFFERSUBSAMPLESSOFT)(ALuint,ALsizei,ALsizei,ALenum,ALenum,const ALvoid*); +typedef void (AL_APIENTRY*LPALGETBUFFERSAMPLESSOFT)(ALuint,ALsizei,ALsizei,ALenum,ALenum,ALvoid*); +typedef ALboolean (AL_APIENTRY*LPALISBUFFERFORMATSUPPORTEDSOFT)(ALenum); +#ifdef AL_ALEXT_PROTOTYPES +AL_API void AL_APIENTRY alBufferSamplesSOFT(ALuint buffer, ALuint samplerate, ALenum internalformat, ALsizei samples, ALenum channels, ALenum type, const ALvoid *data); +AL_API void AL_APIENTRY alBufferSubSamplesSOFT(ALuint buffer, ALsizei offset, ALsizei samples, ALenum channels, ALenum type, const ALvoid *data); +AL_API void AL_APIENTRY alGetBufferSamplesSOFT(ALuint buffer, ALsizei offset, ALsizei samples, ALenum channels, ALenum type, ALvoid *data); +AL_API ALboolean AL_APIENTRY alIsBufferFormatSupportedSOFT(ALenum format); +#endif +#endif + +#ifndef AL_SOFT_direct_channels +#define AL_SOFT_direct_channels 1 +#define AL_DIRECT_CHANNELS_SOFT 0x1033 +#endif + +#ifndef ALC_SOFT_loopback +#define ALC_SOFT_loopback 1 +#define ALC_FORMAT_CHANNELS_SOFT 0x1990 +#define ALC_FORMAT_TYPE_SOFT 0x1991 + +/* Sample types */ +#define ALC_BYTE_SOFT 0x1400 +#define ALC_UNSIGNED_BYTE_SOFT 0x1401 +#define ALC_SHORT_SOFT 0x1402 +#define ALC_UNSIGNED_SHORT_SOFT 0x1403 +#define ALC_INT_SOFT 0x1404 +#define ALC_UNSIGNED_INT_SOFT 0x1405 +#define ALC_FLOAT_SOFT 0x1406 + +/* Channel configurations */ +#define ALC_MONO_SOFT 0x1500 +#define ALC_STEREO_SOFT 0x1501 +#define ALC_QUAD_SOFT 0x1503 +#define ALC_5POINT1_SOFT 0x1504 +#define ALC_6POINT1_SOFT 0x1505 +#define ALC_7POINT1_SOFT 0x1506 + +typedef ALCdevice* (ALC_APIENTRY*LPALCLOOPBACKOPENDEVICESOFT)(const ALCchar*); +typedef ALCboolean (ALC_APIENTRY*LPALCISRENDERFORMATSUPPORTEDSOFT)(ALCdevice*,ALCsizei,ALCenum,ALCenum); +typedef void (ALC_APIENTRY*LPALCRENDERSAMPLESSOFT)(ALCdevice*,ALCvoid*,ALCsizei); +#ifdef AL_ALEXT_PROTOTYPES +ALC_API ALCdevice* 
ALC_APIENTRY alcLoopbackOpenDeviceSOFT(const ALCchar *deviceName); +ALC_API ALCboolean ALC_APIENTRY alcIsRenderFormatSupportedSOFT(ALCdevice *device, ALCsizei freq, ALCenum channels, ALCenum type); +ALC_API void ALC_APIENTRY alcRenderSamplesSOFT(ALCdevice *device, ALCvoid *buffer, ALCsizei samples); +#endif +#endif + +#ifndef AL_EXT_STEREO_ANGLES +#define AL_EXT_STEREO_ANGLES 1 +#define AL_STEREO_ANGLES 0x1030 +#endif + +#ifndef AL_EXT_SOURCE_RADIUS +#define AL_EXT_SOURCE_RADIUS 1 +#define AL_SOURCE_RADIUS 0x1031 +#endif + +#ifndef AL_SOFT_source_latency +#define AL_SOFT_source_latency 1 +#define AL_SAMPLE_OFFSET_LATENCY_SOFT 0x1200 +#define AL_SEC_OFFSET_LATENCY_SOFT 0x1201 +typedef _alsoft_int64_t ALint64SOFT; +typedef _alsoft_uint64_t ALuint64SOFT; +typedef void (AL_APIENTRY*LPALSOURCEDSOFT)(ALuint,ALenum,ALdouble); +typedef void (AL_APIENTRY*LPALSOURCE3DSOFT)(ALuint,ALenum,ALdouble,ALdouble,ALdouble); +typedef void (AL_APIENTRY*LPALSOURCEDVSOFT)(ALuint,ALenum,const ALdouble*); +typedef void (AL_APIENTRY*LPALGETSOURCEDSOFT)(ALuint,ALenum,ALdouble*); +typedef void (AL_APIENTRY*LPALGETSOURCE3DSOFT)(ALuint,ALenum,ALdouble*,ALdouble*,ALdouble*); +typedef void (AL_APIENTRY*LPALGETSOURCEDVSOFT)(ALuint,ALenum,ALdouble*); +typedef void (AL_APIENTRY*LPALSOURCEI64SOFT)(ALuint,ALenum,ALint64SOFT); +typedef void (AL_APIENTRY*LPALSOURCE3I64SOFT)(ALuint,ALenum,ALint64SOFT,ALint64SOFT,ALint64SOFT); +typedef void (AL_APIENTRY*LPALSOURCEI64VSOFT)(ALuint,ALenum,const ALint64SOFT*); +typedef void (AL_APIENTRY*LPALGETSOURCEI64SOFT)(ALuint,ALenum,ALint64SOFT*); +typedef void (AL_APIENTRY*LPALGETSOURCE3I64SOFT)(ALuint,ALenum,ALint64SOFT*,ALint64SOFT*,ALint64SOFT*); +typedef void (AL_APIENTRY*LPALGETSOURCEI64VSOFT)(ALuint,ALenum,ALint64SOFT*); +#ifdef AL_ALEXT_PROTOTYPES +AL_API void AL_APIENTRY alSourcedSOFT(ALuint source, ALenum param, ALdouble value); +AL_API void AL_APIENTRY alSource3dSOFT(ALuint source, ALenum param, ALdouble value1, ALdouble value2, ALdouble value3); +AL_API void AL_APIENTRY alSourcedvSOFT(ALuint source, ALenum param, const ALdouble *values); +AL_API void AL_APIENTRY alGetSourcedSOFT(ALuint source, ALenum param, ALdouble *value); +AL_API void AL_APIENTRY alGetSource3dSOFT(ALuint source, ALenum param, ALdouble *value1, ALdouble *value2, ALdouble *value3); +AL_API void AL_APIENTRY alGetSourcedvSOFT(ALuint source, ALenum param, ALdouble *values); +AL_API void AL_APIENTRY alSourcei64SOFT(ALuint source, ALenum param, ALint64SOFT value); +AL_API void AL_APIENTRY alSource3i64SOFT(ALuint source, ALenum param, ALint64SOFT value1, ALint64SOFT value2, ALint64SOFT value3); +AL_API void AL_APIENTRY alSourcei64vSOFT(ALuint source, ALenum param, const ALint64SOFT *values); +AL_API void AL_APIENTRY alGetSourcei64SOFT(ALuint source, ALenum param, ALint64SOFT *value); +AL_API void AL_APIENTRY alGetSource3i64SOFT(ALuint source, ALenum param, ALint64SOFT *value1, ALint64SOFT *value2, ALint64SOFT *value3); +AL_API void AL_APIENTRY alGetSourcei64vSOFT(ALuint source, ALenum param, ALint64SOFT *values); +#endif +#endif + +#ifndef ALC_EXT_DEFAULT_FILTER_ORDER +#define ALC_EXT_DEFAULT_FILTER_ORDER 1 +#define ALC_DEFAULT_FILTER_ORDER 0x1100 +#endif + +#ifndef AL_SOFT_deferred_updates +#define AL_SOFT_deferred_updates 1 +#define AL_DEFERRED_UPDATES_SOFT 0xC002 +typedef void (AL_APIENTRY*LPALDEFERUPDATESSOFT)(void); +typedef void (AL_APIENTRY*LPALPROCESSUPDATESSOFT)(void); +#ifdef AL_ALEXT_PROTOTYPES +AL_API void AL_APIENTRY alDeferUpdatesSOFT(void); +AL_API void AL_APIENTRY alProcessUpdatesSOFT(void); +#endif 
+#endif + +#ifndef AL_SOFT_block_alignment +#define AL_SOFT_block_alignment 1 +#define AL_UNPACK_BLOCK_ALIGNMENT_SOFT 0x200C +#define AL_PACK_BLOCK_ALIGNMENT_SOFT 0x200D +#endif + +#ifndef AL_SOFT_MSADPCM +#define AL_SOFT_MSADPCM 1 +#define AL_FORMAT_MONO_MSADPCM_SOFT 0x1302 +#define AL_FORMAT_STEREO_MSADPCM_SOFT 0x1303 +#endif + +#ifndef AL_SOFT_source_length +#define AL_SOFT_source_length 1 +/*#define AL_BYTE_LENGTH_SOFT 0x2009*/ +/*#define AL_SAMPLE_LENGTH_SOFT 0x200A*/ +/*#define AL_SEC_LENGTH_SOFT 0x200B*/ +#endif + +#ifndef ALC_SOFT_pause_device +#define ALC_SOFT_pause_device 1 +typedef void (ALC_APIENTRY*LPALCDEVICEPAUSESOFT)(ALCdevice *device); +typedef void (ALC_APIENTRY*LPALCDEVICERESUMESOFT)(ALCdevice *device); +#ifdef AL_ALEXT_PROTOTYPES +ALC_API void ALC_APIENTRY alcDevicePauseSOFT(ALCdevice *device); +ALC_API void ALC_APIENTRY alcDeviceResumeSOFT(ALCdevice *device); +#endif +#endif + +#ifndef AL_EXT_BFORMAT +#define AL_EXT_BFORMAT 1 +/* Provides support for B-Format ambisonic buffers (first-order, FuMa scaling + * and layout). + * + * BFORMAT2D_8: Unsigned 8-bit, 3-channel non-periphonic (WXY). + * BFORMAT2D_16: Signed 16-bit, 3-channel non-periphonic (WXY). + * BFORMAT2D_FLOAT32: 32-bit float, 3-channel non-periphonic (WXY). + * BFORMAT3D_8: Unsigned 8-bit, 4-channel periphonic (WXYZ). + * BFORMAT3D_16: Signed 16-bit, 4-channel periphonic (WXYZ). + * BFORMAT3D_FLOAT32: 32-bit float, 4-channel periphonic (WXYZ). + */ +#define AL_FORMAT_BFORMAT2D_8 0x20021 +#define AL_FORMAT_BFORMAT2D_16 0x20022 +#define AL_FORMAT_BFORMAT2D_FLOAT32 0x20023 +#define AL_FORMAT_BFORMAT3D_8 0x20031 +#define AL_FORMAT_BFORMAT3D_16 0x20032 +#define AL_FORMAT_BFORMAT3D_FLOAT32 0x20033 +#endif + +#ifndef AL_EXT_MULAW_BFORMAT +#define AL_EXT_MULAW_BFORMAT 1 +#define AL_FORMAT_BFORMAT2D_MULAW 0x10031 +#define AL_FORMAT_BFORMAT3D_MULAW 0x10032 +#endif + +#ifndef ALC_SOFT_HRTF +#define ALC_SOFT_HRTF 1 +#define ALC_HRTF_SOFT 0x1992 +#define ALC_DONT_CARE_SOFT 0x0002 +#define ALC_HRTF_STATUS_SOFT 0x1993 +#define ALC_HRTF_DISABLED_SOFT 0x0000 +#define ALC_HRTF_ENABLED_SOFT 0x0001 +#define ALC_HRTF_DENIED_SOFT 0x0002 +#define ALC_HRTF_REQUIRED_SOFT 0x0003 +#define ALC_HRTF_HEADPHONES_DETECTED_SOFT 0x0004 +#define ALC_HRTF_UNSUPPORTED_FORMAT_SOFT 0x0005 +#define ALC_NUM_HRTF_SPECIFIERS_SOFT 0x1994 +#define ALC_HRTF_SPECIFIER_SOFT 0x1995 +#define ALC_HRTF_ID_SOFT 0x1996 +typedef const ALCchar* (ALC_APIENTRY*LPALCGETSTRINGISOFT)(ALCdevice *device, ALCenum paramName, ALCsizei index); +typedef ALCboolean (ALC_APIENTRY*LPALCRESETDEVICESOFT)(ALCdevice *device, const ALCint *attribs); +#ifdef AL_ALEXT_PROTOTYPES +ALC_API const ALCchar* ALC_APIENTRY alcGetStringiSOFT(ALCdevice *device, ALCenum paramName, ALCsizei index); +ALC_API ALCboolean ALC_APIENTRY alcResetDeviceSOFT(ALCdevice *device, const ALCint *attribs); +#endif +#endif + +#ifndef AL_SOFT_gain_clamp_ex +#define AL_SOFT_gain_clamp_ex 1 +#define AL_GAIN_LIMIT_SOFT 0x200E +#endif + +#ifndef AL_SOFT_source_resampler +#define AL_SOFT_source_resampler +#define AL_NUM_RESAMPLERS_SOFT 0x1210 +#define AL_DEFAULT_RESAMPLER_SOFT 0x1211 +#define AL_SOURCE_RESAMPLER_SOFT 0x1212 +#define AL_RESAMPLER_NAME_SOFT 0x1213 +typedef const ALchar* (AL_APIENTRY*LPALGETSTRINGISOFT)(ALenum pname, ALsizei index); +#ifdef AL_ALEXT_PROTOTYPES +AL_API const ALchar* AL_APIENTRY alGetStringiSOFT(ALenum pname, ALsizei index); +#endif +#endif + +#ifndef AL_SOFT_source_spatialize +#define AL_SOFT_source_spatialize +#define AL_SOURCE_SPATIALIZE_SOFT 0x1214 +#define AL_AUTO_SOFT 0x0002 +#endif 
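As an aside for reviewers: the SOFT extension tokens declared above are only usable together with the runtime-query machinery from alc.h, since extension entry points such as alcGetStringiSOFT are not symbols a portable application can link against directly. The following is a minimal sketch (not part of the vendored headers, and not necessarily how RPCS3 consumes them) of enumerating HRTF specifiers via ALC_SOFT_HRTF; error handling is abbreviated.

#include <stdio.h>
#include "alc.h"
#include "alext.h"

int main(void)
{
    ALCdevice *dev = alcOpenDevice(NULL); /* NULL requests the default playback device */
    if (!dev)
        return 1;

    if (alcIsExtensionPresent(dev, "ALC_SOFT_HRTF"))
    {
        /* Extension entry points are fetched at runtime through
         * alcGetProcAddress, then stored in the matching LPALC* typedef. */
        LPALCGETSTRINGISOFT alcGetStringiSOFT =
            (LPALCGETSTRINGISOFT)alcGetProcAddress(dev, "alcGetStringiSOFT");

        ALCint num_hrtf = 0;
        alcGetIntegerv(dev, ALC_NUM_HRTF_SPECIFIERS_SOFT, 1, &num_hrtf);
        for (ALCint i = 0; i < num_hrtf; ++i)
            printf("HRTF %d: %s\n", i, alcGetStringiSOFT(dev, ALC_HRTF_SPECIFIER_SOFT, i));
    }

    alcCloseDevice(dev);
    return 0;
}

The same alcGetProcAddress pattern applies to every LPALC*/LPAL* function-pointer typedef in these headers; the typedefs exist precisely so callers can hold the queried pointers with the correct signatures.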
+ +#ifndef ALC_SOFT_output_limiter +#define ALC_SOFT_output_limiter +#define ALC_OUTPUT_LIMITER_SOFT 0x199A +#endif + +#ifndef ALC_SOFT_device_clock +#define ALC_SOFT_device_clock 1 +typedef _alsoft_int64_t ALCint64SOFT; +typedef _alsoft_uint64_t ALCuint64SOFT; +#define ALC_DEVICE_CLOCK_SOFT 0x1600 +#define ALC_DEVICE_LATENCY_SOFT 0x1601 +#define ALC_DEVICE_CLOCK_LATENCY_SOFT 0x1602 +#define AL_SAMPLE_OFFSET_CLOCK_SOFT 0x1202 +#define AL_SEC_OFFSET_CLOCK_SOFT 0x1203 +typedef void (ALC_APIENTRY*LPALCGETINTEGER64VSOFT)(ALCdevice *device, ALCenum pname, ALsizei size, ALCint64SOFT *values); +#ifdef AL_ALEXT_PROTOTYPES +ALC_API void ALC_APIENTRY alcGetInteger64vSOFT(ALCdevice *device, ALCenum pname, ALsizei size, ALCint64SOFT *values); +#endif +#endif + +#ifndef AL_SOFT_direct_channels_remix +#define AL_SOFT_direct_channels_remix 1 +#define AL_DROP_UNMATCHED_SOFT 0x0001 +#define AL_REMIX_UNMATCHED_SOFT 0x0002 +#endif + +#ifndef AL_SOFT_bformat_ex +#define AL_SOFT_bformat_ex 1 +#define AL_AMBISONIC_LAYOUT_SOFT 0x1997 +#define AL_AMBISONIC_SCALING_SOFT 0x1998 + +/* Ambisonic layouts */ +#define AL_FUMA_SOFT 0x0000 +#define AL_ACN_SOFT 0x0001 + +/* Ambisonic scalings (normalization) */ +/*#define AL_FUMA_SOFT*/ +#define AL_SN3D_SOFT 0x0001 +#define AL_N3D_SOFT 0x0002 +#endif + +#ifndef ALC_SOFT_loopback_bformat +#define ALC_SOFT_loopback_bformat 1 +#define ALC_AMBISONIC_LAYOUT_SOFT 0x1997 +#define ALC_AMBISONIC_SCALING_SOFT 0x1998 +#define ALC_AMBISONIC_ORDER_SOFT 0x1999 +#define ALC_MAX_AMBISONIC_ORDER_SOFT 0x199B + +#define ALC_BFORMAT3D_SOFT 0x1507 + +/* Ambisonic layouts */ +#define ALC_FUMA_SOFT 0x0000 +#define ALC_ACN_SOFT 0x0001 + +/* Ambisonic scalings (normalization) */ +/*#define ALC_FUMA_SOFT*/ +#define ALC_SN3D_SOFT 0x0001 +#define ALC_N3D_SOFT 0x0002 +#endif + +#ifndef AL_SOFT_effect_target +#define AL_SOFT_effect_target +#define AL_EFFECTSLOT_TARGET_SOFT 0x199C +#endif + +#ifndef AL_SOFT_events +#define AL_SOFT_events 1 +#define AL_EVENT_CALLBACK_FUNCTION_SOFT 0x19A2 +#define AL_EVENT_CALLBACK_USER_PARAM_SOFT 0x19A3 +#define AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT 0x19A4 +#define AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT 0x19A5 +#define AL_EVENT_TYPE_DISCONNECTED_SOFT 0x19A6 +typedef void (AL_APIENTRY*ALEVENTPROCSOFT)(ALenum eventType, ALuint object, ALuint param, + ALsizei length, const ALchar *message, + void *userParam); +typedef void (AL_APIENTRY*LPALEVENTCONTROLSOFT)(ALsizei count, const ALenum *types, ALboolean enable); +typedef void (AL_APIENTRY*LPALEVENTCALLBACKSOFT)(ALEVENTPROCSOFT callback, void *userParam); +typedef void* (AL_APIENTRY*LPALGETPOINTERSOFT)(ALenum pname); +typedef void (AL_APIENTRY*LPALGETPOINTERVSOFT)(ALenum pname, void **values); +#ifdef AL_ALEXT_PROTOTYPES +AL_API void AL_APIENTRY alEventControlSOFT(ALsizei count, const ALenum *types, ALboolean enable); +AL_API void AL_APIENTRY alEventCallbackSOFT(ALEVENTPROCSOFT callback, void *userParam); +AL_API void* AL_APIENTRY alGetPointerSOFT(ALenum pname); +AL_API void AL_APIENTRY alGetPointervSOFT(ALenum pname, void **values); +#endif +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/3rdparty/OpenAL/include/efx-creative.h b/3rdparty/OpenAL/include/efx-creative.h new file mode 100644 index 0000000000..0a04c982e1 --- /dev/null +++ b/3rdparty/OpenAL/include/efx-creative.h @@ -0,0 +1,3 @@ +/* The tokens that would be defined here are already defined in efx.h. This + * empty file is here to provide compatibility with Windows-based projects + * that would include it. 
*/ diff --git a/3rdparty/OpenAL/include/efx-presets.h b/3rdparty/OpenAL/include/efx-presets.h new file mode 100644 index 0000000000..8539fd5178 --- /dev/null +++ b/3rdparty/OpenAL/include/efx-presets.h @@ -0,0 +1,402 @@ +/* Reverb presets for EFX */ + +#ifndef EFX_PRESETS_H +#define EFX_PRESETS_H + +#ifndef EFXEAXREVERBPROPERTIES_DEFINED +#define EFXEAXREVERBPROPERTIES_DEFINED +typedef struct { + float flDensity; + float flDiffusion; + float flGain; + float flGainHF; + float flGainLF; + float flDecayTime; + float flDecayHFRatio; + float flDecayLFRatio; + float flReflectionsGain; + float flReflectionsDelay; + float flReflectionsPan[3]; + float flLateReverbGain; + float flLateReverbDelay; + float flLateReverbPan[3]; + float flEchoTime; + float flEchoDepth; + float flModulationTime; + float flModulationDepth; + float flAirAbsorptionGainHF; + float flHFReference; + float flLFReference; + float flRoomRolloffFactor; + int iDecayHFLimit; +} EFXEAXREVERBPROPERTIES, *LPEFXEAXREVERBPROPERTIES; +#endif + +/* Default Presets */ + +#define EFX_REVERB_PRESET_GENERIC \ + { 1.0000f, 1.0000f, 0.3162f, 0.8913f, 1.0000f, 1.4900f, 0.8300f, 1.0000f, 0.0500f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 1.2589f, 0.0110f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_PADDEDCELL \ + { 0.1715f, 1.0000f, 0.3162f, 0.0010f, 1.0000f, 0.1700f, 0.1000f, 1.0000f, 0.2500f, 0.0010f, { 0.0000f, 0.0000f, 0.0000f }, 1.2691f, 0.0020f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_ROOM \ + { 0.4287f, 1.0000f, 0.3162f, 0.5929f, 1.0000f, 0.4000f, 0.8300f, 1.0000f, 0.1503f, 0.0020f, { 0.0000f, 0.0000f, 0.0000f }, 1.0629f, 0.0030f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_BATHROOM \ + { 0.1715f, 1.0000f, 0.3162f, 0.2512f, 1.0000f, 1.4900f, 0.5400f, 1.0000f, 0.6531f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 3.2734f, 0.0110f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_LIVINGROOM \ + { 0.9766f, 1.0000f, 0.3162f, 0.0010f, 1.0000f, 0.5000f, 0.1000f, 1.0000f, 0.2051f, 0.0030f, { 0.0000f, 0.0000f, 0.0000f }, 0.2805f, 0.0040f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_STONEROOM \ + { 1.0000f, 1.0000f, 0.3162f, 0.7079f, 1.0000f, 2.3100f, 0.6400f, 1.0000f, 0.4411f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 1.1003f, 0.0170f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_AUDITORIUM \ + { 1.0000f, 1.0000f, 0.3162f, 0.5781f, 1.0000f, 4.3200f, 0.5900f, 1.0000f, 0.4032f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 0.7170f, 0.0300f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CONCERTHALL \ + { 1.0000f, 1.0000f, 0.3162f, 0.5623f, 1.0000f, 3.9200f, 0.7000f, 1.0000f, 0.2427f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 0.9977f, 0.0290f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CAVE \ + { 1.0000f, 1.0000f, 0.3162f, 1.0000f, 1.0000f, 2.9100f, 1.3000f, 1.0000f, 0.5000f, 0.0150f, { 0.0000f, 
0.0000f, 0.0000f }, 0.7063f, 0.0220f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_ARENA \ + { 1.0000f, 1.0000f, 0.3162f, 0.4477f, 1.0000f, 7.2400f, 0.3300f, 1.0000f, 0.2612f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 1.0186f, 0.0300f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_HANGAR \ + { 1.0000f, 1.0000f, 0.3162f, 0.3162f, 1.0000f, 10.0500f, 0.2300f, 1.0000f, 0.5000f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 1.2560f, 0.0300f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CARPETEDHALLWAY \ + { 0.4287f, 1.0000f, 0.3162f, 0.0100f, 1.0000f, 0.3000f, 0.1000f, 1.0000f, 0.1215f, 0.0020f, { 0.0000f, 0.0000f, 0.0000f }, 0.1531f, 0.0300f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_HALLWAY \ + { 0.3645f, 1.0000f, 0.3162f, 0.7079f, 1.0000f, 1.4900f, 0.5900f, 1.0000f, 0.2458f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 1.6615f, 0.0110f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_STONECORRIDOR \ + { 1.0000f, 1.0000f, 0.3162f, 0.7612f, 1.0000f, 2.7000f, 0.7900f, 1.0000f, 0.2472f, 0.0130f, { 0.0000f, 0.0000f, 0.0000f }, 1.5758f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_ALLEY \ + { 1.0000f, 0.3000f, 0.3162f, 0.7328f, 1.0000f, 1.4900f, 0.8600f, 1.0000f, 0.2500f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 0.9954f, 0.0110f, { 0.0000f, 0.0000f, 0.0000f }, 0.1250f, 0.9500f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_FOREST \ + { 1.0000f, 0.3000f, 0.3162f, 0.0224f, 1.0000f, 1.4900f, 0.5400f, 1.0000f, 0.0525f, 0.1620f, { 0.0000f, 0.0000f, 0.0000f }, 0.7682f, 0.0880f, { 0.0000f, 0.0000f, 0.0000f }, 0.1250f, 1.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CITY \ + { 1.0000f, 0.5000f, 0.3162f, 0.3981f, 1.0000f, 1.4900f, 0.6700f, 1.0000f, 0.0730f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 0.1427f, 0.0110f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_MOUNTAINS \ + { 1.0000f, 0.2700f, 0.3162f, 0.0562f, 1.0000f, 1.4900f, 0.2100f, 1.0000f, 0.0407f, 0.3000f, { 0.0000f, 0.0000f, 0.0000f }, 0.1919f, 0.1000f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 1.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_QUARRY \ + { 1.0000f, 1.0000f, 0.3162f, 0.3162f, 1.0000f, 1.4900f, 0.8300f, 1.0000f, 0.0000f, 0.0610f, { 0.0000f, 0.0000f, 0.0000f }, 1.7783f, 0.0250f, { 0.0000f, 0.0000f, 0.0000f }, 0.1250f, 0.7000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_PLAIN \ + { 1.0000f, 0.2100f, 0.3162f, 0.1000f, 1.0000f, 1.4900f, 0.5000f, 1.0000f, 0.0585f, 0.1790f, { 0.0000f, 0.0000f, 0.0000f }, 0.1089f, 0.1000f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 1.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_PARKINGLOT \ + { 1.0000f, 1.0000f, 0.3162f, 1.0000f, 1.0000f, 1.6500f, 1.5000f, 1.0000f, 0.2082f, 0.0080f, { 
0.0000f, 0.0000f, 0.0000f }, 0.2652f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_SEWERPIPE \ + { 0.3071f, 0.8000f, 0.3162f, 0.3162f, 1.0000f, 2.8100f, 0.1400f, 1.0000f, 1.6387f, 0.0140f, { 0.0000f, 0.0000f, 0.0000f }, 3.2471f, 0.0210f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_UNDERWATER \ + { 0.3645f, 1.0000f, 0.3162f, 0.0100f, 1.0000f, 1.4900f, 0.1000f, 1.0000f, 0.5963f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 7.0795f, 0.0110f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 1.1800f, 0.3480f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_DRUGGED \ + { 0.4287f, 0.5000f, 0.3162f, 1.0000f, 1.0000f, 8.3900f, 1.3900f, 1.0000f, 0.8760f, 0.0020f, { 0.0000f, 0.0000f, 0.0000f }, 3.1081f, 0.0300f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 1.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_DIZZY \ + { 0.3645f, 0.6000f, 0.3162f, 0.6310f, 1.0000f, 17.2300f, 0.5600f, 1.0000f, 0.1392f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 0.4937f, 0.0300f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 1.0000f, 0.8100f, 0.3100f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_PSYCHOTIC \ + { 0.0625f, 0.5000f, 0.3162f, 0.8404f, 1.0000f, 7.5600f, 0.9100f, 1.0000f, 0.4864f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 2.4378f, 0.0300f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 4.0000f, 1.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +/* Castle Presets */ + +#define EFX_REVERB_PRESET_CASTLE_SMALLROOM \ + { 1.0000f, 0.8900f, 0.3162f, 0.3981f, 0.1000f, 1.2200f, 0.8300f, 0.3100f, 0.8913f, 0.0220f, { 0.0000f, 0.0000f, 0.0000f }, 1.9953f, 0.0110f, { 0.0000f, 0.0000f, 0.0000f }, 0.1380f, 0.0800f, 0.2500f, 0.0000f, 0.9943f, 5168.6001f, 139.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CASTLE_SHORTPASSAGE \ + { 1.0000f, 0.8900f, 0.3162f, 0.3162f, 0.1000f, 2.3200f, 0.8300f, 0.3100f, 0.8913f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 1.2589f, 0.0230f, { 0.0000f, 0.0000f, 0.0000f }, 0.1380f, 0.0800f, 0.2500f, 0.0000f, 0.9943f, 5168.6001f, 139.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CASTLE_MEDIUMROOM \ + { 1.0000f, 0.9300f, 0.3162f, 0.2818f, 0.1000f, 2.0400f, 0.8300f, 0.4600f, 0.6310f, 0.0220f, { 0.0000f, 0.0000f, 0.0000f }, 1.5849f, 0.0110f, { 0.0000f, 0.0000f, 0.0000f }, 0.1550f, 0.0300f, 0.2500f, 0.0000f, 0.9943f, 5168.6001f, 139.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CASTLE_LARGEROOM \ + { 1.0000f, 0.8200f, 0.3162f, 0.2818f, 0.1259f, 2.5300f, 0.8300f, 0.5000f, 0.4467f, 0.0340f, { 0.0000f, 0.0000f, 0.0000f }, 1.2589f, 0.0160f, { 0.0000f, 0.0000f, 0.0000f }, 0.1850f, 0.0700f, 0.2500f, 0.0000f, 0.9943f, 5168.6001f, 139.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CASTLE_LONGPASSAGE \ + { 1.0000f, 0.8900f, 0.3162f, 0.3981f, 0.1000f, 3.4200f, 0.8300f, 0.3100f, 0.8913f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 1.4125f, 0.0230f, { 0.0000f, 0.0000f, 0.0000f }, 0.1380f, 0.0800f, 0.2500f, 0.0000f, 0.9943f, 5168.6001f, 139.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CASTLE_HALL \ + { 1.0000f, 0.8100f, 0.3162f, 0.2818f, 0.1778f, 3.1400f, 0.7900f, 0.6200f, 0.1778f, 0.0560f, { 0.0000f, 0.0000f, 0.0000f }, 1.1220f, 0.0240f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5168.6001f, 139.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CASTLE_CUPBOARD \ + { 
1.0000f, 0.8900f, 0.3162f, 0.2818f, 0.1000f, 0.6700f, 0.8700f, 0.3100f, 1.4125f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 3.5481f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 0.1380f, 0.0800f, 0.2500f, 0.0000f, 0.9943f, 5168.6001f, 139.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CASTLE_COURTYARD \ + { 1.0000f, 0.4200f, 0.3162f, 0.4467f, 0.1995f, 2.1300f, 0.6100f, 0.2300f, 0.2239f, 0.1600f, { 0.0000f, 0.0000f, 0.0000f }, 0.7079f, 0.0360f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.3700f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_CASTLE_ALCOVE \ + { 1.0000f, 0.8900f, 0.3162f, 0.5012f, 0.1000f, 1.6400f, 0.8700f, 0.3100f, 1.0000f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 1.4125f, 0.0340f, { 0.0000f, 0.0000f, 0.0000f }, 0.1380f, 0.0800f, 0.2500f, 0.0000f, 0.9943f, 5168.6001f, 139.5000f, 0.0000f, 0x1 } + +/* Factory Presets */ + +#define EFX_REVERB_PRESET_FACTORY_SMALLROOM \ + { 0.3645f, 0.8200f, 0.3162f, 0.7943f, 0.5012f, 1.7200f, 0.6500f, 1.3100f, 0.7079f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 1.7783f, 0.0240f, { 0.0000f, 0.0000f, 0.0000f }, 0.1190f, 0.0700f, 0.2500f, 0.0000f, 0.9943f, 3762.6001f, 362.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_FACTORY_SHORTPASSAGE \ + { 0.3645f, 0.6400f, 0.2512f, 0.7943f, 0.5012f, 2.5300f, 0.6500f, 1.3100f, 1.0000f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 1.2589f, 0.0380f, { 0.0000f, 0.0000f, 0.0000f }, 0.1350f, 0.2300f, 0.2500f, 0.0000f, 0.9943f, 3762.6001f, 362.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_FACTORY_MEDIUMROOM \ + { 0.4287f, 0.8200f, 0.2512f, 0.7943f, 0.5012f, 2.7600f, 0.6500f, 1.3100f, 0.2818f, 0.0220f, { 0.0000f, 0.0000f, 0.0000f }, 1.4125f, 0.0230f, { 0.0000f, 0.0000f, 0.0000f }, 0.1740f, 0.0700f, 0.2500f, 0.0000f, 0.9943f, 3762.6001f, 362.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_FACTORY_LARGEROOM \ + { 0.4287f, 0.7500f, 0.2512f, 0.7079f, 0.6310f, 4.2400f, 0.5100f, 1.3100f, 0.1778f, 0.0390f, { 0.0000f, 0.0000f, 0.0000f }, 1.1220f, 0.0230f, { 0.0000f, 0.0000f, 0.0000f }, 0.2310f, 0.0700f, 0.2500f, 0.0000f, 0.9943f, 3762.6001f, 362.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_FACTORY_LONGPASSAGE \ + { 0.3645f, 0.6400f, 0.2512f, 0.7943f, 0.5012f, 4.0600f, 0.6500f, 1.3100f, 1.0000f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 1.2589f, 0.0370f, { 0.0000f, 0.0000f, 0.0000f }, 0.1350f, 0.2300f, 0.2500f, 0.0000f, 0.9943f, 3762.6001f, 362.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_FACTORY_HALL \ + { 0.4287f, 0.7500f, 0.3162f, 0.7079f, 0.6310f, 7.4300f, 0.5100f, 1.3100f, 0.0631f, 0.0730f, { 0.0000f, 0.0000f, 0.0000f }, 0.8913f, 0.0270f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0700f, 0.2500f, 0.0000f, 0.9943f, 3762.6001f, 362.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_FACTORY_CUPBOARD \ + { 0.3071f, 0.6300f, 0.2512f, 0.7943f, 0.5012f, 0.4900f, 0.6500f, 1.3100f, 1.2589f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 1.9953f, 0.0320f, { 0.0000f, 0.0000f, 0.0000f }, 0.1070f, 0.0700f, 0.2500f, 0.0000f, 0.9943f, 3762.6001f, 362.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_FACTORY_COURTYARD \ + { 0.3071f, 0.5700f, 0.3162f, 0.3162f, 0.6310f, 2.3200f, 0.2900f, 0.5600f, 0.2239f, 0.1400f, { 0.0000f, 0.0000f, 0.0000f }, 0.3981f, 0.0390f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.2900f, 0.2500f, 0.0000f, 0.9943f, 3762.6001f, 362.5000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_FACTORY_ALCOVE \ + { 0.3645f, 0.5900f, 0.2512f, 0.7943f, 0.5012f, 3.1400f, 0.6500f, 1.3100f, 1.4125f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 1.0000f, 0.0380f, { 0.0000f, 0.0000f, 0.0000f }, 
0.1140f, 0.1000f, 0.2500f, 0.0000f, 0.9943f, 3762.6001f, 362.5000f, 0.0000f, 0x1 } + +/* Ice Palace Presets */ + +#define EFX_REVERB_PRESET_ICEPALACE_SMALLROOM \ + { 1.0000f, 0.8400f, 0.3162f, 0.5623f, 0.2818f, 1.5100f, 1.5300f, 0.2700f, 0.8913f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 1.4125f, 0.0110f, { 0.0000f, 0.0000f, 0.0000f }, 0.1640f, 0.1400f, 0.2500f, 0.0000f, 0.9943f, 12428.5000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_ICEPALACE_SHORTPASSAGE \ + { 1.0000f, 0.7500f, 0.3162f, 0.5623f, 0.2818f, 1.7900f, 1.4600f, 0.2800f, 0.5012f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 1.1220f, 0.0190f, { 0.0000f, 0.0000f, 0.0000f }, 0.1770f, 0.0900f, 0.2500f, 0.0000f, 0.9943f, 12428.5000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_ICEPALACE_MEDIUMROOM \ + { 1.0000f, 0.8700f, 0.3162f, 0.5623f, 0.4467f, 2.2200f, 1.5300f, 0.3200f, 0.3981f, 0.0390f, { 0.0000f, 0.0000f, 0.0000f }, 1.1220f, 0.0270f, { 0.0000f, 0.0000f, 0.0000f }, 0.1860f, 0.1200f, 0.2500f, 0.0000f, 0.9943f, 12428.5000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_ICEPALACE_LARGEROOM \ + { 1.0000f, 0.8100f, 0.3162f, 0.5623f, 0.4467f, 3.1400f, 1.5300f, 0.3200f, 0.2512f, 0.0390f, { 0.0000f, 0.0000f, 0.0000f }, 1.0000f, 0.0270f, { 0.0000f, 0.0000f, 0.0000f }, 0.2140f, 0.1100f, 0.2500f, 0.0000f, 0.9943f, 12428.5000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_ICEPALACE_LONGPASSAGE \ + { 1.0000f, 0.7700f, 0.3162f, 0.5623f, 0.3981f, 3.0100f, 1.4600f, 0.2800f, 0.7943f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 1.2589f, 0.0250f, { 0.0000f, 0.0000f, 0.0000f }, 0.1860f, 0.0400f, 0.2500f, 0.0000f, 0.9943f, 12428.5000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_ICEPALACE_HALL \ + { 1.0000f, 0.7600f, 0.3162f, 0.4467f, 0.5623f, 5.4900f, 1.5300f, 0.3800f, 0.1122f, 0.0540f, { 0.0000f, 0.0000f, 0.0000f }, 0.6310f, 0.0520f, { 0.0000f, 0.0000f, 0.0000f }, 0.2260f, 0.1100f, 0.2500f, 0.0000f, 0.9943f, 12428.5000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_ICEPALACE_CUPBOARD \ + { 1.0000f, 0.8300f, 0.3162f, 0.5012f, 0.2239f, 0.7600f, 1.5300f, 0.2600f, 1.1220f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 1.9953f, 0.0160f, { 0.0000f, 0.0000f, 0.0000f }, 0.1430f, 0.0800f, 0.2500f, 0.0000f, 0.9943f, 12428.5000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_ICEPALACE_COURTYARD \ + { 1.0000f, 0.5900f, 0.3162f, 0.2818f, 0.3162f, 2.0400f, 1.2000f, 0.3800f, 0.3162f, 0.1730f, { 0.0000f, 0.0000f, 0.0000f }, 0.3162f, 0.0430f, { 0.0000f, 0.0000f, 0.0000f }, 0.2350f, 0.4800f, 0.2500f, 0.0000f, 0.9943f, 12428.5000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_ICEPALACE_ALCOVE \ + { 1.0000f, 0.8400f, 0.3162f, 0.5623f, 0.2818f, 2.7600f, 1.4600f, 0.2800f, 1.1220f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 0.8913f, 0.0300f, { 0.0000f, 0.0000f, 0.0000f }, 0.1610f, 0.0900f, 0.2500f, 0.0000f, 0.9943f, 12428.5000f, 99.6000f, 0.0000f, 0x1 } + +/* Space Station Presets */ + +#define EFX_REVERB_PRESET_SPACESTATION_SMALLROOM \ + { 0.2109f, 0.7000f, 0.3162f, 0.7079f, 0.8913f, 1.7200f, 0.8200f, 0.5500f, 0.7943f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 1.4125f, 0.0130f, { 0.0000f, 0.0000f, 0.0000f }, 0.1880f, 0.2600f, 0.2500f, 0.0000f, 0.9943f, 3316.1001f, 458.2000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_SPACESTATION_SHORTPASSAGE \ + { 0.2109f, 0.8700f, 0.3162f, 0.6310f, 0.8913f, 3.5700f, 0.5000f, 0.5500f, 1.0000f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 1.1220f, 0.0160f, { 0.0000f, 0.0000f, 0.0000f }, 0.1720f, 0.2000f, 0.2500f, 0.0000f, 0.9943f, 3316.1001f, 458.2000f, 0.0000f, 0x1 } + +#define 
EFX_REVERB_PRESET_SPACESTATION_MEDIUMROOM \ + { 0.2109f, 0.7500f, 0.3162f, 0.6310f, 0.8913f, 3.0100f, 0.5000f, 0.5500f, 0.3981f, 0.0340f, { 0.0000f, 0.0000f, 0.0000f }, 1.1220f, 0.0350f, { 0.0000f, 0.0000f, 0.0000f }, 0.2090f, 0.3100f, 0.2500f, 0.0000f, 0.9943f, 3316.1001f, 458.2000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_SPACESTATION_LARGEROOM \ + { 0.3645f, 0.8100f, 0.3162f, 0.6310f, 0.8913f, 3.8900f, 0.3800f, 0.6100f, 0.3162f, 0.0560f, { 0.0000f, 0.0000f, 0.0000f }, 0.8913f, 0.0350f, { 0.0000f, 0.0000f, 0.0000f }, 0.2330f, 0.2800f, 0.2500f, 0.0000f, 0.9943f, 3316.1001f, 458.2000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_SPACESTATION_LONGPASSAGE \ + { 0.4287f, 0.8200f, 0.3162f, 0.6310f, 0.8913f, 4.6200f, 0.6200f, 0.5500f, 1.0000f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 1.2589f, 0.0310f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.2300f, 0.2500f, 0.0000f, 0.9943f, 3316.1001f, 458.2000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_SPACESTATION_HALL \ + { 0.4287f, 0.8700f, 0.3162f, 0.6310f, 0.8913f, 7.1100f, 0.3800f, 0.6100f, 0.1778f, 0.1000f, { 0.0000f, 0.0000f, 0.0000f }, 0.6310f, 0.0470f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.2500f, 0.2500f, 0.0000f, 0.9943f, 3316.1001f, 458.2000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_SPACESTATION_CUPBOARD \ + { 0.1715f, 0.5600f, 0.3162f, 0.7079f, 0.8913f, 0.7900f, 0.8100f, 0.5500f, 1.4125f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 1.7783f, 0.0180f, { 0.0000f, 0.0000f, 0.0000f }, 0.1810f, 0.3100f, 0.2500f, 0.0000f, 0.9943f, 3316.1001f, 458.2000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_SPACESTATION_ALCOVE \ + { 0.2109f, 0.7800f, 0.3162f, 0.7079f, 0.8913f, 1.1600f, 0.8100f, 0.5500f, 1.4125f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 1.0000f, 0.0180f, { 0.0000f, 0.0000f, 0.0000f }, 0.1920f, 0.2100f, 0.2500f, 0.0000f, 0.9943f, 3316.1001f, 458.2000f, 0.0000f, 0x1 } + +/* Wooden Galleon Presets */ + +#define EFX_REVERB_PRESET_WOODEN_SMALLROOM \ + { 1.0000f, 1.0000f, 0.3162f, 0.1122f, 0.3162f, 0.7900f, 0.3200f, 0.8700f, 1.0000f, 0.0320f, { 0.0000f, 0.0000f, 0.0000f }, 0.8913f, 0.0290f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 4705.0000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_WOODEN_SHORTPASSAGE \ + { 1.0000f, 1.0000f, 0.3162f, 0.1259f, 0.3162f, 1.7500f, 0.5000f, 0.8700f, 0.8913f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 0.6310f, 0.0240f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 4705.0000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_WOODEN_MEDIUMROOM \ + { 1.0000f, 1.0000f, 0.3162f, 0.1000f, 0.2818f, 1.4700f, 0.4200f, 0.8200f, 0.8913f, 0.0490f, { 0.0000f, 0.0000f, 0.0000f }, 0.8913f, 0.0290f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 4705.0000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_WOODEN_LARGEROOM \ + { 1.0000f, 1.0000f, 0.3162f, 0.0891f, 0.2818f, 2.6500f, 0.3300f, 0.8200f, 0.8913f, 0.0660f, { 0.0000f, 0.0000f, 0.0000f }, 0.7943f, 0.0490f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 4705.0000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_WOODEN_LONGPASSAGE \ + { 1.0000f, 1.0000f, 0.3162f, 0.1000f, 0.3162f, 1.9900f, 0.4000f, 0.7900f, 1.0000f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 0.4467f, 0.0360f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 4705.0000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_WOODEN_HALL \ + { 1.0000f, 1.0000f, 0.3162f, 0.0794f, 0.2818f, 3.4500f, 0.3000f, 0.8200f, 0.8913f, 0.0880f, { 0.0000f, 
0.0000f, 0.0000f }, 0.7943f, 0.0630f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 4705.0000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_WOODEN_CUPBOARD \ + { 1.0000f, 1.0000f, 0.3162f, 0.1413f, 0.3162f, 0.5600f, 0.4600f, 0.9100f, 1.1220f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 1.1220f, 0.0280f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 4705.0000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_WOODEN_COURTYARD \ + { 1.0000f, 0.6500f, 0.3162f, 0.0794f, 0.3162f, 1.7900f, 0.3500f, 0.7900f, 0.5623f, 0.1230f, { 0.0000f, 0.0000f, 0.0000f }, 0.1000f, 0.0320f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 4705.0000f, 99.6000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_WOODEN_ALCOVE \ + { 1.0000f, 1.0000f, 0.3162f, 0.1259f, 0.3162f, 1.2200f, 0.6200f, 0.9100f, 1.1220f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 0.7079f, 0.0240f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 4705.0000f, 99.6000f, 0.0000f, 0x1 } + +/* Sports Presets */ + +#define EFX_REVERB_PRESET_SPORT_EMPTYSTADIUM \ + { 1.0000f, 1.0000f, 0.3162f, 0.4467f, 0.7943f, 6.2600f, 0.5100f, 1.1000f, 0.0631f, 0.1830f, { 0.0000f, 0.0000f, 0.0000f }, 0.3981f, 0.0380f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_SPORT_SQUASHCOURT \ + { 1.0000f, 0.7500f, 0.3162f, 0.3162f, 0.7943f, 2.2200f, 0.9100f, 1.1600f, 0.4467f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 0.7943f, 0.0110f, { 0.0000f, 0.0000f, 0.0000f }, 0.1260f, 0.1900f, 0.2500f, 0.0000f, 0.9943f, 7176.8999f, 211.2000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_SPORT_SMALLSWIMMINGPOOL \ + { 1.0000f, 0.7000f, 0.3162f, 0.7943f, 0.8913f, 2.7600f, 1.2500f, 1.1400f, 0.6310f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 0.7943f, 0.0300f, { 0.0000f, 0.0000f, 0.0000f }, 0.1790f, 0.1500f, 0.8950f, 0.1900f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_SPORT_LARGESWIMMINGPOOL \ + { 1.0000f, 0.8200f, 0.3162f, 0.7943f, 1.0000f, 5.4900f, 1.3100f, 1.1400f, 0.4467f, 0.0390f, { 0.0000f, 0.0000f, 0.0000f }, 0.5012f, 0.0490f, { 0.0000f, 0.0000f, 0.0000f }, 0.2220f, 0.5500f, 1.1590f, 0.2100f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_SPORT_GYMNASIUM \ + { 1.0000f, 0.8100f, 0.3162f, 0.4467f, 0.8913f, 3.1400f, 1.0600f, 1.3500f, 0.3981f, 0.0290f, { 0.0000f, 0.0000f, 0.0000f }, 0.5623f, 0.0450f, { 0.0000f, 0.0000f, 0.0000f }, 0.1460f, 0.1400f, 0.2500f, 0.0000f, 0.9943f, 7176.8999f, 211.2000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_SPORT_FULLSTADIUM \ + { 1.0000f, 1.0000f, 0.3162f, 0.0708f, 0.7943f, 5.2500f, 0.1700f, 0.8000f, 0.1000f, 0.1880f, { 0.0000f, 0.0000f, 0.0000f }, 0.2818f, 0.0380f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_SPORT_STADIUMTANNOY \ + { 1.0000f, 0.7800f, 0.3162f, 0.5623f, 0.5012f, 2.5300f, 0.8800f, 0.6800f, 0.2818f, 0.2300f, { 0.0000f, 0.0000f, 0.0000f }, 0.5012f, 0.0630f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.2000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +/* Prefab Presets */ + +#define EFX_REVERB_PRESET_PREFAB_WORKSHOP \ + { 0.4287f, 1.0000f, 0.3162f, 0.1413f, 0.3981f, 0.7600f, 1.0000f, 1.0000f, 1.0000f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 1.1220f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 
0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_PREFAB_SCHOOLROOM \ + { 0.4022f, 0.6900f, 0.3162f, 0.6310f, 0.5012f, 0.9800f, 0.4500f, 0.1800f, 1.4125f, 0.0170f, { 0.0000f, 0.0000f, 0.0000f }, 1.4125f, 0.0150f, { 0.0000f, 0.0000f, 0.0000f }, 0.0950f, 0.1400f, 0.2500f, 0.0000f, 0.9943f, 7176.8999f, 211.2000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_PREFAB_PRACTISEROOM \ + { 0.4022f, 0.8700f, 0.3162f, 0.3981f, 0.5012f, 1.1200f, 0.5600f, 0.1800f, 1.2589f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 1.4125f, 0.0110f, { 0.0000f, 0.0000f, 0.0000f }, 0.0950f, 0.1400f, 0.2500f, 0.0000f, 0.9943f, 7176.8999f, 211.2000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_PREFAB_OUTHOUSE \ + { 1.0000f, 0.8200f, 0.3162f, 0.1122f, 0.1585f, 1.3800f, 0.3800f, 0.3500f, 0.8913f, 0.0240f, { 0.0000f, 0.0000f, -0.0000f }, 0.6310f, 0.0440f, { 0.0000f, 0.0000f, 0.0000f }, 0.1210f, 0.1700f, 0.2500f, 0.0000f, 0.9943f, 2854.3999f, 107.5000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_PREFAB_CARAVAN \ + { 1.0000f, 1.0000f, 0.3162f, 0.0891f, 0.1259f, 0.4300f, 1.5000f, 1.0000f, 1.0000f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 1.9953f, 0.0120f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +/* Dome and Pipe Presets */ + +#define EFX_REVERB_PRESET_DOME_TOMB \ + { 1.0000f, 0.7900f, 0.3162f, 0.3548f, 0.2239f, 4.1800f, 0.2100f, 0.1000f, 0.3868f, 0.0300f, { 0.0000f, 0.0000f, 0.0000f }, 1.6788f, 0.0220f, { 0.0000f, 0.0000f, 0.0000f }, 0.1770f, 0.1900f, 0.2500f, 0.0000f, 0.9943f, 2854.3999f, 20.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_PIPE_SMALL \ + { 1.0000f, 1.0000f, 0.3162f, 0.3548f, 0.2239f, 5.0400f, 0.1000f, 0.1000f, 0.5012f, 0.0320f, { 0.0000f, 0.0000f, 0.0000f }, 2.5119f, 0.0150f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 2854.3999f, 20.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_DOME_SAINTPAULS \ + { 1.0000f, 0.8700f, 0.3162f, 0.3548f, 0.2239f, 10.4800f, 0.1900f, 0.1000f, 0.1778f, 0.0900f, { 0.0000f, 0.0000f, 0.0000f }, 1.2589f, 0.0420f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.1200f, 0.2500f, 0.0000f, 0.9943f, 2854.3999f, 20.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_PIPE_LONGTHIN \ + { 0.2560f, 0.9100f, 0.3162f, 0.4467f, 0.2818f, 9.2100f, 0.1800f, 0.1000f, 0.7079f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 0.7079f, 0.0220f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 2854.3999f, 20.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_PIPE_LARGE \ + { 1.0000f, 1.0000f, 0.3162f, 0.3548f, 0.2239f, 8.4500f, 0.1000f, 0.1000f, 0.3981f, 0.0460f, { 0.0000f, 0.0000f, 0.0000f }, 1.5849f, 0.0320f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 2854.3999f, 20.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_PIPE_RESONANT \ + { 0.1373f, 0.9100f, 0.3162f, 0.4467f, 0.2818f, 6.8100f, 0.1800f, 0.1000f, 0.7079f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 1.0000f, 0.0220f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 2854.3999f, 20.0000f, 0.0000f, 0x0 } + +/* Outdoors Presets */ + +#define EFX_REVERB_PRESET_OUTDOORS_BACKYARD \ + { 1.0000f, 0.4500f, 0.3162f, 0.2512f, 0.5012f, 1.1200f, 0.3400f, 0.4600f, 0.4467f, 0.0690f, { 0.0000f, 0.0000f, -0.0000f }, 0.7079f, 0.0230f, { 0.0000f, 0.0000f, 0.0000f }, 0.2180f, 0.3400f, 0.2500f, 0.0000f, 0.9943f, 4399.1001f, 242.9000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_OUTDOORS_ROLLINGPLAINS \ + { 1.0000f, 0.0000f, 0.3162f, 0.0112f, 0.6310f, 2.1300f, 0.2100f, 0.4600f, 0.1778f, 0.3000f, { 
0.0000f, 0.0000f, -0.0000f }, 0.4467f, 0.0190f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 1.0000f, 0.2500f, 0.0000f, 0.9943f, 4399.1001f, 242.9000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_OUTDOORS_DEEPCANYON \ + { 1.0000f, 0.7400f, 0.3162f, 0.1778f, 0.6310f, 3.8900f, 0.2100f, 0.4600f, 0.3162f, 0.2230f, { 0.0000f, 0.0000f, -0.0000f }, 0.3548f, 0.0190f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 1.0000f, 0.2500f, 0.0000f, 0.9943f, 4399.1001f, 242.9000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_OUTDOORS_CREEK \ + { 1.0000f, 0.3500f, 0.3162f, 0.1778f, 0.5012f, 2.1300f, 0.2100f, 0.4600f, 0.3981f, 0.1150f, { 0.0000f, 0.0000f, -0.0000f }, 0.1995f, 0.0310f, { 0.0000f, 0.0000f, 0.0000f }, 0.2180f, 0.3400f, 0.2500f, 0.0000f, 0.9943f, 4399.1001f, 242.9000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_OUTDOORS_VALLEY \ + { 1.0000f, 0.2800f, 0.3162f, 0.0282f, 0.1585f, 2.8800f, 0.2600f, 0.3500f, 0.1413f, 0.2630f, { 0.0000f, 0.0000f, -0.0000f }, 0.3981f, 0.1000f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.3400f, 0.2500f, 0.0000f, 0.9943f, 2854.3999f, 107.5000f, 0.0000f, 0x0 } + +/* Mood Presets */ + +#define EFX_REVERB_PRESET_MOOD_HEAVEN \ + { 1.0000f, 0.9400f, 0.3162f, 0.7943f, 0.4467f, 5.0400f, 1.1200f, 0.5600f, 0.2427f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 1.2589f, 0.0290f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0800f, 2.7420f, 0.0500f, 0.9977f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_MOOD_HELL \ + { 1.0000f, 0.5700f, 0.3162f, 0.3548f, 0.4467f, 3.5700f, 0.4900f, 2.0000f, 0.0000f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 1.4125f, 0.0300f, { 0.0000f, 0.0000f, 0.0000f }, 0.1100f, 0.0400f, 2.1090f, 0.5200f, 0.9943f, 5000.0000f, 139.5000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_MOOD_MEMORY \ + { 1.0000f, 0.8500f, 0.3162f, 0.6310f, 0.3548f, 4.0600f, 0.8200f, 0.5600f, 0.0398f, 0.0000f, { 0.0000f, 0.0000f, 0.0000f }, 1.1220f, 0.0000f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.4740f, 0.4500f, 0.9886f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +/* Driving Presets */ + +#define EFX_REVERB_PRESET_DRIVING_COMMENTATOR \ + { 1.0000f, 0.0000f, 0.3162f, 0.5623f, 0.5012f, 2.4200f, 0.8800f, 0.6800f, 0.1995f, 0.0930f, { 0.0000f, 0.0000f, 0.0000f }, 0.2512f, 0.0170f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 1.0000f, 0.2500f, 0.0000f, 0.9886f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_DRIVING_PITGARAGE \ + { 0.4287f, 0.5900f, 0.3162f, 0.7079f, 0.5623f, 1.7200f, 0.9300f, 0.8700f, 0.5623f, 0.0000f, { 0.0000f, 0.0000f, 0.0000f }, 1.2589f, 0.0160f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.1100f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_DRIVING_INCAR_RACER \ + { 0.0832f, 0.8000f, 0.3162f, 1.0000f, 0.7943f, 0.1700f, 2.0000f, 0.4100f, 1.7783f, 0.0070f, { 0.0000f, 0.0000f, 0.0000f }, 0.7079f, 0.0150f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 10268.2002f, 251.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_DRIVING_INCAR_SPORTS \ + { 0.0832f, 0.8000f, 0.3162f, 0.6310f, 1.0000f, 0.1700f, 0.7500f, 0.4100f, 1.0000f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 0.5623f, 0.0000f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 10268.2002f, 251.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_DRIVING_INCAR_LUXURY \ + { 0.2560f, 1.0000f, 0.3162f, 0.1000f, 0.5012f, 0.1300f, 0.4100f, 0.4600f, 0.7943f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 1.5849f, 0.0100f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 10268.2002f, 251.0000f, 
0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_DRIVING_FULLGRANDSTAND \ + { 1.0000f, 1.0000f, 0.3162f, 0.2818f, 0.6310f, 3.0100f, 1.3700f, 1.2800f, 0.3548f, 0.0900f, { 0.0000f, 0.0000f, 0.0000f }, 0.1778f, 0.0490f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 10420.2002f, 250.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_DRIVING_EMPTYGRANDSTAND \ + { 1.0000f, 1.0000f, 0.3162f, 1.0000f, 0.7943f, 4.6200f, 1.7500f, 1.4000f, 0.2082f, 0.0900f, { 0.0000f, 0.0000f, 0.0000f }, 0.2512f, 0.0490f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.0000f, 0.9943f, 10420.2002f, 250.0000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_DRIVING_TUNNEL \ + { 1.0000f, 0.8100f, 0.3162f, 0.3981f, 0.8913f, 3.4200f, 0.9400f, 1.3100f, 0.7079f, 0.0510f, { 0.0000f, 0.0000f, 0.0000f }, 0.7079f, 0.0470f, { 0.0000f, 0.0000f, 0.0000f }, 0.2140f, 0.0500f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 155.3000f, 0.0000f, 0x1 } + +/* City Presets */ + +#define EFX_REVERB_PRESET_CITY_STREETS \ + { 1.0000f, 0.7800f, 0.3162f, 0.7079f, 0.8913f, 1.7900f, 1.1200f, 0.9100f, 0.2818f, 0.0460f, { 0.0000f, 0.0000f, 0.0000f }, 0.1995f, 0.0280f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.2000f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CITY_SUBWAY \ + { 1.0000f, 0.7400f, 0.3162f, 0.7079f, 0.8913f, 3.0100f, 1.2300f, 0.9100f, 0.7079f, 0.0460f, { 0.0000f, 0.0000f, 0.0000f }, 1.2589f, 0.0280f, { 0.0000f, 0.0000f, 0.0000f }, 0.1250f, 0.2100f, 0.2500f, 0.0000f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CITY_MUSEUM \ + { 1.0000f, 0.8200f, 0.3162f, 0.1778f, 0.1778f, 3.2800f, 1.4000f, 0.5700f, 0.2512f, 0.0390f, { 0.0000f, 0.0000f, -0.0000f }, 0.8913f, 0.0340f, { 0.0000f, 0.0000f, 0.0000f }, 0.1300f, 0.1700f, 0.2500f, 0.0000f, 0.9943f, 2854.3999f, 107.5000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_CITY_LIBRARY \ + { 1.0000f, 0.8200f, 0.3162f, 0.2818f, 0.0891f, 2.7600f, 0.8900f, 0.4100f, 0.3548f, 0.0290f, { 0.0000f, 0.0000f, -0.0000f }, 0.8913f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 0.1300f, 0.1700f, 0.2500f, 0.0000f, 0.9943f, 2854.3999f, 107.5000f, 0.0000f, 0x0 } + +#define EFX_REVERB_PRESET_CITY_UNDERPASS \ + { 1.0000f, 0.8200f, 0.3162f, 0.4467f, 0.8913f, 3.5700f, 1.1200f, 0.9100f, 0.3981f, 0.0590f, { 0.0000f, 0.0000f, 0.0000f }, 0.8913f, 0.0370f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.1400f, 0.2500f, 0.0000f, 0.9920f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CITY_ABANDONED \ + { 1.0000f, 0.6900f, 0.3162f, 0.7943f, 0.8913f, 3.2800f, 1.1700f, 0.9100f, 0.4467f, 0.0440f, { 0.0000f, 0.0000f, 0.0000f }, 0.2818f, 0.0240f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.2000f, 0.2500f, 0.0000f, 0.9966f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +/* Misc. 
Presets */ + +#define EFX_REVERB_PRESET_DUSTYROOM \ + { 0.3645f, 0.5600f, 0.3162f, 0.7943f, 0.7079f, 1.7900f, 0.3800f, 0.2100f, 0.5012f, 0.0020f, { 0.0000f, 0.0000f, 0.0000f }, 1.2589f, 0.0060f, { 0.0000f, 0.0000f, 0.0000f }, 0.2020f, 0.0500f, 0.2500f, 0.0000f, 0.9886f, 13046.0000f, 163.3000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_CHAPEL \ + { 1.0000f, 0.8400f, 0.3162f, 0.5623f, 1.0000f, 4.6200f, 0.6400f, 1.2300f, 0.4467f, 0.0320f, { 0.0000f, 0.0000f, 0.0000f }, 0.7943f, 0.0490f, { 0.0000f, 0.0000f, 0.0000f }, 0.2500f, 0.0000f, 0.2500f, 0.1100f, 0.9943f, 5000.0000f, 250.0000f, 0.0000f, 0x1 } + +#define EFX_REVERB_PRESET_SMALLWATERROOM \ + { 1.0000f, 0.7000f, 0.3162f, 0.4477f, 1.0000f, 1.5100f, 1.2500f, 1.1400f, 0.8913f, 0.0200f, { 0.0000f, 0.0000f, 0.0000f }, 1.4125f, 0.0300f, { 0.0000f, 0.0000f, 0.0000f }, 0.1790f, 0.1500f, 0.8950f, 0.1900f, 0.9920f, 5000.0000f, 250.0000f, 0.0000f, 0x0 } + +#endif /* EFX_PRESETS_H */ diff --git a/3rdparty/OpenAL/include/efx.h b/3rdparty/OpenAL/include/efx.h new file mode 100644 index 0000000000..5ab64a64d7 --- /dev/null +++ b/3rdparty/OpenAL/include/efx.h @@ -0,0 +1,762 @@ +#ifndef AL_EFX_H +#define AL_EFX_H + +#include <float.h> + +#include "alc.h" +#include "al.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define ALC_EXT_EFX_NAME "ALC_EXT_EFX" + +#define ALC_EFX_MAJOR_VERSION 0x20001 +#define ALC_EFX_MINOR_VERSION 0x20002 +#define ALC_MAX_AUXILIARY_SENDS 0x20003 + + +/* Listener properties. */ +#define AL_METERS_PER_UNIT 0x20004 + +/* Source properties. */ +#define AL_DIRECT_FILTER 0x20005 +#define AL_AUXILIARY_SEND_FILTER 0x20006 +#define AL_AIR_ABSORPTION_FACTOR 0x20007 +#define AL_ROOM_ROLLOFF_FACTOR 0x20008 +#define AL_CONE_OUTER_GAINHF 0x20009 +#define AL_DIRECT_FILTER_GAINHF_AUTO 0x2000A +#define AL_AUXILIARY_SEND_FILTER_GAIN_AUTO 0x2000B +#define AL_AUXILIARY_SEND_FILTER_GAINHF_AUTO 0x2000C + +
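A quick usage sketch of the definitions above (not part of the header itself): EFX functions are extension entry points fetched at runtime through alGetProcAddress, and each EFX_REVERB_PRESET_* macro expands to a brace initializer for the EFXEAXREVERBPROPERTIES struct declared at the top of efx-presets.h. The helper name below is hypothetical, a current ALC context reporting ALC_EXT_EFX is assumed, and error checking is omitted; a complete loader would copy every preset field.

#include "efx.h"
#include "efx-presets.h"

static ALuint make_city_subway_reverb(void)
{
    /* EFX entry points are not exported directly; fetch them at runtime. */
    LPALGENEFFECTS genEffects = (LPALGENEFFECTS)alGetProcAddress("alGenEffects");
    LPALEFFECTI    effecti    = (LPALEFFECTI)alGetProcAddress("alEffecti");
    LPALEFFECTF    effectf    = (LPALEFFECTF)alGetProcAddress("alEffectf");

    /* The preset macro initializes an EFXEAXREVERBPROPERTIES value. */
    EFXEAXREVERBPROPERTIES preset = EFX_REVERB_PRESET_CITY_SUBWAY;
    ALuint effect = 0;

    genEffects(1, &effect);
    effecti(effect, AL_EFFECT_TYPE, AL_EFFECT_EAXREVERB);
    effectf(effect, AL_EAXREVERB_GAIN, preset.flGain);            /* two of the */
    effectf(effect, AL_EAXREVERB_DECAY_TIME, preset.flDecayTime); /* many fields */
    return effect;
}

+/* Effect properties.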
*/ + +/* Reverb effect parameters */ +#define AL_REVERB_DENSITY 0x0001 +#define AL_REVERB_DIFFUSION 0x0002 +#define AL_REVERB_GAIN 0x0003 +#define AL_REVERB_GAINHF 0x0004 +#define AL_REVERB_DECAY_TIME 0x0005 +#define AL_REVERB_DECAY_HFRATIO 0x0006 +#define AL_REVERB_REFLECTIONS_GAIN 0x0007 +#define AL_REVERB_REFLECTIONS_DELAY 0x0008 +#define AL_REVERB_LATE_REVERB_GAIN 0x0009 +#define AL_REVERB_LATE_REVERB_DELAY 0x000A +#define AL_REVERB_AIR_ABSORPTION_GAINHF 0x000B +#define AL_REVERB_ROOM_ROLLOFF_FACTOR 0x000C +#define AL_REVERB_DECAY_HFLIMIT 0x000D + +/* EAX Reverb effect parameters */ +#define AL_EAXREVERB_DENSITY 0x0001 +#define AL_EAXREVERB_DIFFUSION 0x0002 +#define AL_EAXREVERB_GAIN 0x0003 +#define AL_EAXREVERB_GAINHF 0x0004 +#define AL_EAXREVERB_GAINLF 0x0005 +#define AL_EAXREVERB_DECAY_TIME 0x0006 +#define AL_EAXREVERB_DECAY_HFRATIO 0x0007 +#define AL_EAXREVERB_DECAY_LFRATIO 0x0008 +#define AL_EAXREVERB_REFLECTIONS_GAIN 0x0009 +#define AL_EAXREVERB_REFLECTIONS_DELAY 0x000A +#define AL_EAXREVERB_REFLECTIONS_PAN 0x000B +#define AL_EAXREVERB_LATE_REVERB_GAIN 0x000C +#define AL_EAXREVERB_LATE_REVERB_DELAY 0x000D +#define AL_EAXREVERB_LATE_REVERB_PAN 0x000E +#define AL_EAXREVERB_ECHO_TIME 0x000F +#define AL_EAXREVERB_ECHO_DEPTH 0x0010 +#define AL_EAXREVERB_MODULATION_TIME 0x0011 +#define AL_EAXREVERB_MODULATION_DEPTH 0x0012 +#define AL_EAXREVERB_AIR_ABSORPTION_GAINHF 0x0013 +#define AL_EAXREVERB_HFREFERENCE 0x0014 +#define AL_EAXREVERB_LFREFERENCE 0x0015 +#define AL_EAXREVERB_ROOM_ROLLOFF_FACTOR 0x0016 +#define AL_EAXREVERB_DECAY_HFLIMIT 0x0017 + +/* Chorus effect parameters */ +#define AL_CHORUS_WAVEFORM 0x0001 +#define AL_CHORUS_PHASE 0x0002 +#define AL_CHORUS_RATE 0x0003 +#define AL_CHORUS_DEPTH 0x0004 +#define AL_CHORUS_FEEDBACK 0x0005 +#define AL_CHORUS_DELAY 0x0006 + +/* Distortion effect parameters */ +#define AL_DISTORTION_EDGE 0x0001 +#define AL_DISTORTION_GAIN 0x0002 +#define AL_DISTORTION_LOWPASS_CUTOFF 0x0003 +#define AL_DISTORTION_EQCENTER 0x0004 +#define AL_DISTORTION_EQBANDWIDTH 0x0005 + +/* Echo effect parameters */ +#define AL_ECHO_DELAY 0x0001 +#define AL_ECHO_LRDELAY 0x0002 +#define AL_ECHO_DAMPING 0x0003 +#define AL_ECHO_FEEDBACK 0x0004 +#define AL_ECHO_SPREAD 0x0005 + +/* Flanger effect parameters */ +#define AL_FLANGER_WAVEFORM 0x0001 +#define AL_FLANGER_PHASE 0x0002 +#define AL_FLANGER_RATE 0x0003 +#define AL_FLANGER_DEPTH 0x0004 +#define AL_FLANGER_FEEDBACK 0x0005 +#define AL_FLANGER_DELAY 0x0006 + +/* Frequency shifter effect parameters */ +#define AL_FREQUENCY_SHIFTER_FREQUENCY 0x0001 +#define AL_FREQUENCY_SHIFTER_LEFT_DIRECTION 0x0002 +#define AL_FREQUENCY_SHIFTER_RIGHT_DIRECTION 0x0003 + +/* Vocal morpher effect parameters */ +#define AL_VOCAL_MORPHER_PHONEMEA 0x0001 +#define AL_VOCAL_MORPHER_PHONEMEA_COARSE_TUNING 0x0002 +#define AL_VOCAL_MORPHER_PHONEMEB 0x0003 +#define AL_VOCAL_MORPHER_PHONEMEB_COARSE_TUNING 0x0004 +#define AL_VOCAL_MORPHER_WAVEFORM 0x0005 +#define AL_VOCAL_MORPHER_RATE 0x0006 + +/* Pitchshifter effect parameters */ +#define AL_PITCH_SHIFTER_COARSE_TUNE 0x0001 +#define AL_PITCH_SHIFTER_FINE_TUNE 0x0002 + +/* Ringmodulator effect parameters */ +#define AL_RING_MODULATOR_FREQUENCY 0x0001 +#define AL_RING_MODULATOR_HIGHPASS_CUTOFF 0x0002 +#define AL_RING_MODULATOR_WAVEFORM 0x0003 + +/* Autowah effect parameters */ +#define AL_AUTOWAH_ATTACK_TIME 0x0001 +#define AL_AUTOWAH_RELEASE_TIME 0x0002 +#define AL_AUTOWAH_RESONANCE 0x0003 +#define AL_AUTOWAH_PEAK_GAIN 0x0004 + +/* Compressor effect parameters */ +#define AL_COMPRESSOR_ONOFF 0x0001 + +/* 
Equalizer effect parameters */ +#define AL_EQUALIZER_LOW_GAIN 0x0001 +#define AL_EQUALIZER_LOW_CUTOFF 0x0002 +#define AL_EQUALIZER_MID1_GAIN 0x0003 +#define AL_EQUALIZER_MID1_CENTER 0x0004 +#define AL_EQUALIZER_MID1_WIDTH 0x0005 +#define AL_EQUALIZER_MID2_GAIN 0x0006 +#define AL_EQUALIZER_MID2_CENTER 0x0007 +#define AL_EQUALIZER_MID2_WIDTH 0x0008 +#define AL_EQUALIZER_HIGH_GAIN 0x0009 +#define AL_EQUALIZER_HIGH_CUTOFF 0x000A + +/* Effect type */ +#define AL_EFFECT_FIRST_PARAMETER 0x0000 +#define AL_EFFECT_LAST_PARAMETER 0x8000 +#define AL_EFFECT_TYPE 0x8001 + +/* Effect types, used with the AL_EFFECT_TYPE property */ +#define AL_EFFECT_NULL 0x0000 +#define AL_EFFECT_REVERB 0x0001 +#define AL_EFFECT_CHORUS 0x0002 +#define AL_EFFECT_DISTORTION 0x0003 +#define AL_EFFECT_ECHO 0x0004 +#define AL_EFFECT_FLANGER 0x0005 +#define AL_EFFECT_FREQUENCY_SHIFTER 0x0006 +#define AL_EFFECT_VOCAL_MORPHER 0x0007 +#define AL_EFFECT_PITCH_SHIFTER 0x0008 +#define AL_EFFECT_RING_MODULATOR 0x0009 +#define AL_EFFECT_AUTOWAH 0x000A +#define AL_EFFECT_COMPRESSOR 0x000B +#define AL_EFFECT_EQUALIZER 0x000C +#define AL_EFFECT_EAXREVERB 0x8000 + +/* Auxiliary Effect Slot properties. */ +#define AL_EFFECTSLOT_EFFECT 0x0001 +#define AL_EFFECTSLOT_GAIN 0x0002 +#define AL_EFFECTSLOT_AUXILIARY_SEND_AUTO 0x0003 + +/* NULL Auxiliary Slot ID to disable a source send. */ +#define AL_EFFECTSLOT_NULL 0x0000 + + +/* Filter properties. */ + +/* Lowpass filter parameters */ +#define AL_LOWPASS_GAIN 0x0001 +#define AL_LOWPASS_GAINHF 0x0002 + +/* Highpass filter parameters */ +#define AL_HIGHPASS_GAIN 0x0001 +#define AL_HIGHPASS_GAINLF 0x0002 + +/* Bandpass filter parameters */ +#define AL_BANDPASS_GAIN 0x0001 +#define AL_BANDPASS_GAINLF 0x0002 +#define AL_BANDPASS_GAINHF 0x0003 + +/* Filter type */ +#define AL_FILTER_FIRST_PARAMETER 0x0000 +#define AL_FILTER_LAST_PARAMETER 0x8000 +#define AL_FILTER_TYPE 0x8001 + +/* Filter types, used with the AL_FILTER_TYPE property */ +#define AL_FILTER_NULL 0x0000 +#define AL_FILTER_LOWPASS 0x0001 +#define AL_FILTER_HIGHPASS 0x0002 +#define AL_FILTER_BANDPASS 0x0003 + + +/* Effect object function types. */ +typedef void (AL_APIENTRY *LPALGENEFFECTS)(ALsizei, ALuint*); +typedef void (AL_APIENTRY *LPALDELETEEFFECTS)(ALsizei, const ALuint*); +typedef ALboolean (AL_APIENTRY *LPALISEFFECT)(ALuint); +typedef void (AL_APIENTRY *LPALEFFECTI)(ALuint, ALenum, ALint); +typedef void (AL_APIENTRY *LPALEFFECTIV)(ALuint, ALenum, const ALint*); +typedef void (AL_APIENTRY *LPALEFFECTF)(ALuint, ALenum, ALfloat); +typedef void (AL_APIENTRY *LPALEFFECTFV)(ALuint, ALenum, const ALfloat*); +typedef void (AL_APIENTRY *LPALGETEFFECTI)(ALuint, ALenum, ALint*); +typedef void (AL_APIENTRY *LPALGETEFFECTIV)(ALuint, ALenum, ALint*); +typedef void (AL_APIENTRY *LPALGETEFFECTF)(ALuint, ALenum, ALfloat*); +typedef void (AL_APIENTRY *LPALGETEFFECTFV)(ALuint, ALenum, ALfloat*); + +/* Filter object function types. 
*/ +typedef void (AL_APIENTRY *LPALGENFILTERS)(ALsizei, ALuint*); +typedef void (AL_APIENTRY *LPALDELETEFILTERS)(ALsizei, const ALuint*); +typedef ALboolean (AL_APIENTRY *LPALISFILTER)(ALuint); +typedef void (AL_APIENTRY *LPALFILTERI)(ALuint, ALenum, ALint); +typedef void (AL_APIENTRY *LPALFILTERIV)(ALuint, ALenum, const ALint*); +typedef void (AL_APIENTRY *LPALFILTERF)(ALuint, ALenum, ALfloat); +typedef void (AL_APIENTRY *LPALFILTERFV)(ALuint, ALenum, const ALfloat*); +typedef void (AL_APIENTRY *LPALGETFILTERI)(ALuint, ALenum, ALint*); +typedef void (AL_APIENTRY *LPALGETFILTERIV)(ALuint, ALenum, ALint*); +typedef void (AL_APIENTRY *LPALGETFILTERF)(ALuint, ALenum, ALfloat*); +typedef void (AL_APIENTRY *LPALGETFILTERFV)(ALuint, ALenum, ALfloat*); + +/* Auxiliary Effect Slot object function types. */ +typedef void (AL_APIENTRY *LPALGENAUXILIARYEFFECTSLOTS)(ALsizei, ALuint*); +typedef void (AL_APIENTRY *LPALDELETEAUXILIARYEFFECTSLOTS)(ALsizei, const ALuint*); +typedef ALboolean (AL_APIENTRY *LPALISAUXILIARYEFFECTSLOT)(ALuint); +typedef void (AL_APIENTRY *LPALAUXILIARYEFFECTSLOTI)(ALuint, ALenum, ALint); +typedef void (AL_APIENTRY *LPALAUXILIARYEFFECTSLOTIV)(ALuint, ALenum, const ALint*); +typedef void (AL_APIENTRY *LPALAUXILIARYEFFECTSLOTF)(ALuint, ALenum, ALfloat); +typedef void (AL_APIENTRY *LPALAUXILIARYEFFECTSLOTFV)(ALuint, ALenum, const ALfloat*); +typedef void (AL_APIENTRY *LPALGETAUXILIARYEFFECTSLOTI)(ALuint, ALenum, ALint*); +typedef void (AL_APIENTRY *LPALGETAUXILIARYEFFECTSLOTIV)(ALuint, ALenum, ALint*); +typedef void (AL_APIENTRY *LPALGETAUXILIARYEFFECTSLOTF)(ALuint, ALenum, ALfloat*); +typedef void (AL_APIENTRY *LPALGETAUXILIARYEFFECTSLOTFV)(ALuint, ALenum, ALfloat*); + +#ifdef AL_ALEXT_PROTOTYPES +AL_API void AL_APIENTRY alGenEffects(ALsizei n, ALuint *effects); +AL_API void AL_APIENTRY alDeleteEffects(ALsizei n, const ALuint *effects); +AL_API ALboolean AL_APIENTRY alIsEffect(ALuint effect); +AL_API void AL_APIENTRY alEffecti(ALuint effect, ALenum param, ALint iValue); +AL_API void AL_APIENTRY alEffectiv(ALuint effect, ALenum param, const ALint *piValues); +AL_API void AL_APIENTRY alEffectf(ALuint effect, ALenum param, ALfloat flValue); +AL_API void AL_APIENTRY alEffectfv(ALuint effect, ALenum param, const ALfloat *pflValues); +AL_API void AL_APIENTRY alGetEffecti(ALuint effect, ALenum param, ALint *piValue); +AL_API void AL_APIENTRY alGetEffectiv(ALuint effect, ALenum param, ALint *piValues); +AL_API void AL_APIENTRY alGetEffectf(ALuint effect, ALenum param, ALfloat *pflValue); +AL_API void AL_APIENTRY alGetEffectfv(ALuint effect, ALenum param, ALfloat *pflValues); + +AL_API void AL_APIENTRY alGenFilters(ALsizei n, ALuint *filters); +AL_API void AL_APIENTRY alDeleteFilters(ALsizei n, const ALuint *filters); +AL_API ALboolean AL_APIENTRY alIsFilter(ALuint filter); +AL_API void AL_APIENTRY alFilteri(ALuint filter, ALenum param, ALint iValue); +AL_API void AL_APIENTRY alFilteriv(ALuint filter, ALenum param, const ALint *piValues); +AL_API void AL_APIENTRY alFilterf(ALuint filter, ALenum param, ALfloat flValue); +AL_API void AL_APIENTRY alFilterfv(ALuint filter, ALenum param, const ALfloat *pflValues); +AL_API void AL_APIENTRY alGetFilteri(ALuint filter, ALenum param, ALint *piValue); +AL_API void AL_APIENTRY alGetFilteriv(ALuint filter, ALenum param, ALint *piValues); +AL_API void AL_APIENTRY alGetFilterf(ALuint filter, ALenum param, ALfloat *pflValue); +AL_API void AL_APIENTRY alGetFilterfv(ALuint filter, ALenum param, ALfloat *pflValues); + +AL_API void AL_APIENTRY 
alGenAuxiliaryEffectSlots(ALsizei n, ALuint *effectslots); +AL_API void AL_APIENTRY alDeleteAuxiliaryEffectSlots(ALsizei n, const ALuint *effectslots); +AL_API ALboolean AL_APIENTRY alIsAuxiliaryEffectSlot(ALuint effectslot); +AL_API void AL_APIENTRY alAuxiliaryEffectSloti(ALuint effectslot, ALenum param, ALint iValue); +AL_API void AL_APIENTRY alAuxiliaryEffectSlotiv(ALuint effectslot, ALenum param, const ALint *piValues); +AL_API void AL_APIENTRY alAuxiliaryEffectSlotf(ALuint effectslot, ALenum param, ALfloat flValue); +AL_API void AL_APIENTRY alAuxiliaryEffectSlotfv(ALuint effectslot, ALenum param, const ALfloat *pflValues); +AL_API void AL_APIENTRY alGetAuxiliaryEffectSloti(ALuint effectslot, ALenum param, ALint *piValue); +AL_API void AL_APIENTRY alGetAuxiliaryEffectSlotiv(ALuint effectslot, ALenum param, ALint *piValues); +AL_API void AL_APIENTRY alGetAuxiliaryEffectSlotf(ALuint effectslot, ALenum param, ALfloat *pflValue); +AL_API void AL_APIENTRY alGetAuxiliaryEffectSlotfv(ALuint effectslot, ALenum param, ALfloat *pflValues); +#endif + +/* Filter ranges and defaults. */ + +/* Lowpass filter */ +#define AL_LOWPASS_MIN_GAIN (0.0f) +#define AL_LOWPASS_MAX_GAIN (1.0f) +#define AL_LOWPASS_DEFAULT_GAIN (1.0f) + +#define AL_LOWPASS_MIN_GAINHF (0.0f) +#define AL_LOWPASS_MAX_GAINHF (1.0f) +#define AL_LOWPASS_DEFAULT_GAINHF (1.0f) + +/* Highpass filter */ +#define AL_HIGHPASS_MIN_GAIN (0.0f) +#define AL_HIGHPASS_MAX_GAIN (1.0f) +#define AL_HIGHPASS_DEFAULT_GAIN (1.0f) + +#define AL_HIGHPASS_MIN_GAINLF (0.0f) +#define AL_HIGHPASS_MAX_GAINLF (1.0f) +#define AL_HIGHPASS_DEFAULT_GAINLF (1.0f) + +/* Bandpass filter */ +#define AL_BANDPASS_MIN_GAIN (0.0f) +#define AL_BANDPASS_MAX_GAIN (1.0f) +#define AL_BANDPASS_DEFAULT_GAIN (1.0f) + +#define AL_BANDPASS_MIN_GAINHF (0.0f) +#define AL_BANDPASS_MAX_GAINHF (1.0f) +#define AL_BANDPASS_DEFAULT_GAINHF (1.0f) + +#define AL_BANDPASS_MIN_GAINLF (0.0f) +#define AL_BANDPASS_MAX_GAINLF (1.0f) +#define AL_BANDPASS_DEFAULT_GAINLF (1.0f) + + +/* Effect parameter ranges and defaults. 
*/ + +/* Standard reverb effect */ +#define AL_REVERB_MIN_DENSITY (0.0f) +#define AL_REVERB_MAX_DENSITY (1.0f) +#define AL_REVERB_DEFAULT_DENSITY (1.0f) + +#define AL_REVERB_MIN_DIFFUSION (0.0f) +#define AL_REVERB_MAX_DIFFUSION (1.0f) +#define AL_REVERB_DEFAULT_DIFFUSION (1.0f) + +#define AL_REVERB_MIN_GAIN (0.0f) +#define AL_REVERB_MAX_GAIN (1.0f) +#define AL_REVERB_DEFAULT_GAIN (0.32f) + +#define AL_REVERB_MIN_GAINHF (0.0f) +#define AL_REVERB_MAX_GAINHF (1.0f) +#define AL_REVERB_DEFAULT_GAINHF (0.89f) + +#define AL_REVERB_MIN_DECAY_TIME (0.1f) +#define AL_REVERB_MAX_DECAY_TIME (20.0f) +#define AL_REVERB_DEFAULT_DECAY_TIME (1.49f) + +#define AL_REVERB_MIN_DECAY_HFRATIO (0.1f) +#define AL_REVERB_MAX_DECAY_HFRATIO (2.0f) +#define AL_REVERB_DEFAULT_DECAY_HFRATIO (0.83f) + +#define AL_REVERB_MIN_REFLECTIONS_GAIN (0.0f) +#define AL_REVERB_MAX_REFLECTIONS_GAIN (3.16f) +#define AL_REVERB_DEFAULT_REFLECTIONS_GAIN (0.05f) + +#define AL_REVERB_MIN_REFLECTIONS_DELAY (0.0f) +#define AL_REVERB_MAX_REFLECTIONS_DELAY (0.3f) +#define AL_REVERB_DEFAULT_REFLECTIONS_DELAY (0.007f) + +#define AL_REVERB_MIN_LATE_REVERB_GAIN (0.0f) +#define AL_REVERB_MAX_LATE_REVERB_GAIN (10.0f) +#define AL_REVERB_DEFAULT_LATE_REVERB_GAIN (1.26f) + +#define AL_REVERB_MIN_LATE_REVERB_DELAY (0.0f) +#define AL_REVERB_MAX_LATE_REVERB_DELAY (0.1f) +#define AL_REVERB_DEFAULT_LATE_REVERB_DELAY (0.011f) + +#define AL_REVERB_MIN_AIR_ABSORPTION_GAINHF (0.892f) +#define AL_REVERB_MAX_AIR_ABSORPTION_GAINHF (1.0f) +#define AL_REVERB_DEFAULT_AIR_ABSORPTION_GAINHF (0.994f) + +#define AL_REVERB_MIN_ROOM_ROLLOFF_FACTOR (0.0f) +#define AL_REVERB_MAX_ROOM_ROLLOFF_FACTOR (10.0f) +#define AL_REVERB_DEFAULT_ROOM_ROLLOFF_FACTOR (0.0f) + +#define AL_REVERB_MIN_DECAY_HFLIMIT AL_FALSE +#define AL_REVERB_MAX_DECAY_HFLIMIT AL_TRUE +#define AL_REVERB_DEFAULT_DECAY_HFLIMIT AL_TRUE + +/* EAX reverb effect */ +#define AL_EAXREVERB_MIN_DENSITY (0.0f) +#define AL_EAXREVERB_MAX_DENSITY (1.0f) +#define AL_EAXREVERB_DEFAULT_DENSITY (1.0f) + +#define AL_EAXREVERB_MIN_DIFFUSION (0.0f) +#define AL_EAXREVERB_MAX_DIFFUSION (1.0f) +#define AL_EAXREVERB_DEFAULT_DIFFUSION (1.0f) + +#define AL_EAXREVERB_MIN_GAIN (0.0f) +#define AL_EAXREVERB_MAX_GAIN (1.0f) +#define AL_EAXREVERB_DEFAULT_GAIN (0.32f) + +#define AL_EAXREVERB_MIN_GAINHF (0.0f) +#define AL_EAXREVERB_MAX_GAINHF (1.0f) +#define AL_EAXREVERB_DEFAULT_GAINHF (0.89f) + +#define AL_EAXREVERB_MIN_GAINLF (0.0f) +#define AL_EAXREVERB_MAX_GAINLF (1.0f) +#define AL_EAXREVERB_DEFAULT_GAINLF (1.0f) + +#define AL_EAXREVERB_MIN_DECAY_TIME (0.1f) +#define AL_EAXREVERB_MAX_DECAY_TIME (20.0f) +#define AL_EAXREVERB_DEFAULT_DECAY_TIME (1.49f) + +#define AL_EAXREVERB_MIN_DECAY_HFRATIO (0.1f) +#define AL_EAXREVERB_MAX_DECAY_HFRATIO (2.0f) +#define AL_EAXREVERB_DEFAULT_DECAY_HFRATIO (0.83f) + +#define AL_EAXREVERB_MIN_DECAY_LFRATIO (0.1f) +#define AL_EAXREVERB_MAX_DECAY_LFRATIO (2.0f) +#define AL_EAXREVERB_DEFAULT_DECAY_LFRATIO (1.0f) + +#define AL_EAXREVERB_MIN_REFLECTIONS_GAIN (0.0f) +#define AL_EAXREVERB_MAX_REFLECTIONS_GAIN (3.16f) +#define AL_EAXREVERB_DEFAULT_REFLECTIONS_GAIN (0.05f) + +#define AL_EAXREVERB_MIN_REFLECTIONS_DELAY (0.0f) +#define AL_EAXREVERB_MAX_REFLECTIONS_DELAY (0.3f) +#define AL_EAXREVERB_DEFAULT_REFLECTIONS_DELAY (0.007f) + +#define AL_EAXREVERB_DEFAULT_REFLECTIONS_PAN_XYZ (0.0f) + +#define AL_EAXREVERB_MIN_LATE_REVERB_GAIN (0.0f) +#define AL_EAXREVERB_MAX_LATE_REVERB_GAIN (10.0f) +#define AL_EAXREVERB_DEFAULT_LATE_REVERB_GAIN (1.26f) + +#define AL_EAXREVERB_MIN_LATE_REVERB_DELAY (0.0f) +#define 
AL_EAXREVERB_MAX_LATE_REVERB_DELAY (0.1f) +#define AL_EAXREVERB_DEFAULT_LATE_REVERB_DELAY (0.011f) + +#define AL_EAXREVERB_DEFAULT_LATE_REVERB_PAN_XYZ (0.0f) + +#define AL_EAXREVERB_MIN_ECHO_TIME (0.075f) +#define AL_EAXREVERB_MAX_ECHO_TIME (0.25f) +#define AL_EAXREVERB_DEFAULT_ECHO_TIME (0.25f) + +#define AL_EAXREVERB_MIN_ECHO_DEPTH (0.0f) +#define AL_EAXREVERB_MAX_ECHO_DEPTH (1.0f) +#define AL_EAXREVERB_DEFAULT_ECHO_DEPTH (0.0f) + +#define AL_EAXREVERB_MIN_MODULATION_TIME (0.04f) +#define AL_EAXREVERB_MAX_MODULATION_TIME (4.0f) +#define AL_EAXREVERB_DEFAULT_MODULATION_TIME (0.25f) + +#define AL_EAXREVERB_MIN_MODULATION_DEPTH (0.0f) +#define AL_EAXREVERB_MAX_MODULATION_DEPTH (1.0f) +#define AL_EAXREVERB_DEFAULT_MODULATION_DEPTH (0.0f) + +#define AL_EAXREVERB_MIN_AIR_ABSORPTION_GAINHF (0.892f) +#define AL_EAXREVERB_MAX_AIR_ABSORPTION_GAINHF (1.0f) +#define AL_EAXREVERB_DEFAULT_AIR_ABSORPTION_GAINHF (0.994f) + +#define AL_EAXREVERB_MIN_HFREFERENCE (1000.0f) +#define AL_EAXREVERB_MAX_HFREFERENCE (20000.0f) +#define AL_EAXREVERB_DEFAULT_HFREFERENCE (5000.0f) + +#define AL_EAXREVERB_MIN_LFREFERENCE (20.0f) +#define AL_EAXREVERB_MAX_LFREFERENCE (1000.0f) +#define AL_EAXREVERB_DEFAULT_LFREFERENCE (250.0f) + +#define AL_EAXREVERB_MIN_ROOM_ROLLOFF_FACTOR (0.0f) +#define AL_EAXREVERB_MAX_ROOM_ROLLOFF_FACTOR (10.0f) +#define AL_EAXREVERB_DEFAULT_ROOM_ROLLOFF_FACTOR (0.0f) + +#define AL_EAXREVERB_MIN_DECAY_HFLIMIT AL_FALSE +#define AL_EAXREVERB_MAX_DECAY_HFLIMIT AL_TRUE +#define AL_EAXREVERB_DEFAULT_DECAY_HFLIMIT AL_TRUE + +/* Chorus effect */ +#define AL_CHORUS_WAVEFORM_SINUSOID (0) +#define AL_CHORUS_WAVEFORM_TRIANGLE (1) + +#define AL_CHORUS_MIN_WAVEFORM (0) +#define AL_CHORUS_MAX_WAVEFORM (1) +#define AL_CHORUS_DEFAULT_WAVEFORM (1) + +#define AL_CHORUS_MIN_PHASE (-180) +#define AL_CHORUS_MAX_PHASE (180) +#define AL_CHORUS_DEFAULT_PHASE (90) + +#define AL_CHORUS_MIN_RATE (0.0f) +#define AL_CHORUS_MAX_RATE (10.0f) +#define AL_CHORUS_DEFAULT_RATE (1.1f) + +#define AL_CHORUS_MIN_DEPTH (0.0f) +#define AL_CHORUS_MAX_DEPTH (1.0f) +#define AL_CHORUS_DEFAULT_DEPTH (0.1f) + +#define AL_CHORUS_MIN_FEEDBACK (-1.0f) +#define AL_CHORUS_MAX_FEEDBACK (1.0f) +#define AL_CHORUS_DEFAULT_FEEDBACK (0.25f) + +#define AL_CHORUS_MIN_DELAY (0.0f) +#define AL_CHORUS_MAX_DELAY (0.016f) +#define AL_CHORUS_DEFAULT_DELAY (0.016f) + +/* Distortion effect */ +#define AL_DISTORTION_MIN_EDGE (0.0f) +#define AL_DISTORTION_MAX_EDGE (1.0f) +#define AL_DISTORTION_DEFAULT_EDGE (0.2f) + +#define AL_DISTORTION_MIN_GAIN (0.01f) +#define AL_DISTORTION_MAX_GAIN (1.0f) +#define AL_DISTORTION_DEFAULT_GAIN (0.05f) + +#define AL_DISTORTION_MIN_LOWPASS_CUTOFF (80.0f) +#define AL_DISTORTION_MAX_LOWPASS_CUTOFF (24000.0f) +#define AL_DISTORTION_DEFAULT_LOWPASS_CUTOFF (8000.0f) + +#define AL_DISTORTION_MIN_EQCENTER (80.0f) +#define AL_DISTORTION_MAX_EQCENTER (24000.0f) +#define AL_DISTORTION_DEFAULT_EQCENTER (3600.0f) + +#define AL_DISTORTION_MIN_EQBANDWIDTH (80.0f) +#define AL_DISTORTION_MAX_EQBANDWIDTH (24000.0f) +#define AL_DISTORTION_DEFAULT_EQBANDWIDTH (3600.0f) + +/* Echo effect */ +#define AL_ECHO_MIN_DELAY (0.0f) +#define AL_ECHO_MAX_DELAY (0.207f) +#define AL_ECHO_DEFAULT_DELAY (0.1f) + +#define AL_ECHO_MIN_LRDELAY (0.0f) +#define AL_ECHO_MAX_LRDELAY (0.404f) +#define AL_ECHO_DEFAULT_LRDELAY (0.1f) + +#define AL_ECHO_MIN_DAMPING (0.0f) +#define AL_ECHO_MAX_DAMPING (0.99f) +#define AL_ECHO_DEFAULT_DAMPING (0.5f) + +#define AL_ECHO_MIN_FEEDBACK (0.0f) +#define AL_ECHO_MAX_FEEDBACK (1.0f) +#define AL_ECHO_DEFAULT_FEEDBACK (0.5f) + +#define 
AL_ECHO_MIN_SPREAD (-1.0f) +#define AL_ECHO_MAX_SPREAD (1.0f) +#define AL_ECHO_DEFAULT_SPREAD (-1.0f) + +/* Flanger effect */ +#define AL_FLANGER_WAVEFORM_SINUSOID (0) +#define AL_FLANGER_WAVEFORM_TRIANGLE (1) + +#define AL_FLANGER_MIN_WAVEFORM (0) +#define AL_FLANGER_MAX_WAVEFORM (1) +#define AL_FLANGER_DEFAULT_WAVEFORM (1) + +#define AL_FLANGER_MIN_PHASE (-180) +#define AL_FLANGER_MAX_PHASE (180) +#define AL_FLANGER_DEFAULT_PHASE (0) + +#define AL_FLANGER_MIN_RATE (0.0f) +#define AL_FLANGER_MAX_RATE (10.0f) +#define AL_FLANGER_DEFAULT_RATE (0.27f) + +#define AL_FLANGER_MIN_DEPTH (0.0f) +#define AL_FLANGER_MAX_DEPTH (1.0f) +#define AL_FLANGER_DEFAULT_DEPTH (1.0f) + +#define AL_FLANGER_MIN_FEEDBACK (-1.0f) +#define AL_FLANGER_MAX_FEEDBACK (1.0f) +#define AL_FLANGER_DEFAULT_FEEDBACK (-0.5f) + +#define AL_FLANGER_MIN_DELAY (0.0f) +#define AL_FLANGER_MAX_DELAY (0.004f) +#define AL_FLANGER_DEFAULT_DELAY (0.002f) + +/* Frequency shifter effect */ +#define AL_FREQUENCY_SHIFTER_MIN_FREQUENCY (0.0f) +#define AL_FREQUENCY_SHIFTER_MAX_FREQUENCY (24000.0f) +#define AL_FREQUENCY_SHIFTER_DEFAULT_FREQUENCY (0.0f) + +#define AL_FREQUENCY_SHIFTER_MIN_LEFT_DIRECTION (0) +#define AL_FREQUENCY_SHIFTER_MAX_LEFT_DIRECTION (2) +#define AL_FREQUENCY_SHIFTER_DEFAULT_LEFT_DIRECTION (0) + +#define AL_FREQUENCY_SHIFTER_DIRECTION_DOWN (0) +#define AL_FREQUENCY_SHIFTER_DIRECTION_UP (1) +#define AL_FREQUENCY_SHIFTER_DIRECTION_OFF (2) + +#define AL_FREQUENCY_SHIFTER_MIN_RIGHT_DIRECTION (0) +#define AL_FREQUENCY_SHIFTER_MAX_RIGHT_DIRECTION (2) +#define AL_FREQUENCY_SHIFTER_DEFAULT_RIGHT_DIRECTION (0) + +/* Vocal morpher effect */ +#define AL_VOCAL_MORPHER_MIN_PHONEMEA (0) +#define AL_VOCAL_MORPHER_MAX_PHONEMEA (29) +#define AL_VOCAL_MORPHER_DEFAULT_PHONEMEA (0) + +#define AL_VOCAL_MORPHER_MIN_PHONEMEA_COARSE_TUNING (-24) +#define AL_VOCAL_MORPHER_MAX_PHONEMEA_COARSE_TUNING (24) +#define AL_VOCAL_MORPHER_DEFAULT_PHONEMEA_COARSE_TUNING (0) + +#define AL_VOCAL_MORPHER_MIN_PHONEMEB (0) +#define AL_VOCAL_MORPHER_MAX_PHONEMEB (29) +#define AL_VOCAL_MORPHER_DEFAULT_PHONEMEB (10) + +#define AL_VOCAL_MORPHER_MIN_PHONEMEB_COARSE_TUNING (-24) +#define AL_VOCAL_MORPHER_MAX_PHONEMEB_COARSE_TUNING (24) +#define AL_VOCAL_MORPHER_DEFAULT_PHONEMEB_COARSE_TUNING (0) + +#define AL_VOCAL_MORPHER_PHONEME_A (0) +#define AL_VOCAL_MORPHER_PHONEME_E (1) +#define AL_VOCAL_MORPHER_PHONEME_I (2) +#define AL_VOCAL_MORPHER_PHONEME_O (3) +#define AL_VOCAL_MORPHER_PHONEME_U (4) +#define AL_VOCAL_MORPHER_PHONEME_AA (5) +#define AL_VOCAL_MORPHER_PHONEME_AE (6) +#define AL_VOCAL_MORPHER_PHONEME_AH (7) +#define AL_VOCAL_MORPHER_PHONEME_AO (8) +#define AL_VOCAL_MORPHER_PHONEME_EH (9) +#define AL_VOCAL_MORPHER_PHONEME_ER (10) +#define AL_VOCAL_MORPHER_PHONEME_IH (11) +#define AL_VOCAL_MORPHER_PHONEME_IY (12) +#define AL_VOCAL_MORPHER_PHONEME_UH (13) +#define AL_VOCAL_MORPHER_PHONEME_UW (14) +#define AL_VOCAL_MORPHER_PHONEME_B (15) +#define AL_VOCAL_MORPHER_PHONEME_D (16) +#define AL_VOCAL_MORPHER_PHONEME_F (17) +#define AL_VOCAL_MORPHER_PHONEME_G (18) +#define AL_VOCAL_MORPHER_PHONEME_J (19) +#define AL_VOCAL_MORPHER_PHONEME_K (20) +#define AL_VOCAL_MORPHER_PHONEME_L (21) +#define AL_VOCAL_MORPHER_PHONEME_M (22) +#define AL_VOCAL_MORPHER_PHONEME_N (23) +#define AL_VOCAL_MORPHER_PHONEME_P (24) +#define AL_VOCAL_MORPHER_PHONEME_R (25) +#define AL_VOCAL_MORPHER_PHONEME_S (26) +#define AL_VOCAL_MORPHER_PHONEME_T (27) +#define AL_VOCAL_MORPHER_PHONEME_V (28) +#define AL_VOCAL_MORPHER_PHONEME_Z (29) + +#define AL_VOCAL_MORPHER_WAVEFORM_SINUSOID (0) +#define 
AL_VOCAL_MORPHER_WAVEFORM_TRIANGLE (1) +#define AL_VOCAL_MORPHER_WAVEFORM_SAWTOOTH (2) + +#define AL_VOCAL_MORPHER_MIN_WAVEFORM (0) +#define AL_VOCAL_MORPHER_MAX_WAVEFORM (2) +#define AL_VOCAL_MORPHER_DEFAULT_WAVEFORM (0) + +#define AL_VOCAL_MORPHER_MIN_RATE (0.0f) +#define AL_VOCAL_MORPHER_MAX_RATE (10.0f) +#define AL_VOCAL_MORPHER_DEFAULT_RATE (1.41f) + +/* Pitch shifter effect */ +#define AL_PITCH_SHIFTER_MIN_COARSE_TUNE (-12) +#define AL_PITCH_SHIFTER_MAX_COARSE_TUNE (12) +#define AL_PITCH_SHIFTER_DEFAULT_COARSE_TUNE (12) + +#define AL_PITCH_SHIFTER_MIN_FINE_TUNE (-50) +#define AL_PITCH_SHIFTER_MAX_FINE_TUNE (50) +#define AL_PITCH_SHIFTER_DEFAULT_FINE_TUNE (0) + +/* Ring modulator effect */ +#define AL_RING_MODULATOR_MIN_FREQUENCY (0.0f) +#define AL_RING_MODULATOR_MAX_FREQUENCY (8000.0f) +#define AL_RING_MODULATOR_DEFAULT_FREQUENCY (440.0f) + +#define AL_RING_MODULATOR_MIN_HIGHPASS_CUTOFF (0.0f) +#define AL_RING_MODULATOR_MAX_HIGHPASS_CUTOFF (24000.0f) +#define AL_RING_MODULATOR_DEFAULT_HIGHPASS_CUTOFF (800.0f) + +#define AL_RING_MODULATOR_SINUSOID (0) +#define AL_RING_MODULATOR_SAWTOOTH (1) +#define AL_RING_MODULATOR_SQUARE (2) + +#define AL_RING_MODULATOR_MIN_WAVEFORM (0) +#define AL_RING_MODULATOR_MAX_WAVEFORM (2) +#define AL_RING_MODULATOR_DEFAULT_WAVEFORM (0) + +/* Autowah effect */ +#define AL_AUTOWAH_MIN_ATTACK_TIME (0.0001f) +#define AL_AUTOWAH_MAX_ATTACK_TIME (1.0f) +#define AL_AUTOWAH_DEFAULT_ATTACK_TIME (0.06f) + +#define AL_AUTOWAH_MIN_RELEASE_TIME (0.0001f) +#define AL_AUTOWAH_MAX_RELEASE_TIME (1.0f) +#define AL_AUTOWAH_DEFAULT_RELEASE_TIME (0.06f) + +#define AL_AUTOWAH_MIN_RESONANCE (2.0f) +#define AL_AUTOWAH_MAX_RESONANCE (1000.0f) +#define AL_AUTOWAH_DEFAULT_RESONANCE (1000.0f) + +#define AL_AUTOWAH_MIN_PEAK_GAIN (0.00003f) +#define AL_AUTOWAH_MAX_PEAK_GAIN (31621.0f) +#define AL_AUTOWAH_DEFAULT_PEAK_GAIN (11.22f) + +/* Compressor effect */ +#define AL_COMPRESSOR_MIN_ONOFF (0) +#define AL_COMPRESSOR_MAX_ONOFF (1) +#define AL_COMPRESSOR_DEFAULT_ONOFF (1) + +/* Equalizer effect */ +#define AL_EQUALIZER_MIN_LOW_GAIN (0.126f) +#define AL_EQUALIZER_MAX_LOW_GAIN (7.943f) +#define AL_EQUALIZER_DEFAULT_LOW_GAIN (1.0f) + +#define AL_EQUALIZER_MIN_LOW_CUTOFF (50.0f) +#define AL_EQUALIZER_MAX_LOW_CUTOFF (800.0f) +#define AL_EQUALIZER_DEFAULT_LOW_CUTOFF (200.0f) + +#define AL_EQUALIZER_MIN_MID1_GAIN (0.126f) +#define AL_EQUALIZER_MAX_MID1_GAIN (7.943f) +#define AL_EQUALIZER_DEFAULT_MID1_GAIN (1.0f) + +#define AL_EQUALIZER_MIN_MID1_CENTER (200.0f) +#define AL_EQUALIZER_MAX_MID1_CENTER (3000.0f) +#define AL_EQUALIZER_DEFAULT_MID1_CENTER (500.0f) + +#define AL_EQUALIZER_MIN_MID1_WIDTH (0.01f) +#define AL_EQUALIZER_MAX_MID1_WIDTH (1.0f) +#define AL_EQUALIZER_DEFAULT_MID1_WIDTH (1.0f) + +#define AL_EQUALIZER_MIN_MID2_GAIN (0.126f) +#define AL_EQUALIZER_MAX_MID2_GAIN (7.943f) +#define AL_EQUALIZER_DEFAULT_MID2_GAIN (1.0f) + +#define AL_EQUALIZER_MIN_MID2_CENTER (1000.0f) +#define AL_EQUALIZER_MAX_MID2_CENTER (8000.0f) +#define AL_EQUALIZER_DEFAULT_MID2_CENTER (3000.0f) + +#define AL_EQUALIZER_MIN_MID2_WIDTH (0.01f) +#define AL_EQUALIZER_MAX_MID2_WIDTH (1.0f) +#define AL_EQUALIZER_DEFAULT_MID2_WIDTH (1.0f) + +#define AL_EQUALIZER_MIN_HIGH_GAIN (0.126f) +#define AL_EQUALIZER_MAX_HIGH_GAIN (7.943f) +#define AL_EQUALIZER_DEFAULT_HIGH_GAIN (1.0f) + +#define AL_EQUALIZER_MIN_HIGH_CUTOFF (4000.0f) +#define AL_EQUALIZER_MAX_HIGH_CUTOFF (16000.0f) +#define AL_EQUALIZER_DEFAULT_HIGH_CUTOFF (6000.0f) + + +/* Source parameter value ranges and defaults. 
*/ +#define AL_MIN_AIR_ABSORPTION_FACTOR (0.0f) +#define AL_MAX_AIR_ABSORPTION_FACTOR (10.0f) +#define AL_DEFAULT_AIR_ABSORPTION_FACTOR (0.0f) + +#define AL_MIN_ROOM_ROLLOFF_FACTOR (0.0f) +#define AL_MAX_ROOM_ROLLOFF_FACTOR (10.0f) +#define AL_DEFAULT_ROOM_ROLLOFF_FACTOR (0.0f) + +#define AL_MIN_CONE_OUTER_GAINHF (0.0f) +#define AL_MAX_CONE_OUTER_GAINHF (1.0f) +#define AL_DEFAULT_CONE_OUTER_GAINHF (1.0f) + +#define AL_MIN_DIRECT_FILTER_GAINHF_AUTO AL_FALSE +#define AL_MAX_DIRECT_FILTER_GAINHF_AUTO AL_TRUE +#define AL_DEFAULT_DIRECT_FILTER_GAINHF_AUTO AL_TRUE + +#define AL_MIN_AUXILIARY_SEND_FILTER_GAIN_AUTO AL_FALSE +#define AL_MAX_AUXILIARY_SEND_FILTER_GAIN_AUTO AL_TRUE +#define AL_DEFAULT_AUXILIARY_SEND_FILTER_GAIN_AUTO AL_TRUE + +#define AL_MIN_AUXILIARY_SEND_FILTER_GAINHF_AUTO AL_FALSE +#define AL_MAX_AUXILIARY_SEND_FILTER_GAINHF_AUTO AL_TRUE +#define AL_DEFAULT_AUXILIARY_SEND_FILTER_GAINHF_AUTO AL_TRUE + + +/* Listener parameter value ranges and defaults. */ +#define AL_MIN_METERS_PER_UNIT FLT_MIN +#define AL_MAX_METERS_PER_UNIT FLT_MAX +#define AL_DEFAULT_METERS_PER_UNIT (1.0f) + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* AL_EFX_H */ diff --git a/3rdparty/OpenAL/libs/Win64/OpenAL32.def b/3rdparty/OpenAL/libs/Win64/OpenAL32.def new file mode 100644 index 0000000000..a5c4c00097 --- /dev/null +++ b/3rdparty/OpenAL/libs/Win64/OpenAL32.def @@ -0,0 +1,176 @@ +EXPORTS + alAuxiliaryEffectSlotPlaySOFT + alAuxiliaryEffectSlotPlayvSOFT + alAuxiliaryEffectSlotStopSOFT + alAuxiliaryEffectSlotStopvSOFT + alAuxiliaryEffectSlotf + alAuxiliaryEffectSlotfv + alAuxiliaryEffectSloti + alAuxiliaryEffectSlotiv + alBuffer3f + alBuffer3i + alBufferCallbackSOFT + alBufferData + alBufferSamplesSOFT + alBufferStorageSOFT + alBufferSubDataSOFT + alBufferSubSamplesSOFT + alBufferf + alBufferfv + alBufferi + alBufferiv + alDeferUpdatesSOFT + alDeleteAuxiliaryEffectSlots + alDeleteBuffers + alDeleteEffects + alDeleteFilters + alDeleteSources + alDisable + alDistanceModel + alDopplerFactor + alDopplerVelocity + alEffectf + alEffectfv + alEffecti + alEffectiv + alEnable + alEventCallbackSOFT + alEventControlSOFT + alFilterf + alFilterfv + alFilteri + alFilteriv + alFlushMappedBufferSOFT + alGenAuxiliaryEffectSlots + alGenBuffers + alGenEffects + alGenFilters + alGenSources + alGetAuxiliaryEffectSlotf + alGetAuxiliaryEffectSlotfv + alGetAuxiliaryEffectSloti + alGetAuxiliaryEffectSlotiv + alGetBoolean + alGetBooleanv + alGetBuffer3PtrSOFT + alGetBuffer3f + alGetBuffer3i + alGetBufferPtrSOFT + alGetBufferPtrvSOFT + alGetBufferSamplesSOFT + alGetBufferf + alGetBufferfv + alGetBufferi + alGetBufferiv + alGetDouble + alGetDoublev + alGetEffectf + alGetEffectfv + alGetEffecti + alGetEffectiv + alGetEnumValue + alGetError + alGetFilterf + alGetFilterfv + alGetFilteri + alGetFilteriv + alGetFloat + alGetFloatv + alGetInteger + alGetInteger64SOFT + alGetInteger64vSOFT + alGetIntegerv + alGetListener3f + alGetListener3i + alGetListenerf + alGetListenerfv + alGetListeneri + alGetListeneriv + alGetPointerSOFT + alGetPointervSOFT + alGetProcAddress + alGetSource3dSOFT + alGetSource3f + alGetSource3i + alGetSource3i64SOFT + alGetSourcedSOFT + alGetSourcedvSOFT + alGetSourcef + alGetSourcefv + alGetSourcei + alGetSourcei64SOFT + alGetSourcei64vSOFT + alGetSourceiv + alGetString + alGetStringiSOFT + alIsAuxiliaryEffectSlot + alIsBuffer + alIsBufferFormatSupportedSOFT + alIsEffect + alIsEnabled + alIsExtensionPresent + alIsFilter + alIsSource + alListener3f + alListener3i + alListenerf + alListenerfv + alListeneri 
+ alListeneriv + alMapBufferSOFT + alProcessUpdatesSOFT + alSource3dSOFT + alSource3f + alSource3i + alSource3i64SOFT + alSourcePause + alSourcePausev + alSourcePlay + alSourcePlayv + alSourceQueueBuffers + alSourceRewind + alSourceRewindv + alSourceStop + alSourceStopv + alSourceUnqueueBuffers + alSourcedSOFT + alSourcedvSOFT + alSourcef + alSourcefv + alSourcei + alSourcei64SOFT + alSourcei64vSOFT + alSourceiv + alSpeedOfSound + alUnmapBufferSOFT + alcCaptureCloseDevice + alcCaptureOpenDevice + alcCaptureSamples + alcCaptureStart + alcCaptureStop + alcCloseDevice + alcCreateContext + alcDestroyContext + alcDevicePauseSOFT + alcDeviceResumeSOFT + alcGetContextsDevice + alcGetCurrentContext + alcGetEnumValue + alcGetError + alcGetInteger64vSOFT + alcGetIntegerv + alcGetProcAddress + alcGetString + alcGetStringiSOFT + alcGetThreadContext + alcIsExtensionPresent + alcIsRenderFormatSupportedSOFT + alcLoopbackOpenDeviceSOFT + alcMakeContextCurrent + alcOpenDevice + alcProcessContext + alcRenderSamplesSOFT + alcResetDeviceSOFT + alcSetThreadContext + alcSuspendContext + alsoft_get_version diff --git a/3rdparty/OpenAL/libs/Win64/OpenAL32.lib b/3rdparty/OpenAL/libs/Win64/OpenAL32.lib new file mode 100644 index 0000000000..9c6ec3e4fc Binary files /dev/null and b/3rdparty/OpenAL/libs/Win64/OpenAL32.lib differ diff --git a/3rdparty/OpenAL/openal-soft b/3rdparty/OpenAL/openal-soft deleted file mode 160000 index dc7d7054a5..0000000000 --- a/3rdparty/OpenAL/openal-soft +++ /dev/null @@ -1 +0,0 @@ -Subproject commit dc7d7054a5b4f3bec1dc23a42fd616a0847af948 diff --git a/3rdparty/OpenAL/openal-soft.vcxproj b/3rdparty/OpenAL/openal-soft.vcxproj deleted file mode 100644 index d309093527..0000000000 --- a/3rdparty/OpenAL/openal-soft.vcxproj +++ /dev/null @@ -1,109 +0,0 @@ - - - - - Debug - x64 - - - Release - x64 - - - - - - - - - - - - MakeFileProj - openal-soft - {8846A9AA-5539-4C91-8301-F54260E1A07A} - - - - - - Makefile - true - - - Makefile - false - - - - x64 - - - - - - - - - - - call vsdevcmd.bat -arch=amd64 - cd "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)" - cmake -G Ninja -DCMAKE_CXX_COMPILER="cl.exe" -DCMAKE_C_COMPILER="cl.exe" -DCMAKE_BUILD_TYPE="Release" -DCMAKE_INSTALL_PREFIX="./Release" -DCMAKE_SYSTEM_VERSION=10.0 -DLIBTYPE=STATIC -DFORCE_STATIC_VCRT=true -DALSOFT_UTILS=false -DALSOFT_EXAMPLES=false -DALSOFT_INSTALL=false -DALSOFT_INSTALL_CONFIG=false -DALSOFT_INSTALL_HRTF_DATA=false -DALSOFT_INSTALL_AMBDEC_PRESETS=false -DALSOFT_INSTALL_EXAMPLES=false -DALSOFT_INSTALL_UTILS=false "$(SolutionDir)3rdparty\OpenAL\openal-soft" - - call vsdevcmd.bat -arch=amd64 - cd "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)" - cmake -G Ninja -DCMAKE_CXX_COMPILER="cl.exe" -DCMAKE_C_COMPILER="cl.exe" -DCMAKE_BUILD_TYPE="Debug" -DCMAKE_INSTALL_PREFIX="./Debug" -DCMAKE_SYSTEM_VERSION=10.0 -DLIBTYPE=STATIC -DALSOFT_UTILS=false -DALSOFT_EXAMPLES=false -DALSOFT_INSTALL=false -DALSOFT_INSTALL_CONFIG=false -DALSOFT_INSTALL_HRTF_DATA=false -DALSOFT_INSTALL_AMBDEC_PRESETS=false -DALSOFT_INSTALL_EXAMPLES=false -DALSOFT_INSTALL_UTILS=false "$(SolutionDir)3rdparty\OpenAL\openal-soft" - - - echo Copying.. - copy "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\OpenAL32.lib" "$(SolutionDir)build\lib\$(Configuration)-$(Platform)" - - - echo Cleaning.. 
- del "$(SolutionDir)build\lib\$(Configuration)-$(Platform)\OpenAL32.lib" - rmdir /s /q "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)" - - - - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - $(CmakeDebugCLI) - ninja - $(CmakeCopyCLI) - - - $(CmakeCleanCLI) - $(CmakeDebugCLI) - ninja - $(CmakeCopyCLI) - - - $(CmakeCleanCLI) - - - - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - $(CmakeReleaseCLI) - ninja - $(CmakeCopyCLI) - - - $(CmakeCleanCLI) - $(CmakeReleaseCLI) - ninja - $(CmakeCopyCLI) - - - $(CmakeCleanCLI) - - - - - - - - \ No newline at end of file diff --git a/3rdparty/SPIRV/.gitignore b/3rdparty/SPIRV/.gitignore new file mode 100644 index 0000000000..f3b5fb62e4 --- /dev/null +++ b/3rdparty/SPIRV/.gitignore @@ -0,0 +1,2 @@ +/build +/x64 diff --git a/3rdparty/SPIRV/CMakeLists.txt b/3rdparty/SPIRV/CMakeLists.txt new file mode 100644 index 0000000000..9202f34769 --- /dev/null +++ b/3rdparty/SPIRV/CMakeLists.txt @@ -0,0 +1,3 @@ +set(SKIP_SPIRV_TOOLS_INSTALL ON CACHE BOOL "Skip spirv-tools install" FORCE) +set(SPIRV-Headers_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/SPIRV-Headers" CACHE STRING "SPIRV-Headers path" FORCE) +add_subdirectory(SPIRV-Tools) diff --git a/3rdparty/SPIRV/SPIRV-Headers b/3rdparty/SPIRV/SPIRV-Headers new file mode 160000 index 0000000000..3fdabd0da2 --- /dev/null +++ b/3rdparty/SPIRV/SPIRV-Headers @@ -0,0 +1 @@ +Subproject commit 3fdabd0da2932c276b25b9b4a988ba134eba1aa6 diff --git a/3rdparty/SPIRV/SPIRV-Tools b/3rdparty/SPIRV/SPIRV-Tools new file mode 160000 index 0000000000..895927bd3f --- /dev/null +++ b/3rdparty/SPIRV/SPIRV-Tools @@ -0,0 +1 @@ +Subproject commit 895927bd3f2d653f40cebab55aa6c7eabde30a86 diff --git a/3rdparty/SPIRV/spirv.vcxproj b/3rdparty/SPIRV/spirv.vcxproj new file mode 100644 index 0000000000..1ce9784ee1 --- /dev/null +++ b/3rdparty/SPIRV/spirv.vcxproj @@ -0,0 +1,69 @@ + + + + + Debug + x64 + + + Release + x64 + + + + {4CBD3DDD-5555-49A4-A44D-DD3D8CB516A1} + MakeFileProj + + + + + + Makefile + true + + + Makefile + false + + + + + + + + + + + + + + "Visual Studio $(VisualStudioVersion.Substring(0,2))" + cmake -G $(CmakeGenerator) -A x64 -DCMAKE_BUILD_TYPE="Release" -DSPIRV_WERROR=OFF -DSPIRV-Headers_SOURCE_DIR=$([System.IO.Path]::GetFullPath('$(MSBuildThisFileDirectory)/SPIRV-Headers')) SPIRV-Tools -B build + cmake -G $(CmakeGenerator) -A x64 -DCMAKE_BUILD_TYPE="Debug" -DSPIRV_WERROR=OFF -DSPIRV-Headers_SOURCE_DIR=$([System.IO.Path]::GetFullPath('$(MSBuildThisFileDirectory)/SPIRV-Headers')) -S SPIRV-Tools -B build + $([System.IO.Path]::GetFullPath('$(MSBuildThisFileDirectory)..\..\buildfiles\msvc\common_default.props')) + + + $(CmakeReleaseCLI) + msbuild.exe build\ALL_BUILD.vcxproj /t:build /p:Configuration=Release /p:ForceImportBeforeCppTargets=$(PropsAbsPath) /m + $(CmakeReleaseCLI) + msbuild.exe build\ALL_BUILD.vcxproj /t:rebuild /p:Configuration=Release /p:ForceImportBeforeCppTargets=$(PropsAbsPath) /m + $(CmakeReleaseCLI) + msbuild.exe build\ALL_BUILD.vcxproj /t:clean /p:Configuration=Release /p:ForceImportBeforeCppTargets=$(PropsAbsPath) /m + + + $(CmakeDebugCLI) + msbuild.exe build\ALL_BUILD.vcxproj /t:build /p:Configuration=Debug /p:ForceImportBeforeCppTargets=$(PropsAbsPath) /m + + $(CmakeDebugCLI) + msbuild.exe build\ALL_BUILD.vcxproj /t:rebuild /p:Configuration=Debug /p:ForceImportBeforeCppTargets=$(PropsAbsPath) /m + + $(CmakeDebugCLI) + msbuild.exe 
build\ALL_BUILD.vcxproj /t:clean /p:Configuration=Debug /p:ForceImportBeforeCppTargets=$(PropsAbsPath) /m + + + + + + + + \ No newline at end of file diff --git a/3rdparty/SPIRV/spirv.vcxproj.filters b/3rdparty/SPIRV/spirv.vcxproj.filters new file mode 100644 index 0000000000..9cd8510566 --- /dev/null +++ b/3rdparty/SPIRV/spirv.vcxproj.filters @@ -0,0 +1,2 @@ + + \ No newline at end of file diff --git a/3rdparty/SoundTouch/CMakeLists.txt b/3rdparty/SoundTouch/CMakeLists.txt deleted file mode 100644 index acc7d02714..0000000000 --- a/3rdparty/SoundTouch/CMakeLists.txt +++ /dev/null @@ -1,36 +0,0 @@ -add_library(soundtouch STATIC EXCLUDE_FROM_ALL - soundtouch/source/SoundTouch/AAFilter.cpp - soundtouch/source/SoundTouch/FIFOSampleBuffer.cpp - soundtouch/source/SoundTouch/FIRFilter.cpp - soundtouch/source/SoundTouch/InterpolateCubic.cpp - soundtouch/source/SoundTouch/InterpolateLinear.cpp - soundtouch/source/SoundTouch/InterpolateShannon.cpp - soundtouch/source/SoundTouch/RateTransposer.cpp - soundtouch/source/SoundTouch/SoundTouch.cpp - soundtouch/source/SoundTouch/sse_optimized.cpp - soundtouch/source/SoundTouch/TDStretch.cpp -) - -target_include_directories(soundtouch PRIVATE - soundtouch/source/SoundTouch - soundtouch/include) - -target_include_directories(soundtouch INTERFACE - $ - $) - -set_property(TARGET soundtouch PROPERTY FOLDER "3rdparty/") - -target_compile_definitions(soundtouch PUBLIC - ST_NO_EXCEPTION_HANDLING - USE_MULTICH_ALWAYS - SOUNDTOUCH_FLOAT_SAMPLES; -) - -target_compile_options(soundtouch PRIVATE "-w") - -if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86|X86|amd64|AMD64|em64t|EM64T)") - target_compile_definitions(soundtouch PUBLIC - SOUNDTOUCH_ALLOW_SSE - ) -endif () diff --git a/3rdparty/SoundTouch/soundtouch b/3rdparty/SoundTouch/soundtouch deleted file mode 160000 index 3982730833..0000000000 --- a/3rdparty/SoundTouch/soundtouch +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 3982730833b6daefe77dcfb32b5c282851640c17 diff --git a/3rdparty/SoundTouch/soundtouch.vcxproj b/3rdparty/SoundTouch/soundtouch.vcxproj deleted file mode 100644 index 760e58abb3..0000000000 --- a/3rdparty/SoundTouch/soundtouch.vcxproj +++ /dev/null @@ -1,79 +0,0 @@ - - - - - Debug - x64 - - - Release - x64 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - {508c291a-3d18-49f5-b25d-f7c8db92cb21} - soundtouch - - - - - - StaticLibrary - false - true - Unicode - x64 - - - - - - - - - - - - - - - - - - TurnOffAllWarnings - false - SOUNDTOUCH_ALLOW_SSE;ST_NO_EXCEPTION_HANDLING;USE_MULTICH_ALWAYS;SOUNDTOUCH_FLOAT_SAMPLES;%(PreprocessorDefinitions) - ./soundtouch/source/SoundTouch;./soundtouch/include;%(AdditionalIncludeDirectories) - ProgramDatabase - MaxSpeed - - - - - - \ No newline at end of file diff --git a/3rdparty/SoundTouch/soundtouch.vcxproj.filters b/3rdparty/SoundTouch/soundtouch.vcxproj.filters deleted file mode 100644 index dac3e1803b..0000000000 --- a/3rdparty/SoundTouch/soundtouch.vcxproj.filters +++ /dev/null @@ -1,78 +0,0 @@ - - - - - {4FC737F1-C7A5-4376-A066-2A32D752A2FF} - - - {93995380-89BD-4b04-88EB-625FBE52EBFB} - - - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - diff --git a/3rdparty/XAudio2Redist/include/xaudio2redist.h 
b/3rdparty/XAudio2Redist/include/xaudio2redist.h new file mode 100644 index 0000000000..c27a9218a6 --- /dev/null +++ b/3rdparty/XAudio2Redist/include/xaudio2redist.h @@ -0,0 +1,1275 @@ +/************************************************************************** + * + * Copyright (c) Microsoft Corporation. All rights reserved. + * + * File: xaudio2.h + * Content: Declarations for the XAudio2 game audio API. + * + **************************************************************************/ + +#ifdef _MSC_VER +#pragma once +#endif + +#ifndef __XAUDIO2_INCLUDED__ +#define __XAUDIO2_INCLUDED__ + +#include <sdkddkver.h> + +// Current name of the DLL shipped in the same SDK as this header. +// The name reflects the current version +#define XAUDIO2_DLL_A "xaudio2_9redist.dll" +#define XAUDIO2_DLL_W L"xaudio2_9redist.dll" +#define XAUDIO2D_DLL_A "xaudio2_9redist.dll" +#define XAUDIO2D_DLL_W L"xaudio2_9redist.dll" + +#ifdef UNICODE + #define XAUDIO2_DLL XAUDIO2_DLL_W + #define XAUDIO2D_DLL XAUDIO2D_DLL_W +#else + #define XAUDIO2_DLL XAUDIO2_DLL_A + #define XAUDIO2D_DLL XAUDIO2D_DLL_A +#endif + + +/************************************************************************** + * + * XAudio2 COM object class and interface IDs. + * + **************************************************************************/ + +#include <guiddef.h> + +#ifdef __cplusplus + // XAudio 2.9 + interface __declspec(uuid("2B02E3CF-2E0B-4ec3-BE45-1B2A3FE7210D")) IXAudio2; + interface __declspec(uuid("84ac29bb-d619-44d2-b197-e4acf7df3ed6")) IXAudio2Extension; + EXTERN_C const GUID DECLSPEC_SELECTANY IID_IXAudio2Extension = __uuidof(IXAudio2Extension); + EXTERN_C const GUID DECLSPEC_SELECTANY IID_IXAudio2 = __uuidof(IXAudio2); + +#else // #ifdef __cplusplus + // Compiling with C for Windows 10 and later + DEFINE_GUID(IID_IXAudio2, 0x2B02E3CF, 0x2E0B, 0x4ec3, 0xBE, 0x45, 0x1B, 0x2A, 0x3F, 0xE7, 0x21, 0x0D); + DEFINE_GUID(IID_IXAudio2Extension, 0x84ac29bb, 0xd619, 0x44d2, 0xb1, 0x97, 0xe4, 0xac, 0xf7, 0xdf, 0x3e, 0xd6); +#endif // #ifdef __cplusplus + + +// Ignore the rest of this header if only the GUID definitions were requested +#ifndef GUID_DEFS_ONLY + +#include <objbase.h> // Windows COM declarations +#include <sal.h> // Markers for documenting API semantics +#include <mmreg.h> // Basic data types and constants for audio work +#include <audiosessiontypes.h> // For AUDIO_STREAM_CATEGORY + +// The Windows 7 version of audiosessiontypes.h does not define AUDIO_STREAM_CATEGORY, so if we are targeting +// Windows 7 we might have to define it here, depending on which SDK is used. +#if _WIN32_WINNT < _WIN32_WINNT_WIN8 + +// If we are compiling for Windows 7, we might be using the Windows 7 Platform SDK, which does not have AUDIO_STREAM_CATEGORY. +// But we might be using a newer SDK (such as the Win8 Platform SDK), which has AUDIO_STREAM_CATEGORY. +// Determine if we are using the Windows 7 Platform SDK by checking if WAVE_FORMAT_WM9_SPECTRUM_ANALYZER is defined.
+#ifndef WAVE_FORMAT_WM9_SPECTRUM_ANALYZER +typedef enum _AUDIO_STREAM_CATEGORY +{ + AudioCategory_Other = 0, + AudioCategory_ForegroundOnlyMedia = 1, + AudioCategory_Communications = 3, + AudioCategory_Alerts = 4, + AudioCategory_SoundEffects = 5, + AudioCategory_GameEffects = 6, + AudioCategory_GameMedia = 7, + AudioCategory_GameChat = 8, + AudioCategory_Speech = 9, + AudioCategory_Movie = 10, + AudioCategory_Media = 11, +} AUDIO_STREAM_CATEGORY; +#endif /* WAVE_FORMAT_WM9_SPECTRUM_ANALYZER */ +#endif /* NTDDI_VERSION < NTDDI_WIN8 */ + +// All structures defined in this file use tight field packing +#pragma pack(push, 1) + + +/************************************************************************** + * + * XAudio2 constants, flags and error codes. + * + **************************************************************************/ + +// Numeric boundary values +#define XAUDIO2_MAX_BUFFER_BYTES 0x80000000 // Maximum bytes allowed in a source buffer +#define XAUDIO2_MAX_QUEUED_BUFFERS 64 // Maximum buffers allowed in a voice queue +#define XAUDIO2_MAX_BUFFERS_SYSTEM 2 // Maximum buffers allowed for system threads (Xbox 360 only) +#define XAUDIO2_MAX_AUDIO_CHANNELS 64 // Maximum channels in an audio stream +#define XAUDIO2_MIN_SAMPLE_RATE 1000 // Minimum audio sample rate supported +#define XAUDIO2_MAX_SAMPLE_RATE 200000 // Maximum audio sample rate supported +#define XAUDIO2_MAX_VOLUME_LEVEL 16777216.0f // Maximum acceptable volume level (2^24) +#define XAUDIO2_MIN_FREQ_RATIO (1/1024.0f) // Minimum SetFrequencyRatio argument +#define XAUDIO2_MAX_FREQ_RATIO 1024.0f // Maximum MaxFrequencyRatio argument +#define XAUDIO2_DEFAULT_FREQ_RATIO 2.0f // Default MaxFrequencyRatio argument +#define XAUDIO2_MAX_FILTER_ONEOVERQ 1.5f // Maximum XAUDIO2_FILTER_PARAMETERS.OneOverQ +#define XAUDIO2_MAX_FILTER_FREQUENCY 1.0f // Maximum XAUDIO2_FILTER_PARAMETERS.Frequency +#define XAUDIO2_MAX_LOOP_COUNT 254 // Maximum non-infinite XAUDIO2_BUFFER.LoopCount +#define XAUDIO2_MAX_INSTANCES 8 // Maximum simultaneous XAudio2 objects on Xbox 360 + +// For XMA voices on Xbox 360 there is an additional restriction on the MaxFrequencyRatio +// argument and the voice's sample rate: the product of these numbers cannot exceed 600000 +// for one-channel voices or 300000 for voices with more than one channel. 
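To make the restriction above concrete (a hedged sketch; the helper below is hypothetical and not part of the XAudio2 API, and the two macros that encode these limits follow immediately): the largest usable MaxFrequencyRatio for an XMA voice is the applicable limit divided by the voice's sample rate, e.g. 600000 / 48000 = 12.5 for a 48 kHz mono voice but only 300000 / 48000 = 6.25 for a stereo voice at the same rate.

#include <stdint.h>

/* Hypothetical helper: ceiling on MaxFrequencyRatio for an XMA voice
   under the Xbox 360 restriction described in the comment above. */
static float xma_max_freq_ratio(uint32_t sample_rate, uint32_t channels)
{
    uint32_t limit = (channels == 1) ? 600000u : 300000u;
    return (float)limit / (float)sample_rate;  /* 48000 Hz mono -> 12.5f */
}
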
+#define XAUDIO2_MAX_RATIO_TIMES_RATE_XMA_MONO 600000 +#define XAUDIO2_MAX_RATIO_TIMES_RATE_XMA_MULTICHANNEL 300000 + +// Numeric values with special meanings +#define XAUDIO2_COMMIT_NOW 0 // Used as an OperationSet argument +#define XAUDIO2_COMMIT_ALL 0 // Used in IXAudio2::CommitChanges +#define XAUDIO2_INVALID_OPSET (UINT32)(-1) // Not allowed for OperationSet arguments +#define XAUDIO2_NO_LOOP_REGION 0 // Used in XAUDIO2_BUFFER.LoopCount +#define XAUDIO2_LOOP_INFINITE 255 // Used in XAUDIO2_BUFFER.LoopCount +#define XAUDIO2_DEFAULT_CHANNELS 0 // Used in CreateMasteringVoice +#define XAUDIO2_DEFAULT_SAMPLERATE 0 // Used in CreateMasteringVoice + +// Flags +#define XAUDIO2_DEBUG_ENGINE 0x0001 // Used in XAudio2Create +#define XAUDIO2_VOICE_NOPITCH 0x0002 // Used in IXAudio2::CreateSourceVoice +#define XAUDIO2_VOICE_NOSRC 0x0004 // Used in IXAudio2::CreateSourceVoice +#define XAUDIO2_VOICE_USEFILTER 0x0008 // Used in IXAudio2::CreateSource/SubmixVoice +#define XAUDIO2_PLAY_TAILS 0x0020 // Used in IXAudio2SourceVoice::Stop +#define XAUDIO2_END_OF_STREAM 0x0040 // Used in XAUDIO2_BUFFER.Flags +#define XAUDIO2_SEND_USEFILTER 0x0080 // Used in XAUDIO2_SEND_DESCRIPTOR.Flags +#define XAUDIO2_VOICE_NOSAMPLESPLAYED 0x0100 // Used in IXAudio2SourceVoice::GetState +#define XAUDIO2_STOP_ENGINE_WHEN_IDLE 0x2000 // Used in XAudio2Create to force the engine to Stop when no source voices are Started, and Start when a voice is Started +#define XAUDIO2_1024_QUANTUM 0x8000 // Used in XAudio2Create to specify nondefault processing quantum of 21.33 ms (1024 samples at 48KHz) +#define XAUDIO2_NO_VIRTUAL_AUDIO_CLIENT 0x10000 // Used in CreateMasteringVoice to create a virtual audio client + +// Default parameters for the built-in filter +#define XAUDIO2_DEFAULT_FILTER_TYPE LowPassFilter +#define XAUDIO2_DEFAULT_FILTER_FREQUENCY XAUDIO2_MAX_FILTER_FREQUENCY +#define XAUDIO2_DEFAULT_FILTER_ONEOVERQ 1.0f + +// Internal XAudio2 constants +// The audio frame quantum can be calculated by reducing the fraction: +// SamplesPerAudioFrame / SamplesPerSecond +#define XAUDIO2_QUANTUM_NUMERATOR 1 // On Windows, XAudio2 processes audio +#define XAUDIO2_QUANTUM_DENOMINATOR 100 // in 10ms chunks (= 1/100 seconds) +#define XAUDIO2_QUANTUM_MS (1000.0f * XAUDIO2_QUANTUM_NUMERATOR / XAUDIO2_QUANTUM_DENOMINATOR) + +// XAudio2 error codes +#define FACILITY_XAUDIO2 0x896 +#define XAUDIO2_E_INVALID_CALL ((HRESULT)0x88960001) // An API call or one of its arguments was illegal +#define XAUDIO2_E_XMA_DECODER_ERROR ((HRESULT)0x88960002) // The XMA hardware suffered an unrecoverable error +#define XAUDIO2_E_XAPO_CREATION_FAILED ((HRESULT)0x88960003) // XAudio2 failed to initialize an XAPO effect +#define XAUDIO2_E_DEVICE_INVALIDATED ((HRESULT)0x88960004) // An audio device became unusable (unplugged, etc) + +/************************************************************************** + * + * Forward declarations for the XAudio2 interfaces. + * + **************************************************************************/ + +#ifdef __cplusplus + #define FWD_DECLARE(x) interface x +#else + #define FWD_DECLARE(x) typedef interface x x +#endif + +FWD_DECLARE(IXAudio2); +FWD_DECLARE(IXAudio2Voice); +FWD_DECLARE(IXAudio2SourceVoice); +FWD_DECLARE(IXAudio2SubmixVoice); +FWD_DECLARE(IXAudio2MasteringVoice); +FWD_DECLARE(IXAudio2EngineCallback); +FWD_DECLARE(IXAudio2VoiceCallback); + + +/************************************************************************** + * + * XAudio2 structures and enumerations. 
+ * + **************************************************************************/ + +// Used in XAudio2Create, specifies which CPU(s) to use. +typedef UINT32 XAUDIO2_PROCESSOR; +#define Processor1 0x00000001 +#define Processor2 0x00000002 +#define Processor3 0x00000004 +#define Processor4 0x00000008 +#define Processor5 0x00000010 +#define Processor6 0x00000020 +#define Processor7 0x00000040 +#define Processor8 0x00000080 +#define Processor9 0x00000100 +#define Processor10 0x00000200 +#define Processor11 0x00000400 +#define Processor12 0x00000800 +#define Processor13 0x00001000 +#define Processor14 0x00002000 +#define Processor15 0x00004000 +#define Processor16 0x00008000 +#define Processor17 0x00010000 +#define Processor18 0x00020000 +#define Processor19 0x00040000 +#define Processor20 0x00080000 +#define Processor21 0x00100000 +#define Processor22 0x00200000 +#define Processor23 0x00400000 +#define Processor24 0x00800000 +#define Processor25 0x01000000 +#define Processor26 0x02000000 +#define Processor27 0x04000000 +#define Processor28 0x08000000 +#define Processor29 0x10000000 +#define Processor30 0x20000000 +#define Processor31 0x40000000 +#define Processor32 0x80000000 +#define XAUDIO2_ANY_PROCESSOR 0xffffffff + +// This value indicates that XAudio2 will choose the default processor by itself. The actual value chosen +// may vary depending on the hardware platform. +#define XAUDIO2_USE_DEFAULT_PROCESSOR 0x00000000 + +// This definition is included for backwards compatibilty. New implementations should use +// XAUDIO2_USE_DEFAULT_PROCESSOR instead to let XAudio2 select the appropriate default processor for the hardware platform. +#define XAUDIO2_DEFAULT_PROCESSOR Processor1 + +// Returned by IXAudio2Voice::GetVoiceDetails +typedef struct XAUDIO2_VOICE_DETAILS +{ + UINT32 CreationFlags; // Flags the voice was created with. + UINT32 ActiveFlags; // Flags currently active. + UINT32 InputChannels; // Channels in the voice's input audio. + UINT32 InputSampleRate; // Sample rate of the voice's input audio. +} XAUDIO2_VOICE_DETAILS; + +// Used in XAUDIO2_VOICE_SENDS below +typedef struct XAUDIO2_SEND_DESCRIPTOR +{ + UINT32 Flags; // Either 0 or XAUDIO2_SEND_USEFILTER. + IXAudio2Voice* pOutputVoice; // This send's destination voice. +} XAUDIO2_SEND_DESCRIPTOR; + +// Used in the voice creation functions and in IXAudio2Voice::SetOutputVoices +typedef struct XAUDIO2_VOICE_SENDS +{ + UINT32 SendCount; // Number of sends from this voice. + XAUDIO2_SEND_DESCRIPTOR* pSends; // Array of SendCount send descriptors. +} XAUDIO2_VOICE_SENDS; + +// Used in XAUDIO2_EFFECT_CHAIN below +typedef struct XAUDIO2_EFFECT_DESCRIPTOR +{ + IUnknown* pEffect; // Pointer to the effect object's IUnknown interface. + BOOL InitialState; // TRUE if the effect should begin in the enabled state. + UINT32 OutputChannels; // How many output channels the effect should produce. +} XAUDIO2_EFFECT_DESCRIPTOR; + +// Used in the voice creation functions and in IXAudio2Voice::SetEffectChain +typedef struct XAUDIO2_EFFECT_CHAIN +{ + UINT32 EffectCount; // Number of effects in this voice's effect chain. + XAUDIO2_EFFECT_DESCRIPTOR* pEffectDescriptors; // Array of effect descriptors. +} XAUDIO2_EFFECT_CHAIN; + +// Used in XAUDIO2_FILTER_PARAMETERS below +typedef enum XAUDIO2_FILTER_TYPE +{ + LowPassFilter, // Attenuates frequencies above the cutoff frequency (state-variable filter). + BandPassFilter, // Attenuates frequencies outside a given range (state-variable filter). 
+ HighPassFilter, // Attenuates frequencies below the cutoff frequency (state-variable filter). + NotchFilter, // Attenuates frequencies inside a given range (state-variable filter). + LowPassOnePoleFilter, // Attenuates frequencies above the cutoff frequency (one-pole filter, XAUDIO2_FILTER_PARAMETERS.OneOverQ has no effect) + HighPassOnePoleFilter // Attenuates frequencies below the cutoff frequency (one-pole filter, XAUDIO2_FILTER_PARAMETERS.OneOverQ has no effect) +} XAUDIO2_FILTER_TYPE; + +// Used in IXAudio2Voice::Set/GetFilterParameters and Set/GetOutputFilterParameters +typedef struct XAUDIO2_FILTER_PARAMETERS +{ + XAUDIO2_FILTER_TYPE Type; // Filter type. + float Frequency; // Filter coefficient. + // must be >= 0 and <= XAUDIO2_MAX_FILTER_FREQUENCY + // See XAudio2CutoffFrequencyToRadians() for state-variable filter types and + // XAudio2CutoffFrequencyToOnePoleCoefficient() for one-pole filter types. + float OneOverQ; // Reciprocal of the filter's quality factor Q; + // must be > 0 and <= XAUDIO2_MAX_FILTER_ONEOVERQ. + // Has no effect for one-pole filters. +} XAUDIO2_FILTER_PARAMETERS; + +// Used in IXAudio2SourceVoice::SubmitSourceBuffer +typedef struct XAUDIO2_BUFFER +{ + UINT32 Flags; // Either 0 or XAUDIO2_END_OF_STREAM. + UINT32 AudioBytes; // Size of the audio data buffer in bytes. + const BYTE* pAudioData; // Pointer to the audio data buffer. + UINT32 PlayBegin; // First sample in this buffer to be played. + UINT32 PlayLength; // Length of the region to be played in samples, + // or 0 to play the whole buffer. + UINT32 LoopBegin; // First sample of the region to be looped. + UINT32 LoopLength; // Length of the desired loop region in samples, + // or 0 to loop the entire buffer. + UINT32 LoopCount; // Number of times to repeat the loop region, + // or XAUDIO2_LOOP_INFINITE to loop forever. + void* pContext; // Context value to be passed back in callbacks. +} XAUDIO2_BUFFER; + +// Used in IXAudio2SourceVoice::SubmitSourceBuffer when submitting XWMA data. +// NOTE: If an XWMA sound is submitted in more than one buffer, each buffer's +// pDecodedPacketCumulativeBytes[PacketCount-1] value must be subtracted from +// all the entries in the next buffer's pDecodedPacketCumulativeBytes array. +// And whether a sound is submitted in more than one buffer or not, the final +// buffer of the sound should use the XAUDIO2_END_OF_STREAM flag, or else the +// client must call IXAudio2SourceVoice::Discontinuity after submitting it. +typedef struct XAUDIO2_BUFFER_WMA +{ + const UINT32* pDecodedPacketCumulativeBytes; // Decoded packet's cumulative size array. + // Each element is the number of bytes accumulated + // when the corresponding XWMA packet is decoded in + // order. The array must have PacketCount elements. + UINT32 PacketCount; // Number of XWMA packets submitted. Must be >= 1 and + // divide evenly into XAUDIO2_BUFFER.AudioBytes. +} XAUDIO2_BUFFER_WMA; + +// Returned by IXAudio2SourceVoice::GetState +typedef struct XAUDIO2_VOICE_STATE +{ + void* pCurrentBufferContext; // The pContext value provided in the XAUDIO2_BUFFER + // that is currently being processed, or NULL if + // there are no buffers in the queue. + UINT32 BuffersQueued; // Number of buffers currently queued on the voice + // (including the one that is being processed). + UINT64 SamplesPlayed; // Total number of samples produced by the voice since + // it began processing the current audio stream. 
+ // If XAUDIO2_VOICE_NOSAMPLESPLAYED is specified + // in the call to IXAudio2SourceVoice::GetState, + // this member will not be calculated, saving CPU. +} XAUDIO2_VOICE_STATE; + +// Returned by IXAudio2::GetPerformanceData +typedef struct XAUDIO2_PERFORMANCE_DATA +{ + // CPU usage information + UINT64 AudioCyclesSinceLastQuery; // CPU cycles spent on audio processing since the + // last call to StartEngine or GetPerformanceData. + UINT64 TotalCyclesSinceLastQuery; // Total CPU cycles elapsed since the last call + // (only counts the CPU XAudio2 is running on). + UINT32 MinimumCyclesPerQuantum; // Fewest CPU cycles spent processing any one + // audio quantum since the last call. + UINT32 MaximumCyclesPerQuantum; // Most CPU cycles spent processing any one + // audio quantum since the last call. + + // Memory usage information + UINT32 MemoryUsageInBytes; // Total heap space currently in use. + + // Audio latency and glitching information + UINT32 CurrentLatencyInSamples; // Minimum delay from when a sample is read from a + // source buffer to when it reaches the speakers. + UINT32 GlitchesSinceEngineStarted; // Audio dropouts since the engine was started. + + // Data about XAudio2's current workload + UINT32 ActiveSourceVoiceCount; // Source voices currently playing. + UINT32 TotalSourceVoiceCount; // Source voices currently existing. + UINT32 ActiveSubmixVoiceCount; // Submix voices currently playing/existing. + + UINT32 ActiveResamplerCount; // Resample xAPOs currently active. + UINT32 ActiveMatrixMixCount; // MatrixMix xAPOs currently active. + + // Usage of the hardware XMA decoder (Xbox 360 only) + UINT32 ActiveXmaSourceVoices; // Number of source voices decoding XMA data. + UINT32 ActiveXmaStreams; // A voice can use more than one XMA stream. +} XAUDIO2_PERFORMANCE_DATA; + +// Used in IXAudio2::SetDebugConfiguration +typedef struct XAUDIO2_DEBUG_CONFIGURATION +{ + UINT32 TraceMask; // Bitmap of enabled debug message types. + UINT32 BreakMask; // Message types that will break into the debugger. + BOOL LogThreadID; // Whether to log the thread ID with each message. + BOOL LogFileline; // Whether to log the source file and line number. + BOOL LogFunctionName; // Whether to log the function name. + BOOL LogTiming; // Whether to log message timestamps. +} XAUDIO2_DEBUG_CONFIGURATION; + +// Values for the TraceMask and BreakMask bitmaps. Only ERRORS and WARNINGS +// are valid in BreakMask. WARNINGS implies ERRORS, DETAIL implies INFO, and +// FUNC_CALLS implies API_CALLS. By default, TraceMask is ERRORS and WARNINGS +// and all the other settings are zero. +#define XAUDIO2_LOG_ERRORS 0x0001 // For handled errors with serious effects. +#define XAUDIO2_LOG_WARNINGS 0x0002 // For handled errors that may be recoverable. +#define XAUDIO2_LOG_INFO 0x0004 // Informational chit-chat (e.g. state changes). +#define XAUDIO2_LOG_DETAIL 0x0008 // More detailed chit-chat. +#define XAUDIO2_LOG_API_CALLS 0x0010 // Public API function entries and exits. +#define XAUDIO2_LOG_FUNC_CALLS 0x0020 // Internal function entries and exits. +#define XAUDIO2_LOG_TIMING 0x0040 // Delays detected and other timing data. +#define XAUDIO2_LOG_LOCKS 0x0080 // Usage of critical sections and mutexes. +#define XAUDIO2_LOG_MEMORY 0x0100 // Memory heap usage information. +#define XAUDIO2_LOG_STREAMING 0x1000 // Audio streaming information. + + +/************************************************************************** + * + * IXAudio2: Top-level XAudio2 COM interface. 
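A minimal usage sketch for the debug settings above (illustrative, not part of the header; it assumes `pXAudio2` is an already-created IXAudio2 instance, and SetDebugConfiguration only has an effect in debug builds of the engine):

    XAUDIO2_DEBUG_CONFIGURATION dbg = {};
    dbg.TraceMask   = XAUDIO2_LOG_ERRORS | XAUDIO2_LOG_WARNINGS; // the documented default set
    dbg.BreakMask   = XAUDIO2_LOG_ERRORS;                        // only ERRORS/WARNINGS are valid here
    dbg.LogThreadID = TRUE;
    pXAudio2->SetDebugConfiguration(&dbg);                       // pReserved defaults to NULL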
+ * + **************************************************************************/ + +// Use default arguments if compiling as C++ +#ifdef __cplusplus + #define X2DEFAULT(x) =x +#else + #define X2DEFAULT(x) +#endif + +#undef INTERFACE +#define INTERFACE IXAudio2 +DECLARE_INTERFACE_(IXAudio2, IUnknown) +{ + // NAME: IXAudio2::QueryInterface + // DESCRIPTION: Queries for a given COM interface on the XAudio2 object. + // Only IID_IUnknown, IID_IXAudio2 and IID_IXaudio2Extension are supported. + // + // ARGUMENTS: + // riid - IID of the interface to be obtained. + // ppvInterface - Returns a pointer to the requested interface. + // + STDMETHOD(QueryInterface) (THIS_ REFIID riid, _COM_Outptr_ void** ppvInterface) PURE; + + // NAME: IXAudio2::AddRef + // DESCRIPTION: Adds a reference to the XAudio2 object. + // + STDMETHOD_(ULONG, AddRef) (THIS) PURE; + + // NAME: IXAudio2::Release + // DESCRIPTION: Releases a reference to the XAudio2 object. + // + STDMETHOD_(ULONG, Release) (THIS) PURE; + + // NAME: IXAudio2::RegisterForCallbacks + // DESCRIPTION: Adds a new client to receive XAudio2's engine callbacks. + // + // ARGUMENTS: + // pCallback - Callback interface to be called during each processing pass. + // + STDMETHOD(RegisterForCallbacks) (THIS_ _In_ IXAudio2EngineCallback* pCallback) PURE; + + // NAME: IXAudio2::UnregisterForCallbacks + // DESCRIPTION: Removes an existing receiver of XAudio2 engine callbacks. + // + // ARGUMENTS: + // pCallback - Previously registered callback interface to be removed. + // + STDMETHOD_(void, UnregisterForCallbacks) (THIS_ _In_ IXAudio2EngineCallback* pCallback) PURE; + + // NAME: IXAudio2::CreateSourceVoice + // DESCRIPTION: Creates and configures a source voice. + // + // ARGUMENTS: + // ppSourceVoice - Returns the new object's IXAudio2SourceVoice interface. + // pSourceFormat - Format of the audio that will be fed to the voice. + // Flags - XAUDIO2_VOICE flags specifying the source voice's behavior. + // MaxFrequencyRatio - Maximum SetFrequencyRatio argument to be allowed. + // pCallback - Optional pointer to a client-provided callback interface. + // pSendList - Optional list of voices this voice should send audio to. + // pEffectChain - Optional list of effects to apply to the audio data. + // + STDMETHOD(CreateSourceVoice) (THIS_ _Outptr_ IXAudio2SourceVoice** ppSourceVoice, + _In_ const WAVEFORMATEX* pSourceFormat, + UINT32 Flags X2DEFAULT(0), + float MaxFrequencyRatio X2DEFAULT(XAUDIO2_DEFAULT_FREQ_RATIO), + _In_opt_ IXAudio2VoiceCallback* pCallback X2DEFAULT(NULL), + _In_opt_ const XAUDIO2_VOICE_SENDS* pSendList X2DEFAULT(NULL), + _In_opt_ const XAUDIO2_EFFECT_CHAIN* pEffectChain X2DEFAULT(NULL)) PURE; + + // NAME: IXAudio2::CreateSubmixVoice + // DESCRIPTION: Creates and configures a submix voice. + // + // ARGUMENTS: + // ppSubmixVoice - Returns the new object's IXAudio2SubmixVoice interface. + // InputChannels - Number of channels in this voice's input audio data. + // InputSampleRate - Sample rate of this voice's input audio data. + // Flags - XAUDIO2_VOICE flags specifying the submix voice's behavior. + // ProcessingStage - Arbitrary number that determines the processing order. + // pSendList - Optional list of voices this voice should send audio to. + // pEffectChain - Optional list of effects to apply to the audio data. 
+ // + STDMETHOD(CreateSubmixVoice) (THIS_ _Outptr_ IXAudio2SubmixVoice** ppSubmixVoice, + UINT32 InputChannels, UINT32 InputSampleRate, + UINT32 Flags X2DEFAULT(0), UINT32 ProcessingStage X2DEFAULT(0), + _In_opt_ const XAUDIO2_VOICE_SENDS* pSendList X2DEFAULT(NULL), + _In_opt_ const XAUDIO2_EFFECT_CHAIN* pEffectChain X2DEFAULT(NULL)) PURE; + + + // NAME: IXAudio2::CreateMasteringVoice + // DESCRIPTION: Creates and configures a mastering voice. + // + // ARGUMENTS: + // ppMasteringVoice - Returns the new object's IXAudio2MasteringVoice interface. + // InputChannels - Number of channels in this voice's input audio data. + // InputSampleRate - Sample rate of this voice's input audio data. + // Flags - XAUDIO2_VOICE flags specifying the mastering voice's behavior. + // szDeviceId - Identifier of the device to receive the output audio. + // pEffectChain - Optional list of effects to apply to the audio data. + // StreamCategory - The audio stream category to use for this mastering voice + // + STDMETHOD(CreateMasteringVoice) (THIS_ _Outptr_ IXAudio2MasteringVoice** ppMasteringVoice, + UINT32 InputChannels X2DEFAULT(XAUDIO2_DEFAULT_CHANNELS), + UINT32 InputSampleRate X2DEFAULT(XAUDIO2_DEFAULT_SAMPLERATE), + UINT32 Flags X2DEFAULT(0), _In_opt_z_ LPCWSTR szDeviceId X2DEFAULT(NULL), + _In_opt_ const XAUDIO2_EFFECT_CHAIN* pEffectChain X2DEFAULT(NULL), + _In_ AUDIO_STREAM_CATEGORY StreamCategory X2DEFAULT(AudioCategory_GameEffects)) PURE; + + // NAME: IXAudio2::StartEngine + // DESCRIPTION: Creates and starts the audio processing thread. + // + STDMETHOD(StartEngine) (THIS) PURE; + + // NAME: IXAudio2::StopEngine + // DESCRIPTION: Stops and destroys the audio processing thread. + // + STDMETHOD_(void, StopEngine) (THIS) PURE; + + // NAME: IXAudio2::CommitChanges + // DESCRIPTION: Atomically applies a set of operations previously tagged + // with a given identifier. + // + // ARGUMENTS: + // OperationSet - Identifier of the set of operations to be applied. + // + STDMETHOD(CommitChanges) (THIS_ UINT32 OperationSet) PURE; + + // NAME: IXAudio2::GetPerformanceData + // DESCRIPTION: Returns current resource usage details: memory, CPU, etc. + // + // ARGUMENTS: + // pPerfData - Returns the performance data structure. + // + STDMETHOD_(void, GetPerformanceData) (THIS_ _Out_ XAUDIO2_PERFORMANCE_DATA* pPerfData) PURE; + + // NAME: IXAudio2::SetDebugConfiguration + // DESCRIPTION: Configures XAudio2's debug output (in debug builds only). + // + // ARGUMENTS: + // pDebugConfiguration - Structure describing the debug output behavior. + // pReserved - Optional parameter; must be NULL. + // + STDMETHOD_(void, SetDebugConfiguration) (THIS_ _In_opt_ const XAUDIO2_DEBUG_CONFIGURATION* pDebugConfiguration, + _Reserved_ void* pReserved X2DEFAULT(NULL)) PURE; +}; + +// This interface extends IXAudio2 with additional functionality. +// Use IXAudio2::QueryInterface to obtain a pointer to this interface. +#undef INTERFACE +#define INTERFACE IXAudio2Extension +DECLARE_INTERFACE_(IXAudio2Extension, IUnknown) +{ + // NAME: IXAudio2Extension::QueryInterface + // DESCRIPTION: Queries for a given COM interface on the XAudio2 object. + // Only IID_IUnknown, IID_IXAudio2 and IID_IXaudio2Extension are supported. + // + // ARGUMENTS: + // riid - IID of the interface to be obtained. + // ppvInterface - Returns a pointer to the requested interface. 
+ // + STDMETHOD(QueryInterface) (THIS_ REFIID riid, _COM_Outptr_ void** ppvInterface) PURE; + + // NAME: IXAudio2Extension::AddRef + // DESCRIPTION: Adds a reference to the XAudio2 object. + // + STDMETHOD_(ULONG, AddRef) (THIS) PURE; + + // NAME: IXAudio2Extension::Release + // DESCRIPTION: Releases a reference to the XAudio2 object. + // + STDMETHOD_(ULONG, Release) (THIS) PURE; + + // NAME: IXAudio2Extension::GetProcessingQuantum + // DESCRIPTION: Returns the processing quantum + // quantumMilliseconds = (1000.0f * quantumNumerator / quantumDenominator) + // + // ARGUMENTS: + // quantumNumerator - Quantum numerator + // quantumDenominator - Quantum denominator + // + STDMETHOD_(void, GetProcessingQuantum)(THIS_ _Out_ UINT32* quantumNumerator, _Out_range_(!= , 0) UINT32* quantumDenominator); + + // NAME: IXAudio2Extension::GetProcessor + // DESCRIPTION: Returns the number of the processor used by XAudio2 + // + // ARGUMENTS: + // processor - Non-zero Processor number + // + STDMETHOD_(void, GetProcessor)(THIS_ _Out_range_(!= , 0) XAUDIO2_PROCESSOR* processor); +}; + +/************************************************************************** + * + * IXAudio2Voice: Base voice management interface. + * + **************************************************************************/ + +#undef INTERFACE +#define INTERFACE IXAudio2Voice +DECLARE_INTERFACE(IXAudio2Voice) +{ + // These methods are declared in a macro so that the same declarations + // can be used in the derived voice types (IXAudio2SourceVoice, etc). + + #define Declare_IXAudio2Voice_Methods() \ + \ + /* NAME: IXAudio2Voice::GetVoiceDetails + // DESCRIPTION: Returns the basic characteristics of this voice. + // + // ARGUMENTS: + // pVoiceDetails - Returns the voice's details. + */\ + STDMETHOD_(void, GetVoiceDetails) (THIS_ _Out_ XAUDIO2_VOICE_DETAILS* pVoiceDetails) PURE; \ + \ + /* NAME: IXAudio2Voice::SetOutputVoices + // DESCRIPTION: Replaces the set of submix/mastering voices that receive + // this voice's output. + // + // ARGUMENTS: + // pSendList - Optional list of voices this voice should send audio to. + */\ + STDMETHOD(SetOutputVoices) (THIS_ _In_opt_ const XAUDIO2_VOICE_SENDS* pSendList) PURE; \ + \ + /* NAME: IXAudio2Voice::SetEffectChain + // DESCRIPTION: Replaces this voice's current effect chain with a new one. + // + // ARGUMENTS: + // pEffectChain - Structure describing the new effect chain to be used. + */\ + STDMETHOD(SetEffectChain) (THIS_ _In_opt_ const XAUDIO2_EFFECT_CHAIN* pEffectChain) PURE; \ + \ + /* NAME: IXAudio2Voice::EnableEffect + // DESCRIPTION: Enables an effect in this voice's effect chain. + // + // ARGUMENTS: + // EffectIndex - Index of an effect within this voice's effect chain. + // OperationSet - Used to identify this call as part of a deferred batch. + */\ + STDMETHOD(EnableEffect) (THIS_ UINT32 EffectIndex, \ + UINT32 OperationSet X2DEFAULT(XAUDIO2_COMMIT_NOW)) PURE; \ + \ + /* NAME: IXAudio2Voice::DisableEffect + // DESCRIPTION: Disables an effect in this voice's effect chain. + // + // ARGUMENTS: + // EffectIndex - Index of an effect within this voice's effect chain. + // OperationSet - Used to identify this call as part of a deferred batch. + */\ + STDMETHOD(DisableEffect) (THIS_ UINT32 EffectIndex, \ + UINT32 OperationSet X2DEFAULT(XAUDIO2_COMMIT_NOW)) PURE; \ + \ + /* NAME: IXAudio2Voice::GetEffectState + // DESCRIPTION: Returns the running state of an effect. + // + // ARGUMENTS: + // EffectIndex - Index of an effect within this voice's effect chain. 
+ // pEnabled - Returns the enabled/disabled state of the given effect. + */\ + STDMETHOD_(void, GetEffectState) (THIS_ UINT32 EffectIndex, _Out_ BOOL* pEnabled) PURE; \ + \ + /* NAME: IXAudio2Voice::SetEffectParameters + // DESCRIPTION: Sets effect-specific parameters. + // + // REMARKS: Unlike IXAPOParameters::SetParameters, this method may + // be called from any thread. XAudio2 implements + // appropriate synchronization to copy the parameters to the + // realtime audio processing thread. + // + // ARGUMENTS: + // EffectIndex - Index of an effect within this voice's effect chain. + // pParameters - Pointer to an effect-specific parameters block. + // ParametersByteSize - Size of the pParameters array in bytes. + // OperationSet - Used to identify this call as part of a deferred batch. + */\ + STDMETHOD(SetEffectParameters) (THIS_ UINT32 EffectIndex, \ + _In_reads_bytes_(ParametersByteSize) const void* pParameters, \ + UINT32 ParametersByteSize, \ + UINT32 OperationSet X2DEFAULT(XAUDIO2_COMMIT_NOW)) PURE; \ + \ + /* NAME: IXAudio2Voice::GetEffectParameters + // DESCRIPTION: Obtains the current effect-specific parameters. + // + // ARGUMENTS: + // EffectIndex - Index of an effect within this voice's effect chain. + // pParameters - Returns the current values of the effect-specific parameters. + // ParametersByteSize - Size of the pParameters array in bytes. + */\ + STDMETHOD(GetEffectParameters) (THIS_ UINT32 EffectIndex, \ + _Out_writes_bytes_(ParametersByteSize) void* pParameters, \ + UINT32 ParametersByteSize) PURE; \ + \ + /* NAME: IXAudio2Voice::SetFilterParameters + // DESCRIPTION: Sets this voice's filter parameters. + // + // ARGUMENTS: + // pParameters - Pointer to the filter's parameter structure. + // OperationSet - Used to identify this call as part of a deferred batch. + */\ + STDMETHOD(SetFilterParameters) (THIS_ _In_ const XAUDIO2_FILTER_PARAMETERS* pParameters, \ + UINT32 OperationSet X2DEFAULT(XAUDIO2_COMMIT_NOW)) PURE; \ + \ + /* NAME: IXAudio2Voice::GetFilterParameters + // DESCRIPTION: Returns this voice's current filter parameters. + // + // ARGUMENTS: + // pParameters - Returns the filter parameters. + */\ + STDMETHOD_(void, GetFilterParameters) (THIS_ _Out_ XAUDIO2_FILTER_PARAMETERS* pParameters) PURE; \ + \ + /* NAME: IXAudio2Voice::SetOutputFilterParameters + // DESCRIPTION: Sets the filter parameters on one of this voice's sends. + // + // ARGUMENTS: + // pDestinationVoice - Destination voice of the send whose filter parameters will be set. + // pParameters - Pointer to the filter's parameter structure. + // OperationSet - Used to identify this call as part of a deferred batch. + */\ + STDMETHOD(SetOutputFilterParameters) (THIS_ _In_opt_ IXAudio2Voice* pDestinationVoice, \ + _In_ const XAUDIO2_FILTER_PARAMETERS* pParameters, \ + UINT32 OperationSet X2DEFAULT(XAUDIO2_COMMIT_NOW)) PURE; \ + \ + /* NAME: IXAudio2Voice::GetOutputFilterParameters + // DESCRIPTION: Returns the filter parameters from one of this voice's sends. + // + // ARGUMENTS: + // pDestinationVoice - Destination voice of the send whose filter parameters will be read. + // pParameters - Returns the filter parameters. + */\ + STDMETHOD_(void, GetOutputFilterParameters) (THIS_ _In_opt_ IXAudio2Voice* pDestinationVoice, \ + _Out_ XAUDIO2_FILTER_PARAMETERS* pParameters) PURE; \ + \ + /* NAME: IXAudio2Voice::SetVolume + // DESCRIPTION: Sets this voice's overall volume level. + // + // ARGUMENTS: + // Volume - New overall volume level to be used, as an amplitude factor. 
+ // OperationSet - Used to identify this call as part of a deferred batch. + */\ + STDMETHOD(SetVolume) (THIS_ float Volume, \ + UINT32 OperationSet X2DEFAULT(XAUDIO2_COMMIT_NOW)) PURE; \ + \ + /* NAME: IXAudio2Voice::GetVolume + // DESCRIPTION: Obtains this voice's current overall volume level. + // + // ARGUMENTS: + // pVolume: Returns the voice's current overall volume level. + */\ + STDMETHOD_(void, GetVolume) (THIS_ _Out_ float* pVolume) PURE; \ + \ + /* NAME: IXAudio2Voice::SetChannelVolumes + // DESCRIPTION: Sets this voice's per-channel volume levels. + // + // ARGUMENTS: + // Channels - Used to confirm the voice's channel count. + // pVolumes - Array of per-channel volume levels to be used. + // OperationSet - Used to identify this call as part of a deferred batch. + */\ + STDMETHOD(SetChannelVolumes) (THIS_ UINT32 Channels, _In_reads_(Channels) const float* pVolumes, \ + UINT32 OperationSet X2DEFAULT(XAUDIO2_COMMIT_NOW)) PURE; \ + \ + /* NAME: IXAudio2Voice::GetChannelVolumes + // DESCRIPTION: Returns this voice's current per-channel volume levels. + // + // ARGUMENTS: + // Channels - Used to confirm the voice's channel count. + // pVolumes - Returns an array of the current per-channel volume levels. + */\ + STDMETHOD_(void, GetChannelVolumes) (THIS_ UINT32 Channels, _Out_writes_(Channels) float* pVolumes) PURE; \ + \ + /* NAME: IXAudio2Voice::SetOutputMatrix + // DESCRIPTION: Sets the volume levels used to mix from each channel of this + // voice's output audio to each channel of a given destination + // voice's input audio. + // + // ARGUMENTS: + // pDestinationVoice - The destination voice whose mix matrix to change. + // SourceChannels - Used to confirm this voice's output channel count + // (the number of channels produced by the last effect in the chain). + // DestinationChannels - Confirms the destination voice's input channels. + // pLevelMatrix - Array of [SourceChannels * DestinationChannels] send + // levels. The level used to send from source channel S to destination + // channel D should be in pLevelMatrix[S + SourceChannels * D]. + // OperationSet - Used to identify this call as part of a deferred batch. + */\ + STDMETHOD(SetOutputMatrix) (THIS_ _In_opt_ IXAudio2Voice* pDestinationVoice, \ + UINT32 SourceChannels, UINT32 DestinationChannels, \ + _In_reads_(SourceChannels * DestinationChannels) const float* pLevelMatrix, \ + UINT32 OperationSet X2DEFAULT(XAUDIO2_COMMIT_NOW)) PURE; \ + \ + /* NAME: IXAudio2Voice::GetOutputMatrix + // DESCRIPTION: Obtains the volume levels used to send each channel of this + // voice's output audio to each channel of a given destination + // voice's input audio. + // + // ARGUMENTS: + // pDestinationVoice - The destination voice whose mix matrix to obtain. + // SourceChannels - Used to confirm this voice's output channel count + // (the number of channels produced by the last effect in the chain). + // DestinationChannels - Confirms the destination voice's input channels. + // pLevelMatrix - Array of send levels, as above. + */\ + STDMETHOD_(void, GetOutputMatrix) (THIS_ _In_opt_ IXAudio2Voice* pDestinationVoice, \ + UINT32 SourceChannels, UINT32 DestinationChannels, \ + _Out_writes_(SourceChannels * DestinationChannels) float* pLevelMatrix) PURE; \ + \ + /* NAME: IXAudio2Voice::DestroyVoice + // DESCRIPTION: Destroys this voice, stopping it if necessary and removing + // it from the XAudio2 graph. 
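// (A worked illustration of the SetOutputMatrix/GetOutputMatrix contract
// above, using hypothetical values: for a stereo source sent to a stereo
// destination, SourceChannels = 2 and DestinationChannels = 2, and the send
// level from source channel S into destination channel D lives at
// pLevelMatrix[S + SourceChannels * D], i.e. [0] = L to L, [1] = R to L,
// [2] = L to R, [3] = R to R. Thus { 1, 0, 0, 1 } is an identity mix and
// { 0, 1, 1, 0 } swaps left and right.)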
+ */\ + STDMETHOD_(void, DestroyVoice) (THIS) PURE + + Declare_IXAudio2Voice_Methods(); +}; + + +/************************************************************************** + * + * IXAudio2SourceVoice: Source voice management interface. + * + **************************************************************************/ + +#undef INTERFACE +#define INTERFACE IXAudio2SourceVoice +DECLARE_INTERFACE_(IXAudio2SourceVoice, IXAudio2Voice) +{ + // Methods from IXAudio2Voice base interface + Declare_IXAudio2Voice_Methods(); + + // NAME: IXAudio2SourceVoice::Start + // DESCRIPTION: Makes this voice start consuming and processing audio. + // + // ARGUMENTS: + // Flags - Flags controlling how the voice should be started. + // OperationSet - Used to identify this call as part of a deferred batch. + // + STDMETHOD(Start) (THIS_ UINT32 Flags X2DEFAULT(0), UINT32 OperationSet X2DEFAULT(XAUDIO2_COMMIT_NOW)) PURE; + + // NAME: IXAudio2SourceVoice::Stop + // DESCRIPTION: Makes this voice stop consuming audio. + // + // ARGUMENTS: + // Flags - Flags controlling how the voice should be stopped. + // OperationSet - Used to identify this call as part of a deferred batch. + // + STDMETHOD(Stop) (THIS_ UINT32 Flags X2DEFAULT(0), UINT32 OperationSet X2DEFAULT(XAUDIO2_COMMIT_NOW)) PURE; + + // NAME: IXAudio2SourceVoice::SubmitSourceBuffer + // DESCRIPTION: Adds a new audio buffer to this voice's input queue. + // + // ARGUMENTS: + // pBuffer - Pointer to the buffer structure to be queued. + // pBufferWMA - Additional structure used only when submitting XWMA data. + // + STDMETHOD(SubmitSourceBuffer) (THIS_ _In_ const XAUDIO2_BUFFER* pBuffer, _In_opt_ const XAUDIO2_BUFFER_WMA* pBufferWMA X2DEFAULT(NULL)) PURE; + + // NAME: IXAudio2SourceVoice::FlushSourceBuffers + // DESCRIPTION: Removes all pending audio buffers from this voice's queue. + // + STDMETHOD(FlushSourceBuffers) (THIS) PURE; + + // NAME: IXAudio2SourceVoice::Discontinuity + // DESCRIPTION: Notifies the voice of an intentional break in the stream of + // audio buffers (e.g. the end of a sound), to prevent XAudio2 + // from interpreting an empty buffer queue as a glitch. + // + STDMETHOD(Discontinuity) (THIS) PURE; + + // NAME: IXAudio2SourceVoice::ExitLoop + // DESCRIPTION: Breaks out of the current loop when its end is reached. + // + // ARGUMENTS: + // OperationSet - Used to identify this call as part of a deferred batch. + // + STDMETHOD(ExitLoop) (THIS_ UINT32 OperationSet X2DEFAULT(XAUDIO2_COMMIT_NOW)) PURE; + + // NAME: IXAudio2SourceVoice::GetState + // DESCRIPTION: Returns the number of buffers currently queued on this voice, + // the pContext value associated with the currently processing + // buffer (if any), and other voice state information. + // + // ARGUMENTS: + // pVoiceState - Returns the state information. + // Flags - Flags controlling what voice state is returned. + // + STDMETHOD_(void, GetState) (THIS_ _Out_ XAUDIO2_VOICE_STATE* pVoiceState, UINT32 Flags X2DEFAULT(0)) PURE; + + // NAME: IXAudio2SourceVoice::SetFrequencyRatio + // DESCRIPTION: Sets this voice's frequency adjustment, i.e. its pitch. + // + // ARGUMENTS: + // Ratio - Frequency change, expressed as source frequency / target frequency. + // OperationSet - Used to identify this call as part of a deferred batch. + // + STDMETHOD(SetFrequencyRatio) (THIS_ float Ratio, + UINT32 OperationSet X2DEFAULT(XAUDIO2_COMMIT_NOW)) PURE; + + // NAME: IXAudio2SourceVoice::GetFrequencyRatio + // DESCRIPTION: Returns this voice's current frequency adjustment ratio. 
+ // + // ARGUMENTS: + // pRatio - Returns the frequency adjustment. + // + STDMETHOD_(void, GetFrequencyRatio) (THIS_ _Out_ float* pRatio) PURE; + + // NAME: IXAudio2SourceVoice::SetSourceSampleRate + // DESCRIPTION: Reconfigures this voice to treat its source data as being + // at a different sample rate than the original one specified + // in CreateSourceVoice's pSourceFormat argument. + // + // ARGUMENTS: + // UINT32 - The intended sample rate of further submitted source data. + // + STDMETHOD(SetSourceSampleRate) (THIS_ UINT32 NewSourceSampleRate) PURE; +}; + + +/************************************************************************** + * + * IXAudio2SubmixVoice: Submixing voice management interface. + * + **************************************************************************/ + +#undef INTERFACE +#define INTERFACE IXAudio2SubmixVoice +DECLARE_INTERFACE_(IXAudio2SubmixVoice, IXAudio2Voice) +{ + // Methods from IXAudio2Voice base interface + Declare_IXAudio2Voice_Methods(); + + // There are currently no methods specific to submix voices. +}; + + +/************************************************************************** + * + * IXAudio2MasteringVoice: Mastering voice management interface. + * + **************************************************************************/ + +#undef INTERFACE +#define INTERFACE IXAudio2MasteringVoice +DECLARE_INTERFACE_(IXAudio2MasteringVoice, IXAudio2Voice) +{ + // Methods from IXAudio2Voice base interface + Declare_IXAudio2Voice_Methods(); + + // NAME: IXAudio2MasteringVoice::GetChannelMask + // DESCRIPTION: Returns the channel mask for this voice + // + // ARGUMENTS: + // pChannelMask - returns the channel mask for this voice. This corresponds + // to the dwChannelMask member of WAVEFORMATEXTENSIBLE. + // + STDMETHOD(GetChannelMask) (THIS_ _Out_ DWORD* pChannelmask) PURE; +}; + + +/************************************************************************** + * + * IXAudio2EngineCallback: Client notification interface for engine events. + * + * REMARKS: Contains methods to notify the client when certain events happen + * in the XAudio2 engine. This interface should be implemented by + * the client. XAudio2 will call these methods via the interface + * pointer provided by the client when it calls + * IXAudio2::RegisterForCallbacks. + * + **************************************************************************/ + +#undef INTERFACE +#define INTERFACE IXAudio2EngineCallback +DECLARE_INTERFACE(IXAudio2EngineCallback) +{ + // Called by XAudio2 just before an audio processing pass begins. + STDMETHOD_(void, OnProcessingPassStart) (THIS) PURE; + + // Called just after an audio processing pass ends. + STDMETHOD_(void, OnProcessingPassEnd) (THIS) PURE; + + // Called in the event of a critical system error which requires XAudio2 + // to be closed down and restarted. The error code is given in Error. + STDMETHOD_(void, OnCriticalError) (THIS_ HRESULT Error) PURE; +}; + + +/************************************************************************** + * + * IXAudio2VoiceCallback: Client notification interface for voice events. + * + * REMARKS: Contains methods to notify the client when certain events happen + * in an XAudio2 voice. This interface should be implemented by the + * client. XAudio2 will call these methods via an interface pointer + * provided by the client in the IXAudio2::CreateSourceVoice call. 
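Before the declaration, a minimal C++ sketch of a client implementation of the voice callback declared just below (illustrative only; `StreamingVoiceCallback` and `hBufferEnd` are hypothetical names, the event being created elsewhere with CreateEvent). These callbacks run on the audio processing thread, so they should return quickly and never block:

    struct StreamingVoiceCallback : IXAudio2VoiceCallback
    {
        HANDLE hBufferEnd; // hypothetical Win32 event, signaled so a streaming thread can refill buffers

        void STDMETHODCALLTYPE OnBufferEnd(void*) override { SetEvent(hBufferEnd); }

        // The remaining notifications are not needed for simple streaming.
        void STDMETHODCALLTYPE OnVoiceProcessingPassStart(UINT32) override {}
        void STDMETHODCALLTYPE OnVoiceProcessingPassEnd() override {}
        void STDMETHODCALLTYPE OnStreamEnd() override {}
        void STDMETHODCALLTYPE OnBufferStart(void*) override {}
        void STDMETHODCALLTYPE OnLoopEnd(void*) override {}
        void STDMETHODCALLTYPE OnVoiceError(void*, HRESULT) override {}
    };

An instance is passed as the pCallback argument of IXAudio2::CreateSourceVoice and must outlive the voice it is attached to.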
+ * + **************************************************************************/ + +#undef INTERFACE +#define INTERFACE IXAudio2VoiceCallback +DECLARE_INTERFACE(IXAudio2VoiceCallback) +{ + // Called just before this voice's processing pass begins. + STDMETHOD_(void, OnVoiceProcessingPassStart) (THIS_ UINT32 BytesRequired) PURE; + + // Called just after this voice's processing pass ends. + STDMETHOD_(void, OnVoiceProcessingPassEnd) (THIS) PURE; + + // Called when this voice has just finished playing a buffer stream + // (as marked with the XAUDIO2_END_OF_STREAM flag on the last buffer). + STDMETHOD_(void, OnStreamEnd) (THIS) PURE; + + // Called when this voice is about to start processing a new buffer. + STDMETHOD_(void, OnBufferStart) (THIS_ void* pBufferContext) PURE; + + // Called when this voice has just finished processing a buffer. + // The buffer can now be reused or destroyed. + STDMETHOD_(void, OnBufferEnd) (THIS_ void* pBufferContext) PURE; + + // Called when this voice has just reached the end position of a loop. + STDMETHOD_(void, OnLoopEnd) (THIS_ void* pBufferContext) PURE; + + // Called in the event of a critical error during voice processing, + // such as a failing xAPO or an error from the hardware XMA decoder. + // The voice may have to be destroyed and re-created to recover from + // the error. The callback arguments report which buffer was being + // processed when the error occurred, and its HRESULT code. + STDMETHOD_(void, OnVoiceError) (THIS_ void* pBufferContext, HRESULT Error) PURE; +}; + + +/************************************************************************** + * + * Macros to make it easier to use the XAudio2 COM interfaces in C code. + * + **************************************************************************/ + +#ifndef __cplusplus + +// IXAudio2 +#define IXAudio2_QueryInterface(This,riid,ppvInterface) ((This)->lpVtbl->QueryInterface(This,riid,ppvInterface)) +#define IXAudio2_AddRef(This) ((This)->lpVtbl->AddRef(This)) +#define IXAudio2_Release(This) ((This)->lpVtbl->Release(This)) +#define IXAudio2_RegisterForCallbacks(This,pCallback) ((This)->lpVtbl->RegisterForCallbacks(This,pCallback)) +#define IXAudio2_UnregisterForCallbacks(This,pCallback) ((This)->lpVtbl->UnregisterForCallbacks(This,pCallback)) +#define IXAudio2_CreateSourceVoice(This,ppSourceVoice,pSourceFormat,Flags,MaxFrequencyRatio,pCallback,pSendList,pEffectChain) ((This)->lpVtbl->CreateSourceVoice(This,ppSourceVoice,pSourceFormat,Flags,MaxFrequencyRatio,pCallback,pSendList,pEffectChain)) +#define IXAudio2_CreateSubmixVoice(This,ppSubmixVoice,InputChannels,InputSampleRate,Flags,ProcessingStage,pSendList,pEffectChain) ((This)->lpVtbl->CreateSubmixVoice(This,ppSubmixVoice,InputChannels,InputSampleRate,Flags,ProcessingStage,pSendList,pEffectChain)) +#define IXAudio2_CreateMasteringVoice(This,ppMasteringVoice,InputChannels,InputSampleRate,Flags,DeviceId,pEffectChain,StreamCategory) ((This)->lpVtbl->CreateMasteringVoice(This,ppMasteringVoice,InputChannels,InputSampleRate,Flags,DeviceId,pEffectChain,StreamCategory)) +#define IXAudio2_StartEngine(This) ((This)->lpVtbl->StartEngine(This)) +#define IXAudio2_StopEngine(This) ((This)->lpVtbl->StopEngine(This)) +#define IXAudio2_CommitChanges(This,OperationSet) ((This)->lpVtbl->CommitChanges(This,OperationSet)) +#define IXAudio2_GetPerformanceData(This,pPerfData) ((This)->lpVtbl->GetPerformanceData(This,pPerfData)) +#define IXAudio2_SetDebugConfiguration(This,pDebugConfiguration,pReserved) 
((This)->lpVtbl->SetDebugConfiguration(This,pDebugConfiguration,pReserved)) + +// IXAudio2Extension +#define IXAudio2Extension_GetProcessingQuantum(This,quantumNumerator,quantumDenominator) ((This)->lpVtbl->GetProcessingQuantum(This,quantumNumerator,quantumDenominator)) +#define IXAudio2Extension_GetProcessor(This,processor) ((This)->lpVtbl->GetProcessor(This,processor)) + +// IXAudio2Voice +#define IXAudio2Voice_GetVoiceDetails(This,pVoiceDetails) ((This)->lpVtbl->GetVoiceDetails(This,pVoiceDetails)) +#define IXAudio2Voice_SetOutputVoices(This,pSendList) ((This)->lpVtbl->SetOutputVoices(This,pSendList)) +#define IXAudio2Voice_SetEffectChain(This,pEffectChain) ((This)->lpVtbl->SetEffectChain(This,pEffectChain)) +#define IXAudio2Voice_EnableEffect(This,EffectIndex,OperationSet) ((This)->lpVtbl->EnableEffect(This,EffectIndex,OperationSet)) +#define IXAudio2Voice_DisableEffect(This,EffectIndex,OperationSet) ((This)->lpVtbl->DisableEffect(This,EffectIndex,OperationSet)) +#define IXAudio2Voice_GetEffectState(This,EffectIndex,pEnabled) ((This)->lpVtbl->GetEffectState(This,EffectIndex,pEnabled)) +#define IXAudio2Voice_SetEffectParameters(This,EffectIndex,pParameters,ParametersByteSize, OperationSet) ((This)->lpVtbl->SetEffectParameters(This,EffectIndex,pParameters,ParametersByteSize,OperationSet)) +#define IXAudio2Voice_GetEffectParameters(This,EffectIndex,pParameters,ParametersByteSize) ((This)->lpVtbl->GetEffectParameters(This,EffectIndex,pParameters,ParametersByteSize)) +#define IXAudio2Voice_SetFilterParameters(This,pParameters,OperationSet) ((This)->lpVtbl->SetFilterParameters(This,pParameters,OperationSet)) +#define IXAudio2Voice_GetFilterParameters(This,pParameters) ((This)->lpVtbl->GetFilterParameters(This,pParameters)) +#define IXAudio2Voice_SetOutputFilterParameters(This,pDestinationVoice,pParameters,OperationSet) ((This)->lpVtbl->SetOutputFilterParameters(This,pDestinationVoice,pParameters,OperationSet)) +#define IXAudio2Voice_GetOutputFilterParameters(This,pDestinationVoice,pParameters) ((This)->lpVtbl->GetOutputFilterParameters(This,pDestinationVoice,pParameters)) +#define IXAudio2Voice_SetVolume(This,Volume,OperationSet) ((This)->lpVtbl->SetVolume(This,Volume,OperationSet)) +#define IXAudio2Voice_GetVolume(This,pVolume) ((This)->lpVtbl->GetVolume(This,pVolume)) +#define IXAudio2Voice_SetChannelVolumes(This,Channels,pVolumes,OperationSet) ((This)->lpVtbl->SetChannelVolumes(This,Channels,pVolumes,OperationSet)) +#define IXAudio2Voice_GetChannelVolumes(This,Channels,pVolumes) ((This)->lpVtbl->GetChannelVolumes(This,Channels,pVolumes)) +#define IXAudio2Voice_SetOutputMatrix(This,pDestinationVoice,SourceChannels,DestinationChannels,pLevelMatrix,OperationSet) ((This)->lpVtbl->SetOutputMatrix(This,pDestinationVoice,SourceChannels,DestinationChannels,pLevelMatrix,OperationSet)) +#define IXAudio2Voice_GetOutputMatrix(This,pDestinationVoice,SourceChannels,DestinationChannels,pLevelMatrix) ((This)->lpVtbl->GetOutputMatrix(This,pDestinationVoice,SourceChannels,DestinationChannels,pLevelMatrix)) +#define IXAudio2Voice_DestroyVoice(This) ((This)->lpVtbl->DestroyVoice(This)) + +// IXAudio2SourceVoice +#define IXAudio2SourceVoice_GetVoiceDetails IXAudio2Voice_GetVoiceDetails +#define IXAudio2SourceVoice_SetOutputVoices IXAudio2Voice_SetOutputVoices +#define IXAudio2SourceVoice_SetEffectChain IXAudio2Voice_SetEffectChain +#define IXAudio2SourceVoice_EnableEffect IXAudio2Voice_EnableEffect +#define IXAudio2SourceVoice_DisableEffect IXAudio2Voice_DisableEffect +#define 
IXAudio2SourceVoice_GetEffectState IXAudio2Voice_GetEffectState +#define IXAudio2SourceVoice_SetEffectParameters IXAudio2Voice_SetEffectParameters +#define IXAudio2SourceVoice_GetEffectParameters IXAudio2Voice_GetEffectParameters +#define IXAudio2SourceVoice_SetFilterParameters IXAudio2Voice_SetFilterParameters +#define IXAudio2SourceVoice_GetFilterParameters IXAudio2Voice_GetFilterParameters +#define IXAudio2SourceVoice_SetOutputFilterParameters IXAudio2Voice_SetOutputFilterParameters +#define IXAudio2SourceVoice_GetOutputFilterParameters IXAudio2Voice_GetOutputFilterParameters +#define IXAudio2SourceVoice_SetVolume IXAudio2Voice_SetVolume +#define IXAudio2SourceVoice_GetVolume IXAudio2Voice_GetVolume +#define IXAudio2SourceVoice_SetChannelVolumes IXAudio2Voice_SetChannelVolumes +#define IXAudio2SourceVoice_GetChannelVolumes IXAudio2Voice_GetChannelVolumes +#define IXAudio2SourceVoice_SetOutputMatrix IXAudio2Voice_SetOutputMatrix +#define IXAudio2SourceVoice_GetOutputMatrix IXAudio2Voice_GetOutputMatrix +#define IXAudio2SourceVoice_DestroyVoice IXAudio2Voice_DestroyVoice +#define IXAudio2SourceVoice_Start(This,Flags,OperationSet) ((This)->lpVtbl->Start(This,Flags,OperationSet)) +#define IXAudio2SourceVoice_Stop(This,Flags,OperationSet) ((This)->lpVtbl->Stop(This,Flags,OperationSet)) +#define IXAudio2SourceVoice_SubmitSourceBuffer(This,pBuffer,pBufferWMA) ((This)->lpVtbl->SubmitSourceBuffer(This,pBuffer,pBufferWMA)) +#define IXAudio2SourceVoice_FlushSourceBuffers(This) ((This)->lpVtbl->FlushSourceBuffers(This)) +#define IXAudio2SourceVoice_Discontinuity(This) ((This)->lpVtbl->Discontinuity(This)) +#define IXAudio2SourceVoice_ExitLoop(This,OperationSet) ((This)->lpVtbl->ExitLoop(This,OperationSet)) +#define IXAudio2SourceVoice_GetState(This,pVoiceState,Flags) ((This)->lpVtbl->GetState(This,pVoiceState,Flags)) +#define IXAudio2SourceVoice_SetFrequencyRatio(This,Ratio,OperationSet) ((This)->lpVtbl->SetFrequencyRatio(This,Ratio,OperationSet)) +#define IXAudio2SourceVoice_GetFrequencyRatio(This,pRatio) ((This)->lpVtbl->GetFrequencyRatio(This,pRatio)) +#define IXAudio2SourceVoice_SetSourceSampleRate(This,NewSourceSampleRate) ((This)->lpVtbl->SetSourceSampleRate(This,NewSourceSampleRate)) + +// IXAudio2SubmixVoice +#define IXAudio2SubmixVoice_GetVoiceDetails IXAudio2Voice_GetVoiceDetails +#define IXAudio2SubmixVoice_SetOutputVoices IXAudio2Voice_SetOutputVoices +#define IXAudio2SubmixVoice_SetEffectChain IXAudio2Voice_SetEffectChain +#define IXAudio2SubmixVoice_EnableEffect IXAudio2Voice_EnableEffect +#define IXAudio2SubmixVoice_DisableEffect IXAudio2Voice_DisableEffect +#define IXAudio2SubmixVoice_GetEffectState IXAudio2Voice_GetEffectState +#define IXAudio2SubmixVoice_SetEffectParameters IXAudio2Voice_SetEffectParameters +#define IXAudio2SubmixVoice_GetEffectParameters IXAudio2Voice_GetEffectParameters +#define IXAudio2SubmixVoice_SetFilterParameters IXAudio2Voice_SetFilterParameters +#define IXAudio2SubmixVoice_GetFilterParameters IXAudio2Voice_GetFilterParameters +#define IXAudio2SubmixVoice_SetOutputFilterParameters IXAudio2Voice_SetOutputFilterParameters +#define IXAudio2SubmixVoice_GetOutputFilterParameters IXAudio2Voice_GetOutputFilterParameters +#define IXAudio2SubmixVoice_SetVolume IXAudio2Voice_SetVolume +#define IXAudio2SubmixVoice_GetVolume IXAudio2Voice_GetVolume +#define IXAudio2SubmixVoice_SetChannelVolumes IXAudio2Voice_SetChannelVolumes +#define IXAudio2SubmixVoice_GetChannelVolumes IXAudio2Voice_GetChannelVolumes +#define IXAudio2SubmixVoice_SetOutputMatrix 
IXAudio2Voice_SetOutputMatrix +#define IXAudio2SubmixVoice_GetOutputMatrix IXAudio2Voice_GetOutputMatrix +#define IXAudio2SubmixVoice_DestroyVoice IXAudio2Voice_DestroyVoice + +// IXAudio2MasteringVoice +#define IXAudio2MasteringVoice_GetVoiceDetails IXAudio2Voice_GetVoiceDetails +#define IXAudio2MasteringVoice_SetOutputVoices IXAudio2Voice_SetOutputVoices +#define IXAudio2MasteringVoice_SetEffectChain IXAudio2Voice_SetEffectChain +#define IXAudio2MasteringVoice_EnableEffect IXAudio2Voice_EnableEffect +#define IXAudio2MasteringVoice_DisableEffect IXAudio2Voice_DisableEffect +#define IXAudio2MasteringVoice_GetEffectState IXAudio2Voice_GetEffectState +#define IXAudio2MasteringVoice_SetEffectParameters IXAudio2Voice_SetEffectParameters +#define IXAudio2MasteringVoice_GetEffectParameters IXAudio2Voice_GetEffectParameters +#define IXAudio2MasteringVoice_SetFilterParameters IXAudio2Voice_SetFilterParameters +#define IXAudio2MasteringVoice_GetFilterParameters IXAudio2Voice_GetFilterParameters +#define IXAudio2MasteringVoice_SetOutputFilterParameters IXAudio2Voice_SetOutputFilterParameters +#define IXAudio2MasteringVoice_GetOutputFilterParameters IXAudio2Voice_GetOutputFilterParameters +#define IXAudio2MasteringVoice_SetVolume IXAudio2Voice_SetVolume +#define IXAudio2MasteringVoice_GetVolume IXAudio2Voice_GetVolume +#define IXAudio2MasteringVoice_SetChannelVolumes IXAudio2Voice_SetChannelVolumes +#define IXAudio2MasteringVoice_GetChannelVolumes IXAudio2Voice_GetChannelVolumes +#define IXAudio2MasteringVoice_SetOutputMatrix IXAudio2Voice_SetOutputMatrix +#define IXAudio2MasteringVoice_GetOutputMatrix IXAudio2Voice_GetOutputMatrix +#define IXAudio2MasteringVoice_DestroyVoice IXAudio2Voice_DestroyVoice +#define IXAudio2MasteringVoice_GetChannelMask(This,pChannelMask) ((This)->lpVtbl->GetChannelMask(This,pChannelMask)) + +#endif // #ifndef __cplusplus + + +/************************************************************************** + * + * Utility functions used to convert from pitch in semitones and volume + * in decibels to the frequency and amplitude ratio units used by XAudio2. + * These are only defined if the client #defines XAUDIO2_HELPER_FUNCTIONS + * prior to #including xaudio2.h. 
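A sketch of opting in to these helpers (illustrative; `HalveVolume` is a hypothetical name, the header name/path depends on how the redist headers are integrated, and `pVoice` is assumed to be a live source voice):

    #define XAUDIO2_HELPER_FUNCTIONS // must precede the #include
    #include "xaudio2.h"

    inline void HalveVolume(IXAudio2SourceVoice* pVoice)
    {
        // -6 dB corresponds to an amplitude ratio of roughly 0.5.
        pVoice->SetVolume(XAudio2DecibelsToAmplitudeRatio(-6.0f));
    }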
+ *
+ **************************************************************************/
+
+#ifdef XAUDIO2_HELPER_FUNCTIONS
+
+#define _USE_MATH_DEFINES // Make math.h define M_PI
+#include <math.h> // For powf, log10f, sinf and asinf
+
+// Calculate the argument to SetVolume from a decibel value
+__inline float XAudio2DecibelsToAmplitudeRatio(float Decibels)
+{
+    return powf(10.0f, Decibels / 20.0f);
+}
+
+// Recover a volume in decibels from an amplitude factor
+__inline float XAudio2AmplitudeRatioToDecibels(float Volume)
+{
+    if (Volume == 0)
+    {
+        return -3.402823466e+38f; // Smallest float value (-FLT_MAX)
+    }
+    return 20.0f * log10f(Volume);
+}
+
+// Calculate the argument to SetFrequencyRatio from a semitone value
+__inline float XAudio2SemitonesToFrequencyRatio(float Semitones)
+{
+    // FrequencyRatio = 2 ^ Octaves
+    //                = 2 ^ (Semitones / 12)
+    return powf(2.0f, Semitones / 12.0f);
+}
+
+// Recover a pitch in semitones from a frequency ratio
+__inline float XAudio2FrequencyRatioToSemitones(float FrequencyRatio)
+{
+    // Semitones = 12 * log2(FrequencyRatio)
+    //           = 12 * log2(10) * log10(FrequencyRatio)
+    return 39.86313713864835f * log10f(FrequencyRatio);
+}
+
+// Convert from filter cutoff frequencies expressed in Hertz to the radian
+// frequency values used in XAUDIO2_FILTER_PARAMETERS.Frequency, state-variable
+// filter types only. Use XAudio2CutoffFrequencyToOnePoleCoefficient() for one-pole filter types.
+// Note that the highest CutoffFrequency supported is SampleRate/6.
+// Higher values of CutoffFrequency will return XAUDIO2_MAX_FILTER_FREQUENCY.
+__inline float XAudio2CutoffFrequencyToRadians(float CutoffFrequency, UINT32 SampleRate)
+{
+    if ((UINT32)(CutoffFrequency * 6.0f) >= SampleRate)
+    {
+        return XAUDIO2_MAX_FILTER_FREQUENCY;
+    }
+    return 2.0f * sinf((float)M_PI * CutoffFrequency / (float)SampleRate);
+}
+
+// Convert from radian frequencies back to absolute frequencies in Hertz
+__inline float XAudio2RadiansToCutoffFrequency(float Radians, float SampleRate)
+{
+    return SampleRate * asinf(Radians / 2.0f) / (float)M_PI;
+}
+
+// Convert from filter cutoff frequencies expressed in Hertz to the filter
+// coefficients used with XAUDIO2_FILTER_PARAMETERS.Frequency,
+// LowPassOnePoleFilter and HighPassOnePoleFilter filter types only.
+// Use XAudio2CutoffFrequencyToRadians() for state-variable filter types.
+__inline float XAudio2CutoffFrequencyToOnePoleCoefficient(float CutoffFrequency, UINT32 SampleRate)
+{
+    if ((UINT32)CutoffFrequency >= SampleRate)
+    {
+        return XAUDIO2_MAX_FILTER_FREQUENCY;
+    }
+    return ( 1.0f - powf(1.0f - 2.0f * CutoffFrequency / (float)SampleRate, 2.0f) );
+}
+
+
+#endif // #ifdef XAUDIO2_HELPER_FUNCTIONS
+
+
+/**************************************************************************
+ *
+ * XAudio2Create: Top-level function that creates an XAudio2 instance.
+ *
+ * ARGUMENTS:
+ *
+ *  Flags - Flags specifying the XAudio2 object's behavior.
+ *
+ *  XAudio2Processor - An XAUDIO2_PROCESSOR value that specifies the
+ *          hardware threads (Xbox) or processors (Windows) that XAudio2
+ *          will use. Note that XAudio2 supports concurrent processing on
+ *          multiple threads, using any combination of XAUDIO2_PROCESSOR
+ *          flags. The values are platform-specific; platform-independent
+ *          code can use XAUDIO2_USE_DEFAULT_PROCESSOR to use the default on
+ *          each platform.
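A minimal end-to-end sketch of the call documented above (illustrative, not part of the header; `CreateEngine` is a hypothetical name, and on desktop Windows COM should normally be initialized first, e.g. with CoInitializeEx):

    inline HRESULT CreateEngine(IXAudio2** ppXAudio2, IXAudio2MasteringVoice** ppMaster)
    {
        *ppXAudio2 = nullptr;
        *ppMaster  = nullptr;
        HRESULT hr = XAudio2Create(ppXAudio2, 0, XAUDIO2_USE_DEFAULT_PROCESSOR);
        if (SUCCEEDED(hr))
        {
            // Defaults pick the device's channel count, sample rate and endpoint.
            hr = (*ppXAudio2)->CreateMasteringVoice(ppMaster);
        }
        if (FAILED(hr) && *ppXAudio2)
        {
            (*ppXAudio2)->Release();
            *ppXAudio2 = nullptr;
        }
        return hr;
    }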
+ * + **************************************************************************/ + +// We're an xaudio2 client +#define XAUDIO2_STDAPI EXTERN_C DECLSPEC_IMPORT HRESULT STDAPICALLTYPE + +XAUDIO2_STDAPI XAudio2Create(_Outptr_ IXAudio2** ppXAudio2, UINT32 Flags X2DEFAULT(0), + XAUDIO2_PROCESSOR XAudio2Processor X2DEFAULT(XAUDIO2_USE_DEFAULT_PROCESSOR)); + +// Undo the #pragma pack(push, 1) directive at the top of this file +#pragma pack(pop) + +#endif // #ifndef GUID_DEFS_ONLY + +#endif // #ifndef __XAUDIO2_INCLUDED__ diff --git a/3rdparty/XAudio2Redist/libs/xaudio2_9redist.lib b/3rdparty/XAudio2Redist/libs/xaudio2_9redist.lib new file mode 100644 index 0000000000..96eb038304 Binary files /dev/null and b/3rdparty/XAudio2Redist/libs/xaudio2_9redist.lib differ diff --git a/3rdparty/asmjit/CMakeLists.txt b/3rdparty/asmjit/CMakeLists.txt index ae88ce2fe8..2032592584 100644 --- a/3rdparty/asmjit/CMakeLists.txt +++ b/3rdparty/asmjit/CMakeLists.txt @@ -3,7 +3,6 @@ set(ASMJIT_STATIC TRUE) set(ASMJIT_BUILD_X86 TRUE) set(ASMJIT_BUILD_ARM FALSE) set(ASMJIT_BUILD_TEST FALSE) -set(ASMJIT_NO_DEPRECATED TRUE) set(ASMJIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/asmjit" CACHE PATH "Location of 'asmjit'") include("${ASMJIT_DIR}/CMakeLists.txt") @@ -15,5 +14,4 @@ target_link_libraries(asmjit PRIVATE ${ASMJIT_DEPS}) # ASMJIT should have a option for disabling installing and this wouldnt # be required to avoid installing ASMJIT... -set_property(TARGET asmjit PROPERTY FOLDER "3rdparty/") add_library(3rdparty::asmjit ALIAS asmjit) diff --git a/3rdparty/asmjit/asmjit b/3rdparty/asmjit/asmjit index 416f735696..723f58581a 160000 --- a/3rdparty/asmjit/asmjit +++ b/3rdparty/asmjit/asmjit @@ -1 +1 @@ -Subproject commit 416f7356967c1f66784dc1580fe157f9406d8bff +Subproject commit 723f58581afc0f4cb16ba13396ff77e425896847 diff --git a/3rdparty/asmjit/asmjit.vcxproj b/3rdparty/asmjit/asmjit.vcxproj index b53b790f1c..a786d83b84 100644 --- a/3rdparty/asmjit/asmjit.vcxproj +++ b/3rdparty/asmjit/asmjit.vcxproj @@ -11,158 +11,66 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + - - - - - - + + + + - - - - - - - - - - - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - + + + + - - - - - - - - - - - - - - - - - - - + {AC40FF01-426E-4838-A317-66354CEFAE88} @@ -179,14 +87,6 @@ x64 - - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - @@ -203,10 +103,8 @@ NotUsing - ASMJIT_STATIC;ASMJIT_NO_DEPRECATED;%(PreprocessorDefinitions) + ASMJIT_STATIC;%(PreprocessorDefinitions) MaxSpeed - MaxSpeed - TurnOffAllWarnings diff --git a/3rdparty/asmjit/asmjit.vcxproj.filters b/3rdparty/asmjit/asmjit.vcxproj.filters index 2b73120e3a..0505172685 100644 --- a/3rdparty/asmjit/asmjit.vcxproj.filters +++ b/3rdparty/asmjit/asmjit.vcxproj.filters @@ -1,157 +1,65 @@  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + - - - - - - + + + + - - - - - - - - - - - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + \ No 
newline at end of file
diff --git a/3rdparty/bcdec/bcdec.hpp b/3rdparty/bcdec/bcdec.hpp
deleted file mode 100644
index 9fac43d0fb..0000000000
--- a/3rdparty/bcdec/bcdec.hpp
+++ /dev/null
@@ -1,170 +0,0 @@
-// Based on https://github.com/iOrange/bcdec/blob/963c5e56b7a335e066cff7d16a3de75f4e8ad366/bcdec.h
-// provides functions to decompress blocks of BC compressed images
-//
-// ------------------------------------------------------------------------------
-//
-// MIT LICENSE
-// ===========
-// Copyright (c) 2022 Sergii Kudlai
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-// of the Software, and to permit persons to whom the Software is furnished to do
-// so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-//
-// ------------------------------------------------------------------------------
-
-#pragma once
-
-#include "util/types.hpp"
-
-static void bcdec__color_block(const u8* compressedBlock, u8* dstColors, int destinationPitch, bool onlyOpaqueMode) {
-    u16 c0, c1;
-    u32 refColors[4]; /* 0xAABBGGRR */
-    u32 colorIndices;
-    u32 r0, g0, b0, r1, g1, b1, r, g, b;
-
-    c0 = *reinterpret_cast<const u16*>(compressedBlock);
-    c1 = *(reinterpret_cast<const u16*>(compressedBlock) + 1);
-
-    /* Unpack 565 ref colors */
-    r0 = (c0 >> 11) & 0x1F;
-    g0 = (c0 >> 5) & 0x3F;
-    b0 = c0 & 0x1F;
-
-    r1 = (c1 >> 11) & 0x1F;
-    g1 = (c1 >> 5) & 0x3F;
-    b1 = c1 & 0x1F;
-
-    /* Expand 565 ref colors to 888 */
-    r = (r0 * 527 + 23) >> 6;
-    g = (g0 * 259 + 33) >> 6;
-    b = (b0 * 527 + 23) >> 6;
-    refColors[0] = 0xFF000000 | (r << 16) | (g << 8) | b;
-
-    r = (r1 * 527 + 23) >> 6;
-    g = (g1 * 259 + 33) >> 6;
-    b = (b1 * 527 + 23) >> 6;
-    refColors[1] = 0xFF000000 | (r << 16) | (g << 8) | b;
-
-    if (c0 > c1 || onlyOpaqueMode)
-    { /* Standard BC1 mode (also BC3 color block uses ONLY this mode) */
-        /* color_2 = 2/3*color_0 + 1/3*color_1
-           color_3 = 1/3*color_0 + 2/3*color_1 */
-        r = ((2 * r0 + r1) * 351 + 61) >> 7;
-        g = ((2 * g0 + g1) * 2763 + 1039) >> 11;
-        b = ((2 * b0 + b1) * 351 + 61) >> 7;
-        refColors[2] = 0xFF000000 | (r << 16) | (g << 8) | b;
-
-        r = ((r0 + r1 * 2) * 351 + 61) >> 7;
-        g = ((g0 + g1 * 2) * 2763 + 1039) >> 11;
-        b = ((b0 + b1 * 2) * 351 + 61) >> 7;
-        refColors[3] = 0xFF000000 | (r << 16) | (g << 8) | b;
-    }
-    else
-    { /* Quite rare BC1A mode */
-        /* color_2 = 1/2*color_0 + 1/2*color_1;
-           color_3 = 0; */
-        r = ((r0 + r1) * 1053 + 125) >> 8;
-        g = ((g0 + g1) * 4145 + 1019) >> 11;
-        b = ((b0 + b1) * 1053 + 125) >> 8;
-        refColors[2] = 0xFF000000 | (r << 16) | (g << 8) | b;
-
-        refColors[3] = 0x00000000;
-    }
-
-    colorIndices = *reinterpret_cast<const u32*>(compressedBlock + 4);
-
-    /* Fill out the decompressed color block */
-    for (int i = 0; i < 4; ++i)
-    {
-        for (int j = 0; j < 4; ++j)
-        {
-            int idx = colorIndices & 0x03;
-            *reinterpret_cast<u32*>(dstColors + j * 4) = refColors[idx];
-            colorIndices >>= 2;
-        }
-
-        dstColors += destinationPitch;
-    }
-}
-
-static void bcdec__sharp_alpha_block(const u16* alpha, u8* decompressed, int destinationPitch) {
-    for (int i = 0; i < 4; ++i)
-    {
-        for (int j = 0; j < 4; ++j)
-        {
-            decompressed[j * 4] = ((alpha[i] >> (4 * j)) & 0x0F) * 17;
-        }
-        decompressed += destinationPitch;
-    }
-}
-
-static void bcdec__smooth_alpha_block(const u8* compressedBlock, u8* decompressed, int destinationPitch) {
-    u8 alpha[8];
-    u64 block = *reinterpret_cast<const u64*>(compressedBlock);
-    u64 indices;
-
-    alpha[0] = block & 0xFF;
-    alpha[1] = (block >> 8) & 0xFF;
-
-    if (alpha[0] > alpha[1])
-    {
-        /* 6 interpolated alpha values. */
-        alpha[2] = (6 * alpha[0] + alpha[1]) / 7;     /* 6/7*alpha_0 + 1/7*alpha_1 */
-        alpha[3] = (5 * alpha[0] + 2 * alpha[1]) / 7; /* 5/7*alpha_0 + 2/7*alpha_1 */
-        alpha[4] = (4 * alpha[0] + 3 * alpha[1]) / 7; /* 4/7*alpha_0 + 3/7*alpha_1 */
-        alpha[5] = (3 * alpha[0] + 4 * alpha[1]) / 7; /* 3/7*alpha_0 + 4/7*alpha_1 */
-        alpha[6] = (2 * alpha[0] + 5 * alpha[1]) / 7; /* 2/7*alpha_0 + 5/7*alpha_1 */
-        alpha[7] = ( alpha[0] + 6 * alpha[1]) / 7;    /* 1/7*alpha_0 + 6/7*alpha_1 */
-    }
-    else
-    {
-        /* 4 interpolated alpha values. */
-        alpha[2] = (4 * alpha[0] + alpha[1]) / 5;     /* 4/5*alpha_0 + 1/5*alpha_1 */
-        alpha[3] = (3 * alpha[0] + 2 * alpha[1]) / 5; /* 3/5*alpha_0 + 2/5*alpha_1 */
-        alpha[4] = (2 * alpha[0] + 3 * alpha[1]) / 5; /* 2/5*alpha_0 + 3/5*alpha_1 */
-        alpha[5] = ( alpha[0] + 4 * alpha[1]) / 5;    /* 1/5*alpha_0 + 4/5*alpha_1 */
-        alpha[6] = 0x00;
-        alpha[7] = 0xFF;
-    }
-
-    indices = block >> 16;
-    for (int i = 0; i < 4; ++i)
-    {
-        for (int j = 0; j < 4; ++j)
-        {
-            decompressed[j * 4] = alpha[indices & 0x07];
-            indices >>= 3;
-        }
-        decompressed += destinationPitch;
-    }
-}
-
-static inline void bcdec_bc1(const u8* compressedBlock, u8* decompressedBlock, int destinationPitch) {
-    bcdec__color_block(compressedBlock, decompressedBlock, destinationPitch, false);
-}
-
-static inline void bcdec_bc2(const u8* compressedBlock, u8* decompressedBlock, int destinationPitch) {
-    bcdec__color_block(compressedBlock + 8, decompressedBlock, destinationPitch, true);
-    bcdec__sharp_alpha_block(reinterpret_cast<const u16*>(compressedBlock), decompressedBlock + 3, destinationPitch);
-}
-
-static inline void bcdec_bc3(const u8* compressedBlock, u8* decompressedBlock, int destinationPitch) {
-    bcdec__color_block(compressedBlock + 8, decompressedBlock, destinationPitch, true);
-    bcdec__smooth_alpha_block(compressedBlock, decompressedBlock + 3, destinationPitch);
-}
-
diff --git a/3rdparty/cubeb/CMakeLists.txt b/3rdparty/cubeb/CMakeLists.txt
deleted file mode 100644
index 6d1f231f0b..0000000000
--- a/3rdparty/cubeb/CMakeLists.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-# Cubeb
-
-set(BUILD_SHARED_LIBS FALSE CACHE BOOL "Don't build shared libs")
-set(BUILD_TESTS FALSE CACHE BOOL "Don't build tests")
-set(BUILD_RUST_LIBS FALSE CACHE BOOL "Don't build rust libs")
-set(BUILD_TOOLS FALSE CACHE BOOL "Don't build tools")
-set(BUNDLE_SPEEX TRUE CACHE BOOL "Bundle the speex library")
-set(LAZY_LOAD_LIBS TRUE CACHE BOOL "Lazily load shared libraries")
-set(USE_SANITIZERS FALSE CACHE BOOL "Dont't use sanitizers")
-
-add_subdirectory(cubeb EXCLUDE_FROM_ALL)
-add_library(3rdparty::cubeb ALIAS cubeb)
-
-if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm|ARM|aarch64|AArch64|Aarch64)")
-    target_compile_definitions(speex PUBLIC
-        #_USE_NEON
-    )
-elseif (CMAKE_SYSTEM_PROCESSOR MATCHES
"^(x86|X86|amd64|AMD64|em64t|EM64T)") - target_compile_definitions(speex PUBLIC - _USE_SSE - _USE_SSE2 - ) -endif () diff --git a/3rdparty/cubeb/cubeb b/3rdparty/cubeb/cubeb deleted file mode 160000 index 70b4e3db78..0000000000 --- a/3rdparty/cubeb/cubeb +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 70b4e3db7822de4d534959885cda109d6edbee36 diff --git a/3rdparty/cubeb/extra/cubeb_export.h b/3rdparty/cubeb/extra/cubeb_export.h deleted file mode 100644 index 0a831f4ac4..0000000000 --- a/3rdparty/cubeb/extra/cubeb_export.h +++ /dev/null @@ -1,3 +0,0 @@ -#pragma once - -#define CUBEB_EXPORT diff --git a/3rdparty/cubeb/libcubeb.vcxproj b/3rdparty/cubeb/libcubeb.vcxproj deleted file mode 100644 index 36229297fe..0000000000 --- a/3rdparty/cubeb/libcubeb.vcxproj +++ /dev/null @@ -1,96 +0,0 @@ - - - - - Debug - x64 - - - Release - x64 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - {fda7b080-03b0-48c8-b24f-88118981422a} - libcubeb - - - - - - StaticLibrary - false - false - Unicode - x64 - - - - - - - - - - - - - - - - - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - - - TurnOffAllWarnings - false - OUTSIDE_SPEEX;RANDOM_PREFIX=speex;FLOATING_POINT;_USE_SSE;_USE_SSE2;EXPORT=;USE_WASAPI;USE_WINMM;CUBEB_WASAPI_USE_IAUDIOSTREAMVOLUME;%(PreprocessorDefinitions) - ./cubeb/subprojects/;./extra/;./cubeb/src/;./cubeb/include/;%(AdditionalIncludeDirectories) - MaxSpeed - - - - - - \ No newline at end of file diff --git a/3rdparty/cubeb/libcubeb.vcxproj.filters b/3rdparty/cubeb/libcubeb.vcxproj.filters deleted file mode 100644 index 980dd3338d..0000000000 --- a/3rdparty/cubeb/libcubeb.vcxproj.filters +++ /dev/null @@ -1,108 +0,0 @@ - - - - - Source files - - - Source files - - - Source files - - - Source files - - - Source files - - - Source files - - - Source files - - - Source files - - - Source files - - - - - {cd86de6d-e811-4c48-9653-af00c7098a45} - - - {566bbf3e-ca28-4390-b054-58c92a66f24e} - - - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - \ No newline at end of file diff --git a/3rdparty/curl/CMakeLists.txt b/3rdparty/curl/CMakeLists.txt index b20763af65..f0cc526167 100644 --- a/3rdparty/curl/CMakeLists.txt +++ b/3rdparty/curl/CMakeLists.txt @@ -1,37 +1,16 @@ # CURL if(USE_SYSTEM_CURL) - message(STATUS "RPCS3: using shared libcurl") + message("-- RPCS3: using shared libcurl") find_package(CURL REQUIRED) - add_library(3rdparty_libcurl INTERFACE) - target_link_libraries(3rdparty_libcurl INTERFACE CURL::libcurl) + add_library(libcurl INTERFACE) + target_link_libraries(libcurl INTERFACE CURL::libcurl) else() - message(STATUS "RPCS3: building libcurl + wolfssl submodules") - set(BUILD_CURL_EXE OFF CACHE BOOL "Set to ON to build curl executable.") - set(BUILD_STATIC_CURL OFF CACHE BOOL "Set to ON to build curl executable with static libcurl.") - set(BUILD_STATIC_LIBS ON CACHE BOOL "Set to ON to build static libcurl.") - set(BUILD_SHARED_LIBS OFF CACHE BOOL "Set to ON to build shared libcurl.") - 
find_package(WolfSSL REQUIRED) - set(CURL_USE_WOLFSSL ON CACHE BOOL "enable wolfSSL for SSL/TLS") - set(CURL_USE_OPENSSL OFF CACHE BOOL "Use OpenSSL code. Experimental") - set(HTTP_ONLY ON CACHE BOOL "disables all protocols except HTTP (This overrides all CURL_DISABLE_* options)") - set(USE_LIBIDN2 OFF CACHE BOOL "Use libidn2 for IDN support") # Disabled because MacOS CI doesn't work otherwise + message("-- RPCS3: building libcurl + wolfssl submodules") + option(BUILD_CURL_EXE "Set to ON to build curl executable." OFF) + option(CMAKE_USE_WOLFSSL "enable wolfSSL for SSL/TLS" ON) + option(HTTP_ONLY "disables all protocols except HTTP (This overrides all CURL_DISABLE_* options)" ON) + option(CURL_ZLIB "Set to ON to enable building curl with zlib support." OFF) set(CURL_CA_PATH "none" CACHE STRING "Location of default CA path. Set 'none' to disable or 'auto' for auto-detection. Defaults to 'auto'.") - option(CURL_DISABLE_INSTALL "Disable installation targets" ON) - if(USE_MSVC_STATIC_CRT) - set(CURL_STATIC_CRT ON CACHE BOOL "Use static crt to build curl") - endif() - if(WIN32) - set(ENABLE_UNICODE ON CACHE BOOL "enable Unicode") - endif() - set(CURL_USE_LIBSSH2 OFF CACHE BOOL "Use libSSH2") - set(CURL_USE_LIBPSL OFF CACHE BOOL "Use libPSL") - option(BUILD_TESTING "Build tests" OFF) - option(BUILD_EXAMPLES "Build libcurl examples" OFF) - add_subdirectory(curl EXCLUDE_FROM_ALL) - - add_library(3rdparty_libcurl INTERFACE) - target_link_libraries(3rdparty_libcurl INTERFACE libcurl_static) - endif() diff --git a/3rdparty/curl/curl b/3rdparty/curl/curl index fdb8a789d2..6b951a6928 160000 --- a/3rdparty/curl/curl +++ b/3rdparty/curl/curl @@ -1 +1 @@ -Subproject commit fdb8a789d2b446b77bd7cdd2eff95f6cbc814cf4 +Subproject commit 6b951a6928811507d493303b2878e848c077b471 diff --git a/3rdparty/curl/libcurl.vcxproj b/3rdparty/curl/libcurl.vcxproj index c4a96abdc4..71f8e0554d 100644 --- a/3rdparty/curl/libcurl.vcxproj +++ b/3rdparty/curl/libcurl.vcxproj @@ -33,8 +33,8 @@ <_ProjectFileVersion>10.0.30319.1 - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ + $(SolutionDir)lib\$(Configuration)-$(Platform)\ + $(SolutionDir)tmp\$(ProjectName)-$(Configuration)-$(Platform)\ @@ -43,11 +43,11 @@ MaxSpeed OnlyExplicitInline - curl\include;curl\lib;extra;$(SolutionDir)3rdparty\wolfssl\wolfssl\wolfssl;$(SolutionDir)3rdparty\wolfssl\wolfssl;$(SolutionDir)3rdparty\wolfssl\extra\win32;%(AdditionalIncludeDirectories) - WOLFSSL_ALT_CERT_CHAINS;HAVE_SNI;NDEBUG;BUILDING_LIBCURL;CURL_STATICLIB;USE_WOLFSSL;NO_MD4;WOLFSSL_USER_SETTINGS;USE_IPV6;SIZEOF_LONG=4;SIZEOF_LONG_LONG=8;%(PreprocessorDefinitions) + curl\include;curl\lib;extra;$(SolutionDir)3rdparty\wolfssl;$(SolutionDir)3rdparty\wolfssl\wolfssl;%(AdditionalIncludeDirectories) + HAVE_SNI;NDEBUG;BUILDING_LIBCURL;CURL_STATICLIB;USE_WOLFSSL;USE_IPV6;SIZEOF_LONG=4;SIZEOF_LONG_LONG=8;%(PreprocessorDefinitions) true true - TurnOffAllWarnings + Level4 NDEBUG;%(PreprocessorDefinitions) @@ -62,33 +62,16 @@ - - - + + - - - - - - + - - - - - - - - - - - - + @@ -96,28 +79,25 @@ + + + - - - - - - + + - - @@ -125,30 +105,30 @@ - + + + - - - + + - @@ -156,9 +136,11 @@ - - + + + + @@ -167,7 +149,6 @@ - @@ -185,29 +166,23 @@ - + + + - - - - - - - - - + - - - + + + @@ -220,23 +195,26 @@ + + - + + + - @@ -252,37 +230,26 @@ - - - - - - - + + + + + + + - - - - - - - - - - - - - + + @@ -297,73 +264,67 @@ + + + + - - - - - + + - - - - - + - - - + - - - + + + - @@ -389,43 +350,37 @@ + - - - - - - - - - - + - - - - - + + + 
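
For context on the curl options above: HTTP_ONLY strips every protocol except HTTP(S), and wolfSSL supplies the TLS backend, so consumers of the 3rdparty_libcurl target only ever touch the easy API. A minimal sketch against the public libcurl interface (the URL is a placeholder):

    #include <curl/curl.h>

    int main()
    {
        curl_global_init(CURL_GLOBAL_DEFAULT);
        CURL* h = curl_easy_init();
        CURLcode rc = CURLE_FAILED_INIT;
        if (h)
        {
            curl_easy_setopt(h, CURLOPT_URL, "https://example.com/");
            rc = curl_easy_perform(h); // HTTPS request, TLS handled by wolfSSL
            curl_easy_cleanup(h);
        }
        curl_global_cleanup();
        return rc == CURLE_OK ? 0 : 1;
    }
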
+ + - + + + + - diff --git a/3rdparty/curl/libcurl.vcxproj.filters b/3rdparty/curl/libcurl.vcxproj.filters index 32eb05f40e..33417807a1 100644 --- a/3rdparty/curl/libcurl.vcxproj.filters +++ b/3rdparty/curl/libcurl.vcxproj.filters @@ -24,6 +24,12 @@ Source Files + + Source Files + + + Source Files + Source Files @@ -42,6 +48,9 @@ Source Files + + Source Files + Source Files @@ -63,9 +72,18 @@ Source Files + + Source Files + Source Files + + Source Files + + + Source Files + Source Files @@ -87,6 +105,9 @@ Source Files + + Source Files + Source Files @@ -123,6 +144,12 @@ Source Files + + Source Files + + + Source Files + Source Files @@ -132,6 +159,9 @@ Source Files + + Source Files + Source Files @@ -153,6 +183,9 @@ Source Files + + Source Files + Source Files @@ -162,6 +195,9 @@ Source Files + + Source Files + Source Files @@ -192,6 +228,18 @@ Source Files + + Source Files + + + Source Files + + + Source Files + + + Source Files + Source Files @@ -270,6 +318,12 @@ Source Files + + Source Files + + + Source Files + Source Files @@ -279,6 +333,9 @@ Source Files + + Source Files + Source Files @@ -291,6 +348,15 @@ Source Files + + Source Files + + + Source Files + + + Source Files + Source Files @@ -327,7 +393,10 @@ Source Files - + + Source Files + + Source Files @@ -342,6 +411,9 @@ Source Files + + Source Files + Source Files @@ -351,6 +423,12 @@ Source Files + + Source Files + + + Source Files + Source Files @@ -369,15 +447,24 @@ Source Files + + Source Files + Source Files + + Source Files + Source Files Source Files + + Source Files + Source Files @@ -396,159 +483,6 @@ Source Files - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - - - Source Files - @@ -593,6 +527,12 @@ Header Files + + Header Files + + + Header Files + Header Files @@ -605,9 +545,21 @@ Header Files + + Header Files + + + Header Files + + + Header Files + Header Files + + Header Files + Header Files @@ -620,9 +572,15 @@ Header Files + + Header Files + Header Files + + Header Files + Header Files @@ -662,9 +620,18 @@ Header Files + + Header Files + Header Files + + Header Files + + + Header Files + Header Files @@ -677,6 +644,9 @@ Header Files + + Header Files + Header Files @@ -698,6 +668,9 @@ Header Files + + Header Files + Header Files @@ -728,6 +701,9 @@ Header Files + + Header Files + Header Files @@ -761,6 +737,9 @@ Header Files + + Header Files + Header Files @@ -779,6 +758,12 @@ Header Files + + Header Files + + + Header Files + Header Files @@ -794,6 +779,9 @@ Header Files + + Header Files + Header Files @@ -872,6 +860,9 @@ Header Files + + Header Files + Header Files @@ -884,6 +875,15 @@ Header Files + + Header Files + + + 
Header Files + + + Header Files + Header Files @@ -893,15 +893,24 @@ Header Files - + + Header Files + + Header Files Header Files + + Header Files + Header Files + + Header Files + Header Files @@ -911,6 +920,12 @@ Header Files + + Header Files + + + Header Files + Header Files @@ -926,18 +941,27 @@ Header Files + + Header Files + Header Files Header Files + + Header Files + Header Files Header Files + + Header Files + Header Files @@ -953,165 +977,6 @@ Header Files - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - diff --git a/3rdparty/discord-rpc/CMakeLists.txt b/3rdparty/discord-rpc/CMakeLists.txt index 2ad705d52c..34c7c4fdb9 100644 --- a/3rdparty/discord-rpc/CMakeLists.txt +++ b/3rdparty/discord-rpc/CMakeLists.txt @@ -1,15 +1,34 @@ # DiscordRPC - add_library(3rdparty_discordRPC INTERFACE) +# We don't want Discord Rich Presence on the BSDs and other OSes if (USE_DISCORD_RPC AND (WIN32 OR CMAKE_SYSTEM MATCHES "Linux" OR APPLE)) - set(BUILD_EXAMPLES FALSE CACHE BOOL "Build example apps") - set(ENABLE_IO_THREAD TRUE CACHE BOOL "Start up a separate I/O thread, otherwise I'd need to call an update function") - set(USE_STATIC_CRT FALSE CACHE BOOL "Use /MT[d] for dynamic library") - set(WARNINGS_AS_ERRORS FALSE CACHE BOOL "When enabled, compiles with `-Werror` (on *nix platforms).") + if (WIN32 AND NOT MSVC) + ExternalProject_Add(discordRPC + GIT_REPOSITORY https://github.com/discordapp/discord-rpc + BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} + INSTALL_COMMAND "" + ) - add_subdirectory(discord-rpc EXCLUDE_FROM_ALL) - target_include_directories(3rdparty_discordRPC INTERFACE discord-rpc/include) + endif() + + target_include_directories(3rdparty_discordRPC INTERFACE include) target_compile_definitions(3rdparty_discordRPC INTERFACE -DWITH_DISCORD_RPC) - target_link_libraries(3rdparty_discordRPC INTERFACE discord-rpc) + + set(DISCORD_RPC_LIB NOTFOUND) + if (WIN32) + if (NOT MSVC) + set(DISCORD_RPC_LIB ${CMAKE_CURRENT_BINARY_DIR}/src/libdiscord-rpc.a) + else() + find_library(DISCORD_RPC_LIB discord-rpc PATHS lib/ NO_DEFAULT_PATH) + endif() + elseif(CMAKE_SYSTEM MATCHES "Linux") + find_library(DISCORD_RPC_LIB discord-rpc-linux PATHS lib/ NO_DEFAULT_PATH) + elseif(APPLE) + find_library(DISCORD_RPC_LIB discord-rpc-mac PATHS lib/ NO_DEFAULT_PATH) + endif() + target_link_libraries(3rdparty_discordRPC INTERFACE ${DISCORD_RPC_LIB}) + if(APPLE) + target_link_libraries(3rdparty_discordRPC INTERFACE "objc" "-framework Foundation" "-framework CoreServices") + endif() endif() diff --git a/3rdparty/discord-rpc/discord-rpc b/3rdparty/discord-rpc/discord-rpc deleted file mode 
160000 index 3dc2c326cb..0000000000 --- a/3rdparty/discord-rpc/discord-rpc +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 3dc2c326cb4dc5815c6069970c13154898f58d48 diff --git a/3rdparty/discord-rpc/discord-rpc.vcxproj b/3rdparty/discord-rpc/discord-rpc.vcxproj deleted file mode 100644 index 2777c4e9df..0000000000 --- a/3rdparty/discord-rpc/discord-rpc.vcxproj +++ /dev/null @@ -1,78 +0,0 @@ - - - - - Debug - x64 - - - Release - x64 - - - - - - - - - - - - - - - - - - - - - {81b0d6d6-84e6-40c1-8dbd-47cbcb3051ad} - discord-rpc - - - - - - StaticLibrary - false - false - Unicode - x64 - - - - - - - - - - - - - - - - - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - - - TurnOffAllWarnings - false - %(PreprocessorDefinitions) - ./discord-rpc/include;./discord-rpc/thirdparty/rapidjson-1.1.0/include;%(AdditionalIncludeDirectories) - MaxSpeed - - - - - - \ No newline at end of file diff --git a/3rdparty/discord-rpc/discord-rpc.vcxproj.filters b/3rdparty/discord-rpc/discord-rpc.vcxproj.filters deleted file mode 100644 index 9bac920a6d..0000000000 --- a/3rdparty/discord-rpc/discord-rpc.vcxproj.filters +++ /dev/null @@ -1,54 +0,0 @@ - - - - - {719448a4-8eab-4e75-b6b7-687e2b217490} - - - {7c0d57b3-e2ef-45c2-aa2d-2765e5c73279} - - - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - Header files - - - - - Source files - - - Source files - - - Source files - - - Source files - - - Source files - - - Source files - - - \ No newline at end of file diff --git a/3rdparty/discord-rpc/include/discord_register.h b/3rdparty/discord-rpc/include/discord_register.h new file mode 100644 index 0000000000..16fb42f328 --- /dev/null +++ b/3rdparty/discord-rpc/include/discord_register.h @@ -0,0 +1,26 @@ +#pragma once + +#if defined(DISCORD_DYNAMIC_LIB) +#if defined(_WIN32) +#if defined(DISCORD_BUILDING_SDK) +#define DISCORD_EXPORT __declspec(dllexport) +#else +#define DISCORD_EXPORT __declspec(dllimport) +#endif +#else +#define DISCORD_EXPORT __attribute__((visibility("default"))) +#endif +#else +#define DISCORD_EXPORT +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +DISCORD_EXPORT void Discord_Register(const char* applicationId, const char* command); +DISCORD_EXPORT void Discord_RegisterSteamGame(const char* applicationId, const char* steamId); + +#ifdef __cplusplus +} +#endif diff --git a/3rdparty/discord-rpc/include/discord_rpc.h b/3rdparty/discord-rpc/include/discord_rpc.h new file mode 100644 index 0000000000..3e1441e058 --- /dev/null +++ b/3rdparty/discord-rpc/include/discord_rpc.h @@ -0,0 +1,87 @@ +#pragma once +#include + +// clang-format off + +#if defined(DISCORD_DYNAMIC_LIB) +# if defined(_WIN32) +# if defined(DISCORD_BUILDING_SDK) +# define DISCORD_EXPORT __declspec(dllexport) +# else +# define DISCORD_EXPORT __declspec(dllimport) +# endif +# else +# define DISCORD_EXPORT __attribute__((visibility("default"))) +# endif +#else +# define DISCORD_EXPORT +#endif + +// clang-format on + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct DiscordRichPresence { + const char* state; /* max 128 bytes */ + const char* details; /* max 128 bytes */ + int64_t startTimestamp; + int64_t endTimestamp; + const char* largeImageKey; /* max 32 bytes */ + const char* largeImageText; /* max 128 bytes */ + const char* smallImageKey; /* max 32 
bytes */ + const char* smallImageText; /* max 128 bytes */ + const char* partyId; /* max 128 bytes */ + int partySize; + int partyMax; + const char* matchSecret; /* max 128 bytes */ + const char* joinSecret; /* max 128 bytes */ + const char* spectateSecret; /* max 128 bytes */ + int8_t instance; +} DiscordRichPresence; + +typedef struct DiscordUser { + const char* userId; + const char* username; + const char* discriminator; + const char* avatar; +} DiscordUser; + +typedef struct DiscordEventHandlers { + void (*ready)(const DiscordUser* request); + void (*disconnected)(int errorCode, const char* message); + void (*errored)(int errorCode, const char* message); + void (*joinGame)(const char* joinSecret); + void (*spectateGame)(const char* spectateSecret); + void (*joinRequest)(const DiscordUser* request); +} DiscordEventHandlers; + +#define DISCORD_REPLY_NO 0 +#define DISCORD_REPLY_YES 1 +#define DISCORD_REPLY_IGNORE 2 + +DISCORD_EXPORT void Discord_Initialize(const char* applicationId, + DiscordEventHandlers* handlers, + int autoRegister, + const char* optionalSteamId); +DISCORD_EXPORT void Discord_Shutdown(void); + +/* checks for incoming messages, dispatches callbacks */ +DISCORD_EXPORT void Discord_RunCallbacks(void); + +/* If you disable the lib starting its own io thread, you'll need to call this from your own */ +#ifdef DISCORD_DISABLE_IO_THREAD +DISCORD_EXPORT void Discord_UpdateConnection(void); +#endif + +DISCORD_EXPORT void Discord_UpdatePresence(const DiscordRichPresence* presence); +DISCORD_EXPORT void Discord_ClearPresence(void); + +DISCORD_EXPORT void Discord_Respond(const char* userid, /* DISCORD_REPLY_ */ int reply); + +DISCORD_EXPORT void Discord_UpdateHandlers(DiscordEventHandlers* handlers); + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/3rdparty/discord-rpc/lib/discord-rpc.lib b/3rdparty/discord-rpc/lib/discord-rpc.lib new file mode 100644 index 0000000000..2f75226ca6 Binary files /dev/null and b/3rdparty/discord-rpc/lib/discord-rpc.lib differ diff --git a/3rdparty/discord-rpc/lib/libdiscord-rpc-linux.a b/3rdparty/discord-rpc/lib/libdiscord-rpc-linux.a new file mode 100644 index 0000000000..7297c5637b Binary files /dev/null and b/3rdparty/discord-rpc/lib/libdiscord-rpc-linux.a differ diff --git a/3rdparty/discord-rpc/lib/libdiscord-rpc-mac.a b/3rdparty/discord-rpc/lib/libdiscord-rpc-mac.a new file mode 100644 index 0000000000..4da36ac6ab Binary files /dev/null and b/3rdparty/discord-rpc/lib/libdiscord-rpc-mac.a differ diff --git a/3rdparty/ffmpeg b/3rdparty/ffmpeg index ec6367d3ba..e5fb13bbb0 160000 --- a/3rdparty/ffmpeg +++ b/3rdparty/ffmpeg @@ -1 +1 @@ -Subproject commit ec6367d3ba9d0d57b9d22d4b87da8144acaf428f +Subproject commit e5fb13bbb07ac3ba2e1998e2f5688f3714870d93 diff --git a/3rdparty/flatbuffers b/3rdparty/flatbuffers index 595bf0007a..9e7e8cbe9f 160000 --- a/3rdparty/flatbuffers +++ b/3rdparty/flatbuffers @@ -1 +1 @@ -Subproject commit 595bf0007ab1929570c7671f091313c8fc20644e +Subproject commit 9e7e8cbe9f675123dd41b7c62868acad39188cae diff --git a/3rdparty/fusion/CMakeLists.txt b/3rdparty/fusion/CMakeLists.txt deleted file mode 100644 index c58b2dfe9f..0000000000 --- a/3rdparty/fusion/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -# To avoid python numpy dependency in debug build, force release build of this library -set(ORIG_BUILD_TYPE ${CMAKE_BUILD_TYPE}) -set(CMAKE_BUILD_TYPE Release) -add_subdirectory(fusion EXCLUDE_FROM_ALL) -set(CMAKE_BUILD_TYPE ${ORIG_BUILD_TYPE}) diff --git a/3rdparty/fusion/fusion b/3rdparty/fusion/fusion deleted file 
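
The discord_rpc.h header added above is the complete public surface of the bundled library. A minimal sketch of the call sequence it implies — initialize, publish presence, pump callbacks, shut down (the application id below is a placeholder, not RPCS3's real one):

    #include "discord_rpc.h"

    int main()
    {
        DiscordEventHandlers handlers = {};
        Discord_Initialize("000000000000000000", &handlers, /*autoRegister=*/1, nullptr);

        DiscordRichPresence presence = {};
        presence.state = "Playing";   // max 128 bytes, per the header comments
        presence.details = "RPCS3";
        Discord_UpdatePresence(&presence);

        Discord_RunCallbacks(); // dispatch any pending ready/error callbacks
        Discord_Shutdown();
        return 0;
    }
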
mode 160000 index 066d4a63b2..0000000000 --- a/3rdparty/fusion/fusion +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 066d4a63b2c714b20b0a8073a01fda7c5c6763f6 diff --git a/3rdparty/fusion/fusion.vcxproj b/3rdparty/fusion/fusion.vcxproj deleted file mode 100644 index 71e669be00..0000000000 --- a/3rdparty/fusion/fusion.vcxproj +++ /dev/null @@ -1,75 +0,0 @@ - - - - - Debug - x64 - - - Release - x64 - - - - - - - - - - - - - - - - - - - Fusion - {3C67A2FF-4710-402A-BE3E-31B0CB0576DF} - - - - - - StaticLibrary - Unicode - - - - x64 - - - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - - - - - - - - - - - - - - - - - - Sync - MaxSpeed - - - - - - \ No newline at end of file diff --git a/3rdparty/fusion/fusion.vcxproj.filters b/3rdparty/fusion/fusion.vcxproj.filters deleted file mode 100644 index 85764a4f97..0000000000 --- a/3rdparty/fusion/fusion.vcxproj.filters +++ /dev/null @@ -1,29 +0,0 @@ - - - - - cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx - - - - - - - - - - - - - - - Source Files - - - Source Files - - - Source Files - - - \ No newline at end of file diff --git a/3rdparty/glslang/glslang b/3rdparty/glslang/glslang index fc9889c889..ae2a562936 160000 --- a/3rdparty/glslang/glslang +++ b/3rdparty/glslang/glslang @@ -1 +1 @@ -Subproject commit fc9889c889561c5882e83819dcaffef5ed45529b +Subproject commit ae2a562936cc8504c9ef2757cceaff163147834f diff --git a/3rdparty/glslang/glslang.vcxproj b/3rdparty/glslang/glslang.vcxproj index 298fd149c6..a305e3ec01 100644 --- a/3rdparty/glslang/glslang.vcxproj +++ b/3rdparty/glslang/glslang.vcxproj @@ -38,62 +38,27 @@ "Visual Studio $(VisualStudioVersion.Substring(0,2))" - call vsdevcmd.bat -arch=amd64 - cmake -G $(CmakeGenerator) -A x64 -DCMAKE_BUILD_TYPE="Release" -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DGLSLANG_TESTS=OFF -DENABLE_GLSLANG_BINARIES=OFF -DBUILD_EXTERNAL=OFF -DENABLE_SPVREMAPPER=OFF -DENABLE_HLSL=OFF -DENABLE_OPT=OFF -S glslang -B "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)" - - call vsdevcmd.bat -arch=amd64 - cmake -G $(CmakeGenerator) -A x64 -DCMAKE_BUILD_TYPE="Debug" -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreadedDebug -DGLSLANG_TESTS=OFF -DENABLE_GLSLANG_BINARIES=OFF -DBUILD_EXTERNAL=OFF -DENABLE_SPVREMAPPER=OFF -DENABLE_HLSL=OFF -DENABLE_OPT=OFF -S glslang -B "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)" - - - echo Copying.. - mkdir "$(SolutionDir)build\lib\$(Configuration)-$(Platform)\$(ProjectName)" - copy "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\SPIRV\$(CONFIGURATION)\*.lib" "$(SolutionDir)build\lib\$(Configuration)-$(Platform)\$(ProjectName)" - copy "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\glslang\OSDependent\Windows\$(CONFIGURATION)\*.lib" "$(SolutionDir)build\lib\$(Configuration)-$(Platform)\$(ProjectName)" - copy "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\glslang\$(CONFIGURATION)\*.lib" "$(SolutionDir)build\lib\$(Configuration)-$(Platform)\$(ProjectName)" - - - echo Cleaning.. 
- rmdir /s /q "$(SolutionDir)build\lib\$(Configuration)-$(Platform)\$(ProjectName)" - rmdir /s /q "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)" - + cmake -G $(CmakeGenerator) -A x64 -DCMAKE_BUILD_TYPE="Release" -DLLVM_USE_CRT_DEBUG=MTd -DLLVM_USE_CRT_RELEASE=MT -S glslang -B build + cmake -G $(CmakeGenerator) -A x64 -DCMAKE_BUILD_TYPE="Debug" -DLLVM_USE_CRT_DEBUG=MTd -DLLVM_USE_CRT_RELEASE=MT -S glslang -B build $([System.IO.Path]::GetFullPath('$(MSBuildThisFileDirectory)..\..\buildfiles\msvc\common_default.props')) - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - $(CmakeReleaseCLI) - msbuild.exe "$(IntDir)\ALL_BUILD.vcxproj" /t:build /p:Configuration=Release /p:ForceImportBeforeCppTargets="$(PropsAbsPath)" /m - $(CmakeCopyCLI) - - - $(CmakeReleaseCLI) - msbuild.exe "$(IntDir)\ALL_BUILD.vcxproj" /t:rebuild /p:Configuration=Release /p:ForceImportBeforeCppTargets="$(PropsAbsPath)" /m - $(CmakeCopyCLI) - - - $(CmakeReleaseCLI) - msbuild.exe "$(IntDir)\ALL_BUILD.vcxproj" /t:clean /p:Configuration=Release /p:ForceImportBeforeCppTargets="$(PropsAbsPath)" /m - $(CmakeCleanCLI) - + $(CmakeReleaseCLI) + msbuild.exe build\ALL_BUILD.vcxproj /t:build /p:Configuration=Release /p:ForceImportBeforeCppTargets=$(PropsAbsPath) /m + $(CmakeReleaseCLI) + msbuild.exe build\ALL_BUILD.vcxproj /t:rebuild /p:Configuration=Release /p:ForceImportBeforeCppTargets=$(PropsAbsPath) /m + $(CmakeReleaseCLI) + msbuild.exe build\ALL_BUILD.vcxproj /t:clean /p:Configuration=Release /p:ForceImportBeforeCppTargets=$(PropsAbsPath) /m - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - $(CmakeDebugCLI) - msbuild.exe "$(IntDir)\ALL_BUILD.vcxproj" /t:build /p:Configuration=Debug /p:ForceImportBeforeCppTargets="$(PropsAbsPath)" /m - $(CmakeCopyCLI) + $(CmakeDebugCLI) + msbuild.exe build\ALL_BUILD.vcxproj /t:build /p:Configuration=Debug /p:ForceImportBeforeCppTargets=$(PropsAbsPath) /m - - $(CmakeDebugCLI) - msbuild.exe "$(IntDir)\ALL_BUILD.vcxproj" /t:rebuild /p:Configuration=Debug /p:ForceImportBeforeCppTargets="$(PropsAbsPath)" /m - $(CmakeCopyCLI) + $(CmakeDebugCLI) + msbuild.exe build\ALL_BUILD.vcxproj /t:rebuild /p:Configuration=Debug /p:ForceImportBeforeCppTargets=$(PropsAbsPath) /m - - $(CmakeDebugCLI) - msbuild.exe "$(IntDir)\ALL_BUILD.vcxproj" /t:clean /p:Configuration=Debug /p:ForceImportBeforeCppTargets="$(PropsAbsPath)" /m - $(CmakeCleanCLI) + $(CmakeDebugCLI) + msbuild.exe build\ALL_BUILD.vcxproj /t:clean /p:Configuration=Debug /p:ForceImportBeforeCppTargets=$(PropsAbsPath) /m diff --git a/3rdparty/hidapi/CMakeLists.txt b/3rdparty/hidapi/CMakeLists.txt index e1e36ac13e..da0a8e1907 100644 --- a/3rdparty/hidapi/CMakeLists.txt +++ b/3rdparty/hidapi/CMakeLists.txt @@ -1,22 +1,25 @@ # hidapi -set(BUILD_SHARED_LIBS FALSE CACHE BOOL "Don't build shared libs") -set(HIDAPI_INSTALL_TARGETS FALSE CACHE BOOL "Don't install anything") - -if(CMAKE_SYSTEM MATCHES "Linux") - set(HIDAPI_WITH_LIBUSB FALSE CACHE BOOL "Don't build with libusb for linux") -endif() - add_library(3rdparty_hidapi INTERFACE) -add_subdirectory(hidapi EXCLUDE_FROM_ALL) +target_include_directories(3rdparty_hidapi INTERFACE hidapi/hidapi) if(APPLE) - target_link_libraries(3rdparty_hidapi INTERFACE hidapi_darwin "-framework CoreFoundation" "-framework IOKit") + add_subdirectory(hidapi/mac EXCLUDE_FROM_ALL) + target_include_directories(hidapi-mac PUBLIC hidapi/hidapi) + + 
target_link_libraries(3rdparty_hidapi INTERFACE hidapi-mac "-framework CoreFoundation" "-framework IOKit") elseif(CMAKE_SYSTEM MATCHES "Linux") + add_subdirectory(hidapi/linux EXCLUDE_FROM_ALL) + target_include_directories(hidapi-hidraw PUBLIC hidapi/hidapi) + target_link_libraries(3rdparty_hidapi INTERFACE hidapi-hidraw udev) elseif(WIN32) - target_link_libraries(3rdparty_hidapi INTERFACE hidapi::hidapi hidapi::include Shlwapi.lib) -elseif(ANDROID) - target_link_libraries(3rdparty_hidapi INTERFACE hidapi::libusb) + add_subdirectory(hidapi/windows EXCLUDE_FROM_ALL) + target_include_directories(hidapi-hid PUBLIC hidapi/hidapi) + + target_link_libraries(3rdparty_hidapi INTERFACE hidapi-hid Shlwapi.lib) else() + add_subdirectory(hidapi/libusb EXCLUDE_FROM_ALL) + target_include_directories(hidapi-libusb PUBLIC hidapi/hidapi) + target_link_libraries(3rdparty_hidapi INTERFACE hidapi-libusb usb) endif() diff --git a/3rdparty/hidapi/hidapi b/3rdparty/hidapi/hidapi index f42423643e..01f601a150 160000 --- a/3rdparty/hidapi/hidapi +++ b/3rdparty/hidapi/hidapi @@ -1 +1 @@ -Subproject commit f42423643ec9011c98cccc0bb790722bbbd3f30b +Subproject commit 01f601a1509bf9c67819fbf521df39644bab52d5 diff --git a/3rdparty/hidapi/hidapi.vcxproj b/3rdparty/hidapi/hidapi.vcxproj index 6f6dc198e2..89dd74c0a8 100644 --- a/3rdparty/hidapi/hidapi.vcxproj +++ b/3rdparty/hidapi/hidapi.vcxproj @@ -44,13 +44,13 @@ true - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ + $(SolutionDir)lib/ + $(SolutionDir)tmp\$(ProjectName)-$(Configuration)-$(Platform)/ false - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ + $(SolutionDir)lib/ + $(SolutionDir)tmp\$(ProjectName)-$(Configuration)-$(Platform)/ @@ -101,17 +101,6 @@ - - - - - - - - - - - diff --git a/3rdparty/hidapi/hidapi.vcxproj.filters b/3rdparty/hidapi/hidapi.vcxproj.filters index 91646f10b4..74f1b00db0 100644 --- a/3rdparty/hidapi/hidapi.vcxproj.filters +++ b/3rdparty/hidapi/hidapi.vcxproj.filters @@ -24,33 +24,4 @@ Header Files - - - Source Files - - - Source Files - - - Source Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - - Header Files - - \ No newline at end of file diff --git a/3rdparty/libpng/CMakeLists.txt b/3rdparty/libpng/CMakeLists.txt index f24c2c9709..d1c3b58bcd 100644 --- a/3rdparty/libpng/CMakeLists.txt +++ b/3rdparty/libpng/CMakeLists.txt @@ -2,13 +2,14 @@ # Select the version of libpng to use, default is builtin if (NOT USE_SYSTEM_LIBPNG) # We use libpng's static library and don't need to build the shared library and run the tests - set(PNG_SHARED OFF CACHE BOOL "Build shared lib") - set(PNG_TESTS OFF CACHE BOOL "Build libpng tests") + set(PNG_SHARED OFF CACHE BOOL "Build shared lib" FORCE) + set(PNG_TESTS OFF CACHE BOOL "Build libpng tests" FORCE) + set(PNG_BUILD_ZLIB ON CACHE BOOL "ZLIB is already build or package is found" FORCE) set(SKIP_INSTALL_ALL ON) add_subdirectory(libpng EXCLUDE_FROM_ALL) target_include_directories(png_static INTERFACE "${libpng_BINARY_DIR}" "${libpng_SOURCE_DIR}") - set(LIBPNG_TARGET png_static PARENT_SCOPE) + set(LIBPNG_TARGET png_static PARENT_SCOPE) else() find_package(PNG REQUIRED) diff --git a/3rdparty/libpng/libpng b/3rdparty/libpng/libpng index 2b978915d8..a40189cf88 160000 --- a/3rdparty/libpng/libpng +++ b/3rdparty/libpng/libpng @@ -1 +1 @@ -Subproject commit 2b978915d82377df13fcbb1fb56660195ded868a +Subproject 
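
On the hidapi CMake rework above: each platform gets its own backend target (hidapi-mac, hidapi-hidraw, hidapi-hid, hidapi-libusb) behind the common 3rdparty_hidapi interface. A quick enumeration smoke test against the standard hidapi C API, regardless of which backend was selected:

    #include <hidapi.h>

    int main()
    {
        if (hid_init() != 0)
            return 1;
        // 0/0 wildcards match every vendor/product id
        hid_device_info* devs = hid_enumerate(0x0, 0x0);
        hid_free_enumeration(devs);
        return hid_exit();
    }
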
commit a40189cf881e9f0db80511c382292a5604c3c3d1 diff --git a/3rdparty/libpng/libpng.vcxproj b/3rdparty/libpng/libpng.vcxproj index 7e781065a6..658a9acf5f 100644 --- a/3rdparty/libpng/libpng.vcxproj +++ b/3rdparty/libpng/libpng.vcxproj @@ -1,12 +1,12 @@  - - Debug + + Debug Library x64 - - Release + + Release Library x64 @@ -19,11 +19,11 @@ - + StaticLibrary MultiByte - + StaticLibrary MultiByte @@ -33,28 +33,28 @@ - + - + - + false $(ProjectName)16 - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ + $(SolutionDir)lib/ + $(SolutionDir)tmp\$(ProjectName)-$(Configuration)-$(Platform)/ - + false $(ProjectName)16 - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ + $(SolutionDir)lib/ + $(SolutionDir)tmp\$(ProjectName)-$(Configuration)-$(Platform)/ - + Use $(WarningLevel) @@ -68,28 +68,32 @@ false false pngpriv.h + true CompileAsC true $(DisableSpecificWarnings) $(ZLibSrcDir);%(AdditionalIncludeDirectories) $(TreatWarningAsError) + MultiThreadedDebug Windows true - + $(WarningLevel) Use ProgramDatabase + MultiThreaded true true WIN32;NDEBUG;_WINDOWS;%(PreprocessorDefinitions) false false pngpriv.h + true CompileAsC true false @@ -111,8 +115,8 @@ - Create - Create + Create + Create @@ -131,8 +135,8 @@ - true - true + true + true diff --git a/3rdparty/libpng/pnglibconf.vcxproj b/3rdparty/libpng/pnglibconf.vcxproj index 6c43778af8..e4a22c8af6 100644 --- a/3rdparty/libpng/pnglibconf.vcxproj +++ b/3rdparty/libpng/pnglibconf.vcxproj @@ -32,8 +32,7 @@ Build - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ + $(SolutionDir)tmp\$(ProjectName)-$(Configuration)-$(Platform)\ diff --git a/3rdparty/libsdl-org/CMakeLists.txt b/3rdparty/libsdl-org/CMakeLists.txt deleted file mode 100644 index ba59cec8f7..0000000000 --- a/3rdparty/libsdl-org/CMakeLists.txt +++ /dev/null @@ -1,4 +0,0 @@ -option(SDL_SHARED "Build a shared version of the library" OFF) -option(SDL_STATIC "Build a static version of the library" ON) -option(SDL_TEST_LIBRARY "Build the SDL3_test library" OFF) -add_subdirectory(SDL EXCLUDE_FROM_ALL) diff --git a/3rdparty/libsdl-org/SDL b/3rdparty/libsdl-org/SDL deleted file mode 160000 index c9a6709bd2..0000000000 --- a/3rdparty/libsdl-org/SDL +++ /dev/null @@ -1 +0,0 @@ -Subproject commit c9a6709bd21750f1ad9597be21abace78c6378c9 diff --git a/3rdparty/libsdl-org/SDL.vcxproj b/3rdparty/libsdl-org/SDL.vcxproj deleted file mode 100644 index 81b7d853a5..0000000000 --- a/3rdparty/libsdl-org/SDL.vcxproj +++ /dev/null @@ -1,540 +0,0 @@ - - - - - Debug - x64 - - - Release - x64 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
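
Brief aside on the libpng hunks above: whether LIBPNG_TARGET ends up as the bundled png_static or the system libpng found via find_package(PNG REQUIRED), consumers see the same C API. A tiny sketch using the public signature check (the buffer is assumed to hold at least 8 bytes):

    #include <png.h>

    // Returns true when buf starts with the 8-byte PNG file signature.
    bool looks_like_png(const unsigned char* buf)
    {
        return png_sig_cmp(buf, 0, 8) == 0;
    }
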
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - {8DC244EE-A0BD-4038-BAF7-CFAFA5EB2BAA} - - - - - - StaticLibrary - false - true - Unicode - x64 - - - - - - - - - - - - - - - - - SDL\src;$(IncludePath) - - - - TurnOffAllWarnings - false - SDL\include;SDL\include\build_config;%(AdditionalIncludeDirectories) - ProgramDatabase - MaxSpeed - - - - - - \ No newline at end of file diff --git a/3rdparty/libsdl-org/SDL.vcxproj.filters b/3rdparty/libsdl-org/SDL.vcxproj.filters deleted file mode 100644 index 44d4e4ec9f..0000000000 --- a/3rdparty/libsdl-org/SDL.vcxproj.filters +++ /dev/null @@ -1,1592 +0,0 @@ - - - - - {395b3af0-33d0-411b-b153-de1676bf1ef8} - - - {5a3e3167-75be-414f-8947-a5306df372b2} - - - {546d9ed1-988e-49d3-b1a5-e5b3d19de6c1} - - - {a56247ff-5108-4960-ba6a-6814fd1554ec} - - - {8880dfad-2a06-4e84-ab6e-6583641ad2d1} - - - {2b996a7f-f3e9-4300-a97f-2c907bcd89a9} - - - {5713d682-2bc7-4da4-bcf0-262a98f142eb} - - - {5e27e19f-b3f8-4e2d-b323-b00b2040ec86} - - - {a3ab9cff-8495-4a5c-8af6-27e43199a712} - - - {377061e4-3856-4f05-b916-0d3b360df0f6} - - - {226a6643-1c65-4c7f-92aa-861313d974bb} - - - {ef859522-a7fe-4a00-a511-d6a9896adf5b} - - - {01fd2642-4493-4316-b548-fb829f4c9125} - - - {cce7558f-590a-4f0a-ac0d-e579f76e588e} - - - {7a53c9e4-d4bd-40ed-9265-1625df685121} - - - {4c7a051c-ce7c-426c-bf8c-9187827f9052} - - - {97e2f79f-311b-42ea-81b2-e801649fdd93} - - - {baf97c8c-7e90-41e5-bff8-14051b8d3956} - - - {45e50d3a-56c9-4352-b811-0c60c49a2431} - - - {9d86e0ef-d6f6-4db2-bfc5-b3529406fa8d} - - - {b35fa13c-6ed2-4680-8c56-c7d71b76ceab} - - - {61b61b31-9e26-4171-a3bb-b969f1889726} - - - {f63aa216-6ee7-4143-90d3-32be3787f276} - - - {90bee923-89df-417f-a6c3-3e260a7dd54d} - - - {4c8ad943-c2fb-4014-9ca3-041e0ad08426} - - - {3d68ae70-a9ff-46cf-be69-069f0b02aca0} - - - {ebc2fca3-3c26-45e3-815e-3e0581d5e226} - - - {47c445a2-7014-4e15-9660-7c89a27dddcf} - - - {d008487d-6ed0-4251-848b-79a68e3c1459} - - - {c9e8273e-13ae-47dc-bef8-8ad8e64c9a3e} - - - {c9e8273e-13ae-47dc-bef8-8ad8e64c9a3d} - - - {0b8e136d-56ae-47e7-9981-e863a57ac616} - - - {bf3febd3-9328-43e8-b196-0fd3be8177dd} - - - {1a62dc68-52d2-4c07-9d81-d94dfe1d0d12} - - - {e9f01b22-34b3-4380-ade6-0e96c74e9c90} - - - {f674f22f-7841-4f3a-974e-c36b2d4823fc} - - - {d7ad92de-4e55-4202-9b2b-1bd9a35fe4dc} - - - {8311d79d-9ad5-4369-99fe-b2fb2659d402} - - - {6c4dfb80-fdf9-497c-a6ff-3cd8f22efde9} - - - {4810e35c-33cb-4da2-bfaf-452da20d3c9a} - - - {2cf93f1d-81fd-4bdc-998c-5e2fa43988bc} - - - {5752b7ab-2344-4f38-95ab-b5d3bc150315} - - - {7a0eae3d-f113-4914-b926-6816d1929250} - - - {ee602cbf-96a2-4b0b-92a9-51d38a727411} - - - {a812185b-9060-4a1c-8431-be4f66894626} - - - {31c16cdf-adc4-4950-8293-28ba530f3882} - - - {add61b53-8144-47d6-bd67-3420a87c4905} - - - {e7cdcf36-b462-49c7-98b7-07ea7b3687f4} - - - {82588eef-dcaa-4f69-b2a9-e675940ce54c} - - - {560239c3-8fa1-4d23-a81a-b8408b2f7d3f} - - - {81711059-7575-4ece-9e68-333b63e992c4} - - - {1e44970f-7535-4bfb-b8a5-ea0cea0349e0} - - - {1dd91224-1176-492b-a2cb-e26153394db0} - - - {e3ecfe50-cf22-41d3-8983-2fead5164b47} - - - {5521d22f-1e52-47a6-8c52-06a3b6bdefd7} - - - {4755f3a6-49ac-46d6-86be-21f5c21f2197} - - - {f48c2b17-1bee-4fec-a7c8-24cf619abe08} - - - {653672cc-90ae-4eba-a256-6479f2c31804} - - - {00001967ea2801028a046a722a070000} - - - {0000ddc7911820dbe64274d3654f0000} - - - {0000de1b75e1a954834693f1c81e0000} - - - {0000fc2700d453b3c8d79fe81e1c0000} - - - {0000fbfe2d21e4f451142e7d0e870000} - - - 
{5115ba31-20f8-4eab-a8c5-6a572ab78ff7} - - - {00003288226ff86b99eee5b443e90000} - - - {0000d7fda065b13b0ca4ab262c380000} - - - {098fbef9-d8a0-4b3b-b57b-d157d395335d} - - - {00008dfdfa0190856fbf3c7db52d0000} - - - {748cf015-00b8-4e71-ac48-02e947e4d93d} - - - {00009d5ded166cc6c6680ec771a30000} - - - {00004d6806b6238cae0ed62db5440000} - - - {000028b2ea36d7190d13777a4dc70000} - - - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - API Headers - - - camera - - - camera - - - filesystem - - - io - - - io - - - main - - - - - - API Headers - - - API Headers - - - API Headers - - - audio - - - audio - - - audio - - - audio - - - audio - - - audio - - - core\windows - - - core\windows - - - core\windows - - - core\windows - - - core\windows - - - core\windows - - - core\windows - - - cpuinfo - - - dynapi - - - dynapi - - - dynapi - - - dynapi - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - haptic - - - haptic - - - joystick - - - joystick - - - joystick - - - joystick - - - joystick - - - joystick - - - joystick - - - libm - - - libm - - - hidapi\hidapi - - - locale - - - misc - - - audio\directsound - - - audio\disk - - - audio\dummy - - - audio\wasapi - - - haptic\windows - - - haptic\windows - - - joystick\hidapi - - - joystick\hidapi - - - joystick\windows - - - joystick\windows - - - joystick\windows - - - joystick\windows - - - joystick\virtual - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video\dummy - - - video\dummy - - - video\dummy - - - video\yuv2rgb - - - video\yuv2rgb - - - video\yuv2rgb - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - timer - - - thread - - - thread - - - thread\windows - - - thread\windows - - - thread\generic - - - sensor - - - sensor - - - sensor\dummy - - - sensor\windows - - 
- render - - - render - - - render - - - render\direct3d - - - render\direct3d11 - - - render\opengl - - - render\opengl - - - render\opengles2 - - - render\opengles2 - - - render\software - - - render\software - - - render\software - - - render\software - - - render\software - - - render\software - - - render\software - - - render\software - - - render\software - - - power - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - video\khronos\vulkan - - - - - - - - render\direct3d12 - - - - - - - - - - - - render\vulkan - - - video\offscreen - - - video\offscreen - - - video\offscreen - - - video\offscreen - - - video\offscreen - - - video\offscreen - - - API Headers - - - gpu - - - gpu - - - - - - - - - camera\dummy - - - camera\mediafoundation - - - camera - - - dialog - - - dialog - - - filesystem - - - filesystem\windows - - - io\generic - - - io - - - io\windows - - - main\generic - - - main - - - main - - - main\windows - - - - - - - - - - - - audio - - - audio - - - audio - - - audio - - - audio - - - audio - - - audio - - - audio - - - atomic - - - atomic - - - core - - - core\windows - - - core\windows - - - core\windows - - - core\windows - - - core\windows - - - cpuinfo - - - dialog - - - dynapi - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - events - - - io - - - filesystem\windows - - - haptic - - - hidapi - - - joystick - - - joystick - - - joystick - - - joystick - - - libm - - - loadso\windows - - - misc - - - misc\windows - - - locale\windows - - - locale - - - audio\directsound - - - audio\disk - - - audio\dummy - - - audio\wasapi - - - haptic\windows - - - haptic\windows - - - haptic\dummy - - - joystick\dummy - - - joystick\gdk - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\hidapi - - - joystick\windows - - - joystick\windows - - - joystick\windows - - - joystick\windows - - - joystick\windows - - - joystick\virtual - - - time - - - time\windows - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video - - - video\dummy - - - video\dummy - - - video\dummy - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - video\windows - - - timer - - - timer\windows - - - thread - - - thread\windows - - - 
thread\windows - - - thread\windows - - - thread\windows - - - thread\windows - - - thread\windows - - - thread\generic - - - stdlib - - - stdlib - - - stdlib - - - stdlib - - - stdlib - - - stdlib - - - stdlib - - - stdlib - - - stdlib - - - stdlib - - - stdlib - - - stdlib - - - stdlib - - - stdlib - - - sensor - - - sensor\dummy - - - sensor\windows - - - render - - - render - - - render - - - render - - - render\direct3d - - - render\direct3d - - - render\direct3d11 - - - render\direct3d11 - - - render\opengl - - - render\opengl - - - render\opengles2 - - - render\opengles2 - - - render\software - - - render\software - - - render\software - - - render\software - - - render\software - - - render\software - - - render\software - - - render\software - - - power - - - power\windows - - - - render\direct3d12 - - - render\direct3d12 - - - core\windows - - - stdlib - - - - - - - - render\vulkan - - - render\vulkan - - - video\offscreen - - - video\offscreen - - - video\offscreen - - - video\offscreen - - - video\offscreen - - - video\offscreen - - - gpu - - - gpu - - - gpu - - - - - - - - - - - - - - \ No newline at end of file diff --git a/3rdparty/libusb/CMakeLists.txt b/3rdparty/libusb/CMakeLists.txt index 0fc66f2429..21f3e019ef 100644 --- a/3rdparty/libusb/CMakeLists.txt +++ b/3rdparty/libusb/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required(VERSION 2.8.4) + project(libusb) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake_modules") @@ -11,7 +13,7 @@ option(WITHOUT_PTHREADS "force pthreads to not be used. if on, then they are us set(LIBUSB_MAJOR 1) set(LIBUSB_MINOR 0) -set(LIBUSB_MICRO 26) +set(LIBUSB_MICRO 23) macro(append_compiler_flags) foreach(FLAG IN ITEMS ${ARGN}) diff --git a/3rdparty/libusb/config.cmake b/3rdparty/libusb/config.cmake index 1a9833eb9b..57f19f07a6 100644 --- a/3rdparty/libusb/config.cmake +++ b/3rdparty/libusb/config.cmake @@ -89,11 +89,7 @@ set(PACKAGE_URL "http://www.libusb.org" CACHE INTERNAL "package url" FORCE) set(PACKAGE_TARNAME "libusb" CACHE INTERNAL "tarball name" FORCE) set(VERSION "${PACKAGE_VERSION}" CACHE INTERNAL "version" FORCE) -if(MSVC) - file(COPY libusb/msvc/config.h DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) -else() - configure_file(config.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config.h @ONLY) -endif() +configure_file(config.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/config.h @ONLY) message(STATUS "Generated configuration file in ${CMAKE_CURRENT_BINARY_DIR}/config.h") # for generated config.h diff --git a/3rdparty/libusb/config.h.cmake b/3rdparty/libusb/config.h.cmake index 60db929d54..dbf72bfa39 100644 --- a/3rdparty/libusb/config.h.cmake +++ b/3rdparty/libusb/config.h.cmake @@ -1,8 +1,6 @@ #ifndef LIBUSB_CONFIG_H #define LIBUSB_CONFIG_H -#define PRINTF_FORMAT(a, b) __attribute__ ((__format__ (__printf__, a, b))) - #define DEFAULT_VISIBILITY @DEFAULT_VISIBILITY@ #cmakedefine ENABLE_DEBUG_LOGGING diff --git a/3rdparty/libusb/libusb b/3rdparty/libusb/libusb index 15a7ebb4d4..c33990a300 160000 --- a/3rdparty/libusb/libusb +++ b/3rdparty/libusb/libusb @@ -1 +1 @@ -Subproject commit 15a7ebb4d426c5ce196684347d2b7cafad862626 +Subproject commit c33990a300674e24f47ff0f172f7efb10b63b88a diff --git a/3rdparty/libusb/libusb.cmake b/3rdparty/libusb/libusb.cmake index 6de23c3528..7df04c32e0 100644 --- a/3rdparty/libusb/libusb.cmake +++ b/3rdparty/libusb/libusb.cmake @@ -3,6 +3,22 @@ include(os.cmake) include(config.cmake) include(FindThreads) +set (LIBUSB_COMMON + core.c + descriptor.c + io.c + sync.c + hotplug.c + strerror.c + libusb-1.0.rc + 
libusb-1.0.def +) + +foreach(SRC IN LISTS LIBUSB_COMMON) + list(APPEND LIBUSB_COMMON_FINAL ${LIBUSB_SOURCE_DIR}/libusb/${SRC}) +endforeach() + + include_directories(${LIBUSB_SOURCE_DIR}/libusb) include_directories(${LIBUSB_SOURCE_DIR}/libusb/os) @@ -12,14 +28,7 @@ endif() add_library(usb-1.0-static STATIC - libusb/libusb/core.c - libusb/libusb/descriptor.c - libusb/libusb/io.c - libusb/libusb/sync.c - libusb/libusb/hotplug.c - libusb/libusb/strerror.c - libusb/libusb/libusb-1.0.rc - libusb/libusb/libusb-1.0.def + ${LIBUSB_COMMON_FINAL} ${LIBUSB_PLATFORM} ) diff --git a/3rdparty/libusb/libusb_static.vcxproj b/3rdparty/libusb/libusb_static.vcxproj index 66dfc2ac29..703b5bdb25 100644 --- a/3rdparty/libusb/libusb_static.vcxproj +++ b/3rdparty/libusb/libusb_static.vcxproj @@ -35,8 +35,8 @@ <_ProjectFileVersion>10.0.30319.1 - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ + $(SolutionDir)lib/ + $(SolutionDir)tmp\$(ProjectName)-$(Configuration)-$(Platform)/ libusb-1.0 @@ -50,9 +50,11 @@ ProgramDatabase Disabled + MultiThreadedDebug NDEBUG;%(PreprocessorDefinitions) + MultiThreaded $(OutDir)libusb-1.0.lib diff --git a/3rdparty/libusb/os.cmake b/3rdparty/libusb/os.cmake index dc87180272..cde292bc0a 100644 --- a/3rdparty/libusb/os.cmake +++ b/3rdparty/libusb/os.cmake @@ -5,43 +5,42 @@ if (CMAKE_USE_PTHREADS_INIT) set(PTHREADS_ENABLED TRUE) endif() -if (CYGWIN) +if (WIN32 OR "${CMAKE_SYSTEM_NAME}" STREQUAL "CYGWIN") add_compile_definitions(PLATFORM_WINDOWS=1) - set(OS_WINDOWS 1) + set(OS_WINDOWS 1 CACHE INTERNAL "controls config.h macro definition" FORCE) - message(STATUS "Detected Cygwin") - set(PTHREADS_ENABLED TRUE) - set(WITHOUT_POLL_H TRUE) + # Enable MingW support for RC language (for CMake pre-2.8) + if (MINGW) + set(CMAKE_RC_COMPILER_INIT windres) + set(CMAKE_RC_COMPILE_OBJECT " -O coff -i -o ") + endif() + enable_language(RC) + + if ("${CMAKE_SYSTEM_NAME}" STREQUAL "CYGWIN") + message(STATUS "Detected cygwin") + set(PTHREADS_ENABLED TRUE) + set(WITHOUT_POLL_H TRUE CACHE INTERNAL "Disable using poll.h even if it's available - use windows poll instead fo cygwin's" FORCE) + endif() list(APPEND PLATFORM_SRC events_windows.c windows_usbdk.c windows_common.c windows_winusb.c + threads_windows.c ) if (PTHREADS_ENABLED AND NOT WITHOUT_PTHREADS) list(APPEND PLATFORM_SRC threads_posix) + else() + list(APPEND PLATFORM_SRC threads_windows.c) endif() -elseif(WIN32) - add_compile_definitions(PLATFORM_WINDOWS=1) - set(OS_WINDOWS 1) - set(PTHREADS_ENABLED FALSE) - - list(APPEND PLATFORM_SRC - events_windows.c - windows_usbdk.c - windows_common.c - windows_winusb.c - ) - - list(APPEND PLATFORM_SRC threads_windows.c) elseif (APPLE) # Apple != OSX alone add_compile_definitions(PLATFORM_POSIX=1 HAVE_CLOCK_GETTIME) - set(OS_DARWIN 1) + set(OS_DARWIN 1 CACHE INTERNAL "controls config.h macro definition" FORCE) - if (CMAKE_SYSTEM_NAME STREQUAL "Darwin") + if ("${CMAKE_SYSTEM_NAME}" STREQUAL "Darwin") set(PLATFORM_SRC darwin_usb.c threads_posix.c @@ -50,7 +49,6 @@ elseif (APPLE) find_package(IOKit REQUIRED) list(APPEND LIBUSB_LIBRARIES ${IOKit_LIBRARIES}) - list(APPEND LIBUSB_LIBRARIES "-framework Security") # Currently only objc_registerThreadWithCollector requires linking against it # which is only for MAC_OS_X_VERSION_MIN_REQUIRED >= 1060 @@ -77,8 +75,8 @@ int main() elseif (UNIX) # Unix is for all *NIX systems including OSX add_compile_definitions(PLATFORM_POSIX=1 HAVE_CLOCK_GETTIME) - if ("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux" OR 
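
For the libusb.cmake refactor above (the usb-1.0-static target now takes its common sources from the LIBUSB_COMMON list): a minimal device-enumeration check against the public libusb-1.0 API:

    #include <libusb.h>

    int main()
    {
        libusb_context* ctx = nullptr;
        if (libusb_init(&ctx) != 0)
            return 1;
        libusb_device** list = nullptr;
        const ssize_t n = libusb_get_device_list(ctx, &list);
        if (n >= 0)
            libusb_free_device_list(list, /*unref_devices=*/1);
        libusb_exit(ctx);
        return 0;
    }
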
ANDROID) - set(OS_LINUX 1) + if ("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux") + set(OS_LINUX 1 CACHE INTERNAL "controls config.h macro definition" FORCE) set(PLATFORM_SRC linux_usbfs.c @@ -87,9 +85,7 @@ elseif (UNIX) events_posix.c ) - if(NOT ANDROID) - list(APPEND LIBUSB_LIBRARIES rt) - endif() + list(APPEND LIBUSB_LIBRARIES rt) endif() endif() @@ -111,4 +107,4 @@ set(LIBUSB_LIBRARIES ${LIBUSB_LIBRARIES} PARENT_SCOPE) if (WITHOUT_PTHREADS) set(PTHREADS_ENABLED FALSE) endif() -set(THREADS_POSIX ${PTHREADS_ENABLED}) +set(THREADS_POSIX ${PTHREADS_ENABLED} CACHE INTERNAL "use pthreads" FORCE) diff --git a/3rdparty/llvm.cmake b/3rdparty/llvm.cmake new file mode 100644 index 0000000000..2f695f26f5 --- /dev/null +++ b/3rdparty/llvm.cmake @@ -0,0 +1,65 @@ +if(WITH_LLVM) + if(BUILD_LLVM_SUBMODULE) + message(STATUS "LLVM will be built from the submodule.") + + set(LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "") + option(LLVM_BUILD_RUNTIME OFF) + option(LLVM_BUILD_TOOLS OFF) + option(LLVM_INCLUDE_BENCHMARKS OFF) + option(LLVM_INCLUDE_DOCS OFF) + option(LLVM_INCLUDE_EXAMPLES OFF) + option(LLVM_INCLUDE_TESTS OFF) + option(LLVM_INCLUDE_TOOLS OFF) + option(LLVM_INCLUDE_UTILS OFF) + option(LLVM_CCACHE_BUILD ON) + + set(CXX_FLAGS_OLD ${CMAKE_CXX_FLAGS}) + + if (MSVC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /D _SILENCE_ALL_CXX17_DEPRECATION_WARNINGS") + endif() + + # LLVM needs to be built out-of-tree + add_subdirectory(${CMAKE_SOURCE_DIR}/llvm ${CMAKE_CURRENT_BINARY_DIR}/llvm_build EXCLUDE_FROM_ALL) + set(LLVM_DIR "${CMAKE_CURRENT_BINARY_DIR}/llvm_build/lib/cmake/llvm/") + + set(CMAKE_CXX_FLAGS ${CXX_FLAGS_OLD}) + + # now tries to find LLVM again + find_package(LLVM 11.0 CONFIG) + if(NOT LLVM_FOUND) + message(FATAL_ERROR "Couldn't build LLVM from the submodule. You might need to run `git submodule update --init`") + endif() + + else() + message(STATUS "Using prebuilt LLVM") + + if (LLVM_DIR AND NOT IS_ABSOLUTE "${LLVM_DIR}") + # change relative LLVM_DIR to be relative to the source dir + set(LLVM_DIR ${CMAKE_SOURCE_DIR}/${LLVM_DIR}) + endif() + + find_package(LLVM 11.0 CONFIG) + + if (NOT LLVM_FOUND) + if (LLVM_VERSION AND LLVM_VERSION_MAJOR LESS 11) + message(FATAL_ERROR "Found LLVM version ${LLVM_VERSION}. Required version 11.0. \ + Enable BUILD_LLVM_SUBMODULE option to build LLVM from included as a git submodule.") + endif() + + message(FATAL_ERROR "Can't find LLVM libraries from the CMAKE_PREFIX_PATH path or LLVM_DIR. 
\ + Enable BUILD_LLVM_SUBMODULE option to build LLVM from included as a git submodule.") + endif() + endif() + + set(LLVM_LIBS LLVMMCJIT LLVMX86CodeGen LLVMX86AsmParser) + + add_library(3rdparty_llvm INTERFACE) + target_link_libraries(3rdparty_llvm INTERFACE ${LLVM_LIBS}) + target_include_directories(3rdparty_llvm INTERFACE ${LLVM_INCLUDE_DIRS}) + target_compile_definitions(3rdparty_llvm INTERFACE ${LLVM_DEFINITIONS} -DLLVM_AVAILABLE) + + add_library(3rdparty::llvm ALIAS 3rdparty_llvm) +else() + add_library(3rdparty::llvm ALIAS 3rdparty_dummy_lib) +endif() diff --git a/3rdparty/llvm/CMakeLists.txt b/3rdparty/llvm/CMakeLists.txt deleted file mode 100644 index d1295886d8..0000000000 --- a/3rdparty/llvm/CMakeLists.txt +++ /dev/null @@ -1,117 +0,0 @@ -if(WITH_LLVM) - check_cxx_compiler_flag("-msse -msse2 -mcx16" COMPILER_X86) - check_cxx_compiler_flag("-march=armv8-a+lse" COMPILER_ARM) - - if(BUILD_LLVM) - message(STATUS "LLVM will be built from the submodule.") - - if (ANDROID) - if (COMPILER_ARM) - set(LLVM_TARGETS_TO_BUILD "AArch64" CACHE STRING "Semicolon-separated list of targets to build, or \"all\".") - set(LLVM_TARGET_ARCH "${CMAKE_SYSTEM_PROCESSOR}-none-linux-android${ANDROID_NATIVE_API_LEVEL}" CACHE STRING "") - else() - set(LLVM_TARGETS_TO_BUILD "X86" CACHE STRING "Semicolon-separated list of targets to build, or \"all\".") - endif() - else() - set(LLVM_TARGETS_TO_BUILD "AArch64;X86" CACHE STRING "Semicolon-separated list of targets to build, or \"all\".") - endif() - - option(LLVM_BUILD_RUNTIME "Build the LLVM runtime libraries." OFF) - option(LLVM_BUILD_TOOLS "Build the LLVM tools. If OFF, just generate build targets." OFF) - option(LLVM_INCLUDE_BENCHMARKS "Generate benchmark targets. If OFF, benchmarks can't be built." OFF) - option(LLVM_INCLUDE_DOCS "Generate build targets for llvm documentation." OFF) - option(LLVM_INCLUDE_EXAMPLES "Generate build targets for the LLVM examples" OFF) - option(LLVM_INCLUDE_TESTS "Generate build targets for the LLVM unit tests." OFF) - option(LLVM_INCLUDE_TOOLS "Generate build targets for the LLVM tools." OFF) - option(LLVM_INCLUDE_UTILS "Generate build targets for the LLVM utils." OFF) - option(LLVM_CCACHE_BUILD "Set to ON for a ccache enabled build" OFF) - set(LLVM_ENABLE_WARNINGS OFF CACHE BOOL "Enable compiler warnings.") - - # For Windows x86 (not Windows AArch64). - # Check on MSVC is needed due to COMPILER_X86, COMPILER_ARM etc. are not set/supported by the MSVC compiler, if used. - # Furthermore, the MSVC compiler is not available/supported on Windows AArch64 - if(WIN32 AND (COMPILER_X86 OR MSVC)) - set(LLVM_USE_INTEL_JITEVENTS ON) - endif() - - if(CMAKE_SYSTEM_NAME STREQUAL "Linux") - if(COMPILER_X86) - set(LLVM_USE_INTEL_JITEVENTS ON) - endif() - set(LLVM_USE_PERF ON) - endif() - - if (MSVC) - add_compile_definitions("$<$:_SILENCE_ALL_CXX17_DEPRECATION_WARNINGS>") - endif() - - # LLVM needs to be built out-of-tree - add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/llvm/llvm ${CMAKE_CURRENT_BINARY_DIR}/llvm_build EXCLUDE_FROM_ALL) - set(LLVM_DIR "${CMAKE_CURRENT_BINARY_DIR}/llvm_build/lib/cmake/llvm/") - - set(STATIC_LINK_LLVM ON CACHE BOOL "Link against LLVM statically. This will get set to ON if you build LLVM from the submodule." FORCE) - - find_package(LLVM 19.1 CONFIG) - if(NOT LLVM_FOUND) - message(FATAL_ERROR "Couldn't build LLVM from the submodule. 
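
Note on the llvm.cmake / 3rdparty_llvm wiring above: consumers link 3rdparty::llvm and get LLVM_AVAILABLE defined only when WITH_LLVM is on, so JIT code paths can be compiled out entirely. A hedged sketch of the guard (init_jit is a hypothetical consumer, not RPCS3 code):

    #ifdef LLVM_AVAILABLE
    #include <llvm/ExecutionEngine/MCJIT.h>

    void init_jit()
    {
        LLVMLinkInMCJIT(); // force the MCJIT engine into the final link
    }
    #else
    void init_jit() {} // interpreter-only build, LLVM compiled out
    #endif
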
You might need to run `git submodule update --init`") - endif() - else() - message(STATUS "Using prebuilt or system LLVM") - - if (LLVM_DIR AND NOT IS_ABSOLUTE "${LLVM_DIR}") - # change relative LLVM_DIR to be relative to the source dir - set(LLVM_DIR ${CMAKE_SOURCE_DIR}/${LLVM_DIR}) - endif() - - find_package(LLVM CONFIG) - - if (NOT LLVM_FOUND) - message(FATAL_ERROR "Can't find LLVM libraries from the CMAKE_PREFIX_PATH path or LLVM_DIR. Enable BUILD_LLVM option to build LLVM from included as a git submodule.") - endif() - if (LLVM_VERSION VERSION_LESS 18) - message(FATAL_ERROR "Found LLVM version ${LLVM_VERSION}. Required version 18 or above. Enable BUILD_LLVM option to build LLVM from included as a git submodule.") - endif() - endif() - - if (STATIC_LINK_LLVM) - if (NOT DEFINED LLVM_TARGETS_TO_BUILD) - if(COMPILER_ARM) - set(LLVM_TARGETS_TO_BUILD "AArch64" CACHE STRING "Semicolon-separated list of targets to build, or \"all\".") - else() - set(LLVM_TARGETS_TO_BUILD "X86" CACHE STRING "Semicolon-separated list of targets to build, or \"all\".") - endif() - endif() - - # For Windows x86 (not Windows AArch64) only when BUILD_LLVM is enabled and - # for Linux x86 (not Linux AArch64) even if BUILD_LLVM is disabled (precompiled llvm used) - if(LLVM_USE_INTEL_JITEVENTS OR (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND COMPILER_X86)) - list (APPEND LLVM_ADDITIONAL_LIBS IntelJITEvents) - endif() - - # For Linux even if BUILD_LLVM is disabled (precompiled llvm used) - if(CMAKE_SYSTEM_NAME STREQUAL "Linux") - list (APPEND LLVM_ADDITIONAL_LIBS PerfJITEvents) - endif() - - llvm_map_components_to_libnames(LLVM_LIBS - ${LLVM_TARGETS_TO_BUILD} - ${LLVM_ADDITIONAL_LIBS} - Core - ExecutionEngine - MCJIT - Passes - ) - else() - set(LLVM_LIBS LLVM) - endif() - - add_library(3rdparty_llvm INTERFACE) - target_link_libraries(3rdparty_llvm INTERFACE ${LLVM_LIBS}) - target_include_directories(3rdparty_llvm INTERFACE ${LLVM_INCLUDE_DIRS}) - separate_arguments(LLVM_DEFINITIONS_LIST NATIVE_COMMAND ${LLVM_DEFINITIONS}) - target_compile_definitions(3rdparty_llvm INTERFACE ${LLVM_DEFINITIONS_LIST} LLVM_AVAILABLE) - - add_library(3rdparty::llvm ALIAS 3rdparty_llvm) -else() - add_library(3rdparty::llvm ALIAS 3rdparty_dummy_lib) -endif() diff --git a/3rdparty/llvm/llvm b/3rdparty/llvm/llvm deleted file mode 160000 index cd708029e0..0000000000 --- a/3rdparty/llvm/llvm +++ /dev/null @@ -1 +0,0 @@ -Subproject commit cd708029e0b2869e80abe31ddb175f7c35361f90 diff --git a/3rdparty/miniupnp/CMakeLists.txt b/3rdparty/miniupnp/CMakeLists.txt deleted file mode 100644 index 93c3dc55cd..0000000000 --- a/3rdparty/miniupnp/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -option (UPNPC_BUILD_STATIC "Build static library" TRUE) -option (UPNPC_BUILD_SHARED "Build shared library" FALSE) -option (UPNPC_BUILD_TESTS "Build test executables" FALSE) -option (UPNPC_BUILD_SAMPLE "Build sample executables" FALSE) -option (NO_GETADDRINFO "Define NO_GETADDRINFO" FALSE) -option (UPNPC_NO_INSTALL "Disable installation" TRUE) - -add_subdirectory(miniupnp/miniupnpc EXCLUDE_FROM_ALL) diff --git a/3rdparty/miniupnp/miniupnp b/3rdparty/miniupnp/miniupnp deleted file mode 160000 index d66872e34d..0000000000 --- a/3rdparty/miniupnp/miniupnp +++ /dev/null @@ -1 +0,0 @@ -Subproject commit d66872e34d9ff83a07f8b71371b13419b2089953 diff --git a/3rdparty/miniupnp/miniupnpc_static.vcxproj b/3rdparty/miniupnp/miniupnpc_static.vcxproj deleted file mode 100644 index e331c013d4..0000000000 --- a/3rdparty/miniupnp/miniupnpc_static.vcxproj +++ /dev/null @@ -1,138 +0,0 @@ - - - - 
- Debug - x64 - - - Release - x64 - - - - Win32Proj - {5228f863-e0dd-4de7-aa7b-5c52b14cd4d0} - miniupnpcstatic - - - - - - StaticLibrary - true - Unicode - - - StaticLibrary - false - true - Unicode - - - - - - - - - - - - - - - true - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - - false - $(SolutionDir)build\lib\$(Configuration)-$(Platform)\ - $(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\ - - - - Level3 - true - _WINSOCK_DEPRECATED_NO_WARNINGS; _CRT_SECURE_NO_WARNINGS;MINIUPNP_STATICLIB;_DEBUG;_LIB;%(PreprocessorDefinitions) - true - NotUsing - pch.h - generated;miniupnp\miniupnpc;miniupnp\miniupnpc\include - - - true - Windows - - - cd $(ProjectDir)miniupnp\miniupnpc\msvc -cscript genminiupnpcstrings.vbs - - - - - Level3 - true - true - true - _WINSOCK_DEPRECATED_NO_WARNINGS; _CRT_SECURE_NO_WARNINGS;MINIUPNP_STATICLIB;NDEBUG;_LIB;%(PreprocessorDefinitions) - true - NotUsing - pch.h - generated;miniupnp\miniupnpc;miniupnp\miniupnpc\include - - - - - true - true - true - - - cd $(ProjectDir)miniupnp\miniupnpc\msvc -cscript genminiupnpcstrings.vbs - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/3rdparty/opencv/CMakeLists.txt b/3rdparty/opencv/CMakeLists.txt deleted file mode 100644 index 1103f3be94..0000000000 --- a/3rdparty/opencv/CMakeLists.txt +++ /dev/null @@ -1,21 +0,0 @@ -# OpenCV - -set(OPENCV_TARGET 3rdparty_dummy_lib PARENT_SCOPE) - -if (USE_SYSTEM_OPENCV) - message(STATUS "RPCS3: using system OpenCV") - find_package(OpenCV COMPONENTS core photo) - - if(OPENCV_FOUND) - message(STATUS "RPCS3: found system OpenCV") - include_directories(${OpenCV_INCLUDE_DIRS}) - add_library(3rdparty_opencv INTERFACE) - target_link_libraries(3rdparty_opencv INTERFACE ${OpenCV_LIBS}) - target_compile_definitions(3rdparty_opencv INTERFACE -DHAVE_OPENCV) - set(OPENCV_TARGET 3rdparty_opencv PARENT_SCOPE) - else() - message(WARNING "RPCS3: OpenCV not found. Building without OpenCV support") - endif() -else() - message(WARNING "RPCS3: Building without OpenCV support") -endif() diff --git a/3rdparty/opencv/opencv b/3rdparty/opencv/opencv deleted file mode 160000 index 50fb5e701d..0000000000 --- a/3rdparty/opencv/opencv +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 50fb5e701d8b0d3fe8262ed84668a94cc8cbf0b1 diff --git a/3rdparty/pine/pine_server.h b/3rdparty/pine/pine_server.h deleted file mode 100644 index fe88ddb474..0000000000 --- a/3rdparty/pine/pine_server.h +++ /dev/null @@ -1,629 +0,0 @@ -// Based on https://github.com/PCSX2/pcsx2/blob/edeb0d7bd7258c58273cc4a88a9f9a823d71e48c/pcsx2/IPC.h -// and https://github.com/PCSX2/pcsx2/blob/edeb0d7bd7258c58273cc4a88a9f9a823d71e48c/pcsx2/IPC.cpp -// Relicensed as GPLv2 for use in RPCS3 with permission from copyright owner (Govanify). 
- -#pragma once - -// IPC uses a concept of "slot" to be able to communicate with multiple -// emulators at the same time, each slot should be unique to each emulator to -// allow PnP and configurable by the end user so that several runs don't -// conflict with each others -#define IPC_DEFAULT_SLOT 28012 - -#include -#include "stdafx.h" -#include -#include -#include -#include -#if _WIN32 -#define read_portable(a, b, c) (recv(a, b, ::narrow(c), 0)) -#define write_portable(a, b, c) (send(a, b, ::narrow(c), 0)) -#define close_portable(a) (closesocket(a)) -#define bzero(b, len) (memset((b), '\0', (len)), (void)0) -#include -#include -#include -#else -#define read_portable(a, b, c) (read(a, b, c)) -#define write_portable(a, b, c) (write(a, b, c)) -#define close_portable(a) (close(a)) -#include -#include -#include -#include -#include -#endif - -#ifdef _WIN32 -constexpr SOCKET invalid_socket = INVALID_SOCKET; -#else -constexpr int invalid_socket = -1; -#endif - -namespace pine -{ - /** - * Emulator status enum. @n - * A list of possible emulator statuses. @n - */ - enum EmuStatus : uint32_t - { - Running = 0, /**< Game is running */ - Paused = 1, /**< Game is paused */ - Shutdown = 2, /**< Game is shutdown */ - }; - - typedef unsigned long long SOCKET; - - template - class pine_server : public Impl - { - public: -#ifdef _WIN32 - // windows claim to have support for AF_UNIX sockets but that is a blatant lie, - // their SDK won't even run their own examples, so we go on TCP sockets. - SOCKET m_sock; - // the message socket used in thread's accept(). - SOCKET m_msgsock; -#else - // absolute path of the socket. Stored in XDG_RUNTIME_DIR, if unset /tmp - std::string m_socket_name; - int m_sock = 0; - // the message socket used in thread's accept(). - int m_msgsock = 0; -#endif - - /** - * Maximum memory used by an IPC message request. - * Equivalent to 50,000 Write64 requests. - */ - #define MAX_IPC_SIZE 650000 - - /** - * Maximum memory used by an IPC message reply. - * Equivalent to 50,000 Read64 replies. - */ - #define MAX_IPC_RETURN_SIZE 450000 - - /** - * IPC return buffer. - * A preallocated buffer used to store all IPC replies. - * to the size of 50.000 MsgWrite64 IPC calls. - */ - std::vector m_ret_buffer; - - /** - * IPC messages buffer. - * A preallocated buffer used to store all IPC messages. - */ - std::vector m_ipc_buffer; - - /** - * IPC Command messages opcodes. - * A list of possible operations possible by the IPC. - * Each one of them is what we call an "opcode" and is the first - * byte sent by the IPC to differentiate between commands. - */ - enum IPCCommand : unsigned char - { - MsgRead8 = 0, /**< Read 8 bit value to memory. */ - MsgRead16 = 1, /**< Read 16 bit value to memory. */ - MsgRead32 = 2, /**< Read 32 bit value to memory. */ - MsgRead64 = 3, /**< Read 64 bit value to memory. */ - MsgWrite8 = 4, /**< Write 8 bit value to memory. */ - MsgWrite16 = 5, /**< Write 16 bit value to memory. */ - MsgWrite32 = 6, /**< Write 32 bit value to memory. */ - MsgWrite64 = 7, /**< Write 64 bit value to memory. */ - MsgVersion = 8, /**< Returns RPCS3 version. */ - MsgTitle = 0xB, /**< Returns the game title. */ - MsgID = 0xC, /**< Returns the game ID. */ - MsgUUID = 0xD, /**< Returns the game UUID. */ - MsgGameVersion = 0xE, /**< Returns the game verion. */ - MsgStatus = 0xF, /**< Returns the emulator status. */ - MsgUnimplemented = 0xFF /**< Unimplemented IPC message. */ - }; - - /** - * IPC message buffer. - * A list of all needed fields to store an IPC message. 
- */ - struct IPCBuffer - { - usz size{}; /**< Size of the buffer. */ - char* buffer{}; /**< Buffer. */ - }; - - /** - * IPC result codes. - * A list of possible result codes the IPC can send back. - * Each one of them is what we call an "opcode" or "tag" and is the - * first byte sent by the IPC to differentiate between results. - */ - enum IPCResult : unsigned char - { - IPC_OK = 0, /**< IPC command successfully completed. */ - IPC_FAIL = 0xFF /**< IPC command failed to complete. */ - }; - - /** - * Internal function, Parses an IPC command. - * buf: buffer containing the IPC command. - * buf_size: size of the buffer announced. - * ret_buffer: buffer that will be used to send the reply. - * return value: IPCBuffer containing a buffer with the result - * of the command and its size. - */ - IPCBuffer ParseCommand(char* buf, char* ret_buffer, u32 buf_size) - { - usz ret_cnt = 5; - usz buf_cnt = 0; - - const auto error = [&]() - { - return IPCBuffer{ 5, MakeFailIPC(ret_buffer) }; - }; - - const auto write_string = [&](const std::string& str) - { - if (!SafetyChecks(buf_cnt, 0, ret_cnt, str.size() + 1 + sizeof(u32), buf_size)) - return false; - ToArray(ret_buffer, ::narrow(str.size() + 1), ret_cnt); - ret_cnt += sizeof(u32); - memcpy(&ret_buffer[ret_cnt], str.data(), str.size()); - ret_cnt += str.size(); - ret_buffer[ret_cnt++] = '\0'; - return true; - }; - - while (buf_cnt < buf_size) - { - if (!SafetyChecks(buf_cnt, 1, ret_cnt, 0, buf_size)) - return error(); - buf_cnt++; - // example IPC messages: MsgRead/Write - // refer to the client doc for more info on the format - // IPC Message event (1 byte) - // | Memory address (4 byte) - // | | argument (VLE) - // | | | - // format: XX YY YY YY YY ZZ ZZ ZZ ZZ - // reply code: 00 = OK, FF = NOT OK - // | return value (VLE) - // | | - // reply: XX ZZ ZZ ZZ ZZ - IPCCommand command = std::bit_cast(buf[buf_cnt - 1]); - switch (command) - { - case MsgRead8: - { - if (!SafetyChecks(buf_cnt, 4, ret_cnt, 1, buf_size)) - return error(); - const u32 a = FromArray(&buf[buf_cnt], 0); - if (!Impl::template check_addr<1>(a)) - return error(); - const u8 res = Impl::read8(a); - ToArray(ret_buffer, res, ret_cnt); - ret_cnt += 1; - buf_cnt += 4; - break; - } - case MsgRead16: - { - if (!SafetyChecks(buf_cnt, 4, ret_cnt, 2, buf_size)) - return error(); - const u32 a = FromArray(&buf[buf_cnt], 0); - if (!Impl::template check_addr<2>(a)) - return error(); - const u16 res = Impl::read16(a); - ToArray(ret_buffer, res, ret_cnt); - ret_cnt += 2; - buf_cnt += 4; - break; - } - case MsgRead32: - { - if (!SafetyChecks(buf_cnt, 4, ret_cnt, 4, buf_size)) - return error(); - const u32 a = FromArray(&buf[buf_cnt], 0); - if (!Impl::template check_addr<4>(a)) - return error(); - const u32 res = Impl::read32(a); - ToArray(ret_buffer, res, ret_cnt); - ret_cnt += 4; - buf_cnt += 4; - break; - } - case MsgRead64: - { - if (!SafetyChecks(buf_cnt, 4, ret_cnt, 8, buf_size)) - return error(); - const u32 a = FromArray(&buf[buf_cnt], 0); - if (!Impl::template check_addr<8>(a)) - return error(); - u64 res = Impl::read64(a); - ToArray(ret_buffer, res, ret_cnt); - ret_cnt += 8; - buf_cnt += 4; - break; - } - case MsgWrite8: - { - if (!SafetyChecks(buf_cnt, 1 + 4, ret_cnt, 0, buf_size)) - return error(); - const u32 a = FromArray(&buf[buf_cnt], 0); - if (!Impl::template check_addr<1>(a, vm::page_writable)) - return error(); - Impl::write8(a, FromArray(&buf[buf_cnt], 4)); - buf_cnt += 5; - break; - } - case MsgWrite16: - { - if (!SafetyChecks(buf_cnt, 2 + 4, ret_cnt, 0, buf_size)) - return 
error(); - const u32 a = FromArray(&buf[buf_cnt], 0); - if (!Impl::template check_addr<2>(a, vm::page_writable)) - return error(); - Impl::write16(a, FromArray(&buf[buf_cnt], 4)); - buf_cnt += 6; - break; - } - case MsgWrite32: - { - if (!SafetyChecks(buf_cnt, 4 + 4, ret_cnt, 0, buf_size)) - return error(); - const u32 a = FromArray(&buf[buf_cnt], 0); - if (!Impl::template check_addr<4>(a, vm::page_writable)) - return error(); - Impl::write32(a, FromArray(&buf[buf_cnt], 4)); - buf_cnt += 8; - break; - } - case MsgWrite64: - { - if (!SafetyChecks(buf_cnt, 8 + 4, ret_cnt, 0, buf_size)) - return error(); - const u32 a = FromArray(&buf[buf_cnt], 0); - if (!Impl::template check_addr<8>(a, vm::page_writable)) - return error(); - Impl::write64(a, FromArray(&buf[buf_cnt], 4)); - buf_cnt += 12; - break; - } - case MsgVersion: - { - if (!write_string("RPCS3 " + Impl::get_version_and_branch())) - return error(); - break; - } - case MsgStatus: - { - if (!SafetyChecks(buf_cnt, 0, ret_cnt, 4, buf_size)) - return error(); - EmuStatus status = Impl::get_status(); - ToArray(ret_buffer, status, ret_cnt); - ret_cnt += 4; - buf_cnt += 4; - break; - } - case MsgTitle: - { - if (!write_string(Impl::get_title())) - return error(); - break; - } - case MsgID: - { - if (!write_string(Impl::get_title_ID())) - return error(); - break; - } - case MsgUUID: - { - if (!write_string(Impl::get_executable_hash())) - return error(); - break; - } - case MsgGameVersion: - { - if (!write_string(Impl::get_app_version())) - return error(); - break; - } - default: - { - return error(); - } - } - } - return IPCBuffer{ ret_cnt, MakeOkIPC(ret_buffer, ret_cnt) }; - } - - /** - * Formats an IPC buffer - * ret_buffer: return buffer to use. - * size: size of the IPC buffer. - * return value: buffer containing the status code allocated of size - */ - static inline char* MakeOkIPC(char* ret_buffer, usz size = 5) - { - ToArray(ret_buffer, ::narrow(size), 0); - ret_buffer[4] = IPC_OK; - return ret_buffer; - } - - static inline char* MakeFailIPC(char* ret_buffer, usz size = 5) - { - ToArray(ret_buffer, ::narrow(size), 0); - ret_buffer[4] = IPC_FAIL; - return ret_buffer; - } - - /** - * Initializes an open socket for IPC communication. - * return value: false if a fatal failure happened, true otherwise. - */ - bool StartSocket() - { - ::pollfd poll_fd{}; - - for (int pending_connection = 0; pending_connection != 1;) - { - if (thread_ctrl::state() == thread_state::aborting) - { - return false; - } - - std::memset(&poll_fd, 0, sizeof(poll_fd)); - poll_fd.events = POLLIN; - poll_fd.revents = 0; - poll_fd.fd = m_sock; - -#ifdef _WIN32 - // Try to wait for an incoming connection - pending_connection = ::WSAPoll(&poll_fd, 1, 10); -#else - pending_connection = ::poll(&poll_fd, 1, 10); -#endif - } - - m_msgsock = accept(m_sock, 0, 0); - - if (m_msgsock == invalid_socket) - { - // everything else is non recoverable in our scope - // we also mark as recoverable socket errors where it would block a - // non blocking socket, even though our socket is blocking, in case - // we ever have to implement a non blocking socket. -#ifdef _WIN32 - int errno_w = WSAGetLastError(); - if (!(errno_w == WSAECONNRESET || errno_w == WSAEINTR || errno_w == WSAEINPROGRESS || errno_w == WSAEMFILE || errno_w == WSAEWOULDBLOCK)) - { -#else - if (!(errno == ECONNABORTED || errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK)) - { -#endif - Impl::error("IPC: An unrecoverable error happened! 
Shutting down..."); - return false; - } - } - return true; - } - - // Thread used to relay IPC commands. - void operator()() - { - // we allocate once buffers to not have to do mallocs for each IPC - // request, as malloc is expansive when we optimize for µs. - m_ret_buffer.resize(MAX_IPC_RETURN_SIZE); - m_ipc_buffer.resize(MAX_IPC_SIZE); - - if (!StartSocket()) - return; - - while (thread_ctrl::state() != thread_state::aborting) - { - // either int or ssize_t depending on the platform, so we have to - // use a bunch of auto - auto receive_length = 0; - auto end_length = 4; - - // while we haven't received the entire packet, maybe due to - // socket datagram splittage, we continue to read - while (receive_length < end_length) - { - auto tmp_length = read_portable(m_msgsock, &m_ipc_buffer[receive_length], MAX_IPC_SIZE - receive_length); - - // we recreate the socket if an error happens - if (tmp_length <= 0) - { - receive_length = 0; - if (!StartSocket()) - return; - break; - } - - receive_length += tmp_length; - - // if we got at least the final size then update - if (end_length == 4 && receive_length >= 4) - { - end_length = FromArray(m_ipc_buffer.data(), 0); - // we'd like to avoid a client trying to do OOB - if (end_length > MAX_IPC_SIZE || end_length < 4) - { - receive_length = 0; - break; - } - } - } - - // we remove 4 bytes to get the message size out of the IPC command - // size in ParseCommand. - // also, if we got a failed command, let's reset the state so we don't - // end up deadlocking by getting out of sync, eg when a client - // disconnects - if (receive_length != 0) - { - pine_server::IPCBuffer res = ParseCommand(&m_ipc_buffer[4], m_ret_buffer.data(), static_cast(end_length) - 4); - - // if we cannot send back our answer restart the socket - if (write_portable(m_msgsock, res.buffer, res.size) < 0) - { - if (!StartSocket()) - return; - } - } - } - } - - /** - * Converts an uint to an char* in little endian - * res_array: the array to modify - * res: the value to convert - * i: when to insert it into the array - * return value: res_array - * NB: implicitely inlined - */ - template - static char* ToArray(char* res_array, T res, usz i) - { - memcpy(res_array + i, reinterpret_cast(&res), sizeof(T)); - return res_array; - } - - /** - * Converts a char* to an uint in little endian - * arr: the array to convert - * i: when to load it from the array - * return value: the converted value - * NB: implicitely inlined - */ - template - static T FromArray(char* arr, int i) - { - return *reinterpret_cast(arr + i); - } - - /** - * Ensures an IPC message isn't too big. - * return value: false if checks failed, true otherwise. - */ - static inline bool SafetyChecks(usz command_len, usz command_size, usz reply_len, usz reply_size = 0, usz buf_size = MAX_IPC_SIZE - 1) - { - bool res = ((command_len + command_size) > buf_size || - (reply_len + reply_size) >= MAX_IPC_RETURN_SIZE); - if (res) [[unlikely]] - return false; - return true; - } - - public: - /* Initializers */ - pine_server() noexcept - { -#ifdef _WIN32 - WSADATA wsa; - struct sockaddr_in server; - m_sock = INVALID_SOCKET; - m_msgsock = INVALID_SOCKET; - - if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0) - { - Impl::error("IPC: Cannot initialize winsock! Shutting down..."); - return; - } - - if ((m_sock = socket(AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET) - { - Impl::error("IPC: Cannot open socket! 
Shutting down..."); - return; - } - - // yes very good windows s/sun/sin/g sure is fine - server.sin_family = AF_INET; - // localhost only - if (!inet_pton(server.sin_family, "127.0.0.1", &server.sin_addr.s_addr)) - { - fmt::throw_exception("IPC: Failed to convert localhost"); - } - server.sin_port = htons(Impl::get_port()); - - if (bind(m_sock, reinterpret_cast(&server), sizeof(server)) == SOCKET_ERROR) - { - Impl::error("IPC: Error while binding to socket! Shutting down..."); - return; - } - -#else - char* runtime_dir = nullptr; -#ifdef __APPLE__ - runtime_dir = std::getenv("TMPDIR"); -#else - runtime_dir = std::getenv("XDG_RUNTIME_DIR"); -#endif - // fallback in case macOS or other OSes don't implement the XDG base - // spec - if (runtime_dir == nullptr) - m_socket_name = "/tmp/rpcs3.sock"; - else - { - m_socket_name = runtime_dir; - m_socket_name += "/rpcs3.sock"; - } - - const int slot = Impl::get_port(); - if (slot != IPC_DEFAULT_SLOT) - { - fmt::append(m_socket_name, ".%d", slot); - } - - struct sockaddr_un server; - - m_sock = socket(AF_UNIX, SOCK_STREAM, 0); - if (m_sock < 0) - { - Impl::error("IPC: Cannot open socket! Shutting down..."); - return; - } - server.sun_family = AF_UNIX; - strcpy(server.sun_path, m_socket_name.c_str()); - - // we unlink the socket so that when releasing this thread the socket gets - // freed even if we didn't close correctly the loop - unlink(m_socket_name.c_str()); - if (bind(m_sock, std::bit_cast(&server), sizeof(struct sockaddr_un))) - { - Impl::error("IPC: Error while binding to socket! Shutting down..."); - return; - } -#endif - - // maximum queue of 4096 commands before refusing, approximated to the - // nearest legal value. We do not use SOMAXCONN as windows have this idea - // that a "reasonable" value is 5, which is not. - listen(m_sock, 4096); - } - - pine_server(const pine_server&) = delete; - pine_server& operator=(const pine_server&) = delete; - - void Cleanup() - { -#ifdef _WIN32 - WSACleanup(); -#else - unlink(m_socket_name.c_str()); -#endif - close_portable(m_sock); - close_portable(m_msgsock); - } - - ~pine_server() - { - Cleanup(); - } - - }; // class pine_server -} diff --git a/3rdparty/pugixml b/3rdparty/pugixml index ee86beb30e..08b3433180 160000 --- a/3rdparty/pugixml +++ b/3rdparty/pugixml @@ -1 +1 @@ -Subproject commit ee86beb30e4973f5feffe3ce63bfa4fbadf72f38 +Subproject commit 08b3433180727ea2f78fe02e860a08471db1e03c diff --git a/3rdparty/qt5.cmake b/3rdparty/qt5.cmake new file mode 100644 index 0000000000..914ba435e1 --- /dev/null +++ b/3rdparty/qt5.cmake @@ -0,0 +1,45 @@ +add_library(3rdparty_qt5 INTERFACE) + +set(QT_MIN_VER 5.15.2) + +find_package(Qt5 ${QT_MIN_VER} CONFIG COMPONENTS Widgets Concurrent) +if(WIN32) + find_package(Qt5 ${QT_MIN_VER} COMPONENTS WinExtras REQUIRED) + target_link_libraries(3rdparty_qt5 INTERFACE Qt5::Widgets Qt5::WinExtras Qt5::Concurrent) +else() + find_package(Qt5 ${QT_MIN_VER} COMPONENTS DBus Gui) + if(Qt5DBus_FOUND) + target_link_libraries(3rdparty_qt5 INTERFACE Qt5::Widgets Qt5::DBus Qt5::Concurrent) + target_compile_definitions(3rdparty_qt5 INTERFACE -DHAVE_QTDBUS) + else() + target_link_libraries(3rdparty_qt5 INTERFACE Qt5::Widgets Qt5::Concurrent) + endif() + target_include_directories(3rdparty_qt5 INTERFACE ${Qt5Gui_PRIVATE_INCLUDE_DIRS}) +endif() + +if(NOT Qt5Widgets_FOUND) + if(Qt5Widgets_VERSION VERSION_LESS ${QT_MIN_VER}) + message("Minimum supported Qt5 version is ${QT_MIN_VER}! 
You have version ${Qt5Widgets_VERSION} installed, please upgrade!")
+		if(CMAKE_SYSTEM MATCHES "Linux")
+			message(FATAL_ERROR "Most distros do not provide an up-to-date version of Qt.
+If you're on Ubuntu or Linux Mint, there are PPAs you can use to install one of the latest qt5 versions.
+Find the correct ppa at https://launchpad.net/~beineri and follow the instructions.")
+		elseif(WIN32)
+			message(FATAL_ERROR "You can download the latest version of Qt5 here: https://www.qt.io/download-open-source/")
+		else()
+			message(FATAL_ERROR "Look online for instructions on installing an up-to-date Qt5 on ${CMAKE_SYSTEM}.")
+		endif()
+	endif()
+
+	message("CMake was unable to find Qt5!")
+	if(WIN32)
+		message(FATAL_ERROR "Make sure the QTDIR env variable has been set properly. (for example C:\\Qt\\${QT_MIN_VER}\\msvc2019_64\\)
+You can also try setting the Qt5_DIR CMake variable.")
+	elseif(CMAKE_SYSTEM MATCHES "Linux")
+		message(FATAL_ERROR "Make sure to install your distro's qt5 package!")
+	else()
+		message(FATAL_ERROR "You need to have Qt5 installed, look online for instructions on installing Qt5 on ${CMAKE_SYSTEM}.")
+	endif()
+endif()
+
+add_library(3rdparty::qt5 ALIAS 3rdparty_qt5)
diff --git a/3rdparty/qt6.cmake b/3rdparty/qt6.cmake
deleted file mode 100644
index ef89bdab05..0000000000
--- a/3rdparty/qt6.cmake
+++ /dev/null
@@ -1,43 +0,0 @@
-add_library(3rdparty_qt6 INTERFACE)
-
-set(QT_MIN_VER 6.7.0)
-
-find_package(Qt6 ${QT_MIN_VER} CONFIG COMPONENTS Widgets Concurrent Multimedia MultimediaWidgets Svg SvgWidgets)
-if(WIN32)
-	target_link_libraries(3rdparty_qt6 INTERFACE Qt6::Widgets Qt6::Concurrent Qt6::Multimedia Qt6::MultimediaWidgets Qt6::Svg Qt6::SvgWidgets)
-else()
-	find_package(Qt6 ${QT_MIN_VER} COMPONENTS DBus Gui)
-	if(Qt6DBus_FOUND)
-		target_link_libraries(3rdparty_qt6 INTERFACE Qt6::Widgets Qt6::DBus Qt6::Concurrent Qt6::Multimedia Qt6::MultimediaWidgets Qt6::Svg Qt6::SvgWidgets)
-		target_compile_definitions(3rdparty_qt6 INTERFACE -DHAVE_QTDBUS)
-	else()
-		target_link_libraries(3rdparty_qt6 INTERFACE Qt6::Widgets Qt6::Concurrent Qt6::Multimedia Qt6::MultimediaWidgets Qt6::Svg Qt6::SvgWidgets)
-	endif()
-	target_include_directories(3rdparty_qt6 INTERFACE ${Qt6Gui_PRIVATE_INCLUDE_DIRS})
-endif()
-
-if(Qt6Widgets_FOUND)
-	if(Qt6Widgets_VERSION VERSION_LESS ${QT_MIN_VER})
-		message("Minimum supported Qt version is ${QT_MIN_VER}! You have version ${Qt6Widgets_VERSION} installed, please upgrade!")
-		if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
-			message(FATAL_ERROR "Most distros do not provide an up-to-date version of Qt.
-If you're on Ubuntu or Linux Mint, there are PPAs you can use to install one of the latest qt6 versions.
-Find the correct ppa at https://launchpad.net/~beineri and follow the instructions.")
-		elseif(WIN32)
-			message(FATAL_ERROR "You can download the latest version of Qt6 here: https://www.qt.io/download-open-source/")
-		else()
-			message(FATAL_ERROR "Look online for instructions on installing an up-to-date Qt6 on ${CMAKE_SYSTEM}.")
-		endif()
-	endif()
-else()
-	message("CMake was unable to find Qt6!")
-	if(WIN32)
-		message(FATAL_ERROR "Make sure the Qt6_ROOT environment variable has been set properly.
(for example C:\\Qt\\${QT_MIN_VER}\\msvc2022_64\\)") - elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux") - message(FATAL_ERROR "Make sure to install your distro's qt6 package!") - else() - message(FATAL_ERROR "You need to have Qt6 installed, look online for instructions on installing Qt6 on ${CMAKE_SYSTEM}.") - endif() -endif() - -add_library(3rdparty::qt6 ALIAS 3rdparty_qt6) diff --git a/3rdparty/rtmidi/CMakeLists.txt b/3rdparty/rtmidi/CMakeLists.txt deleted file mode 100644 index 610f858c6d..0000000000 --- a/3rdparty/rtmidi/CMakeLists.txt +++ /dev/null @@ -1,4 +0,0 @@ -option(RTMIDI_API_JACK "Compile with JACK support." OFF) -option(RTMIDI_BUILD_TESTING "Build test programs" OFF) -set(RTMIDI_TARGETNAME_UNINSTALL "uninstall-rpcs3-rtmidi") -add_subdirectory(rtmidi EXCLUDE_FROM_ALL) diff --git a/3rdparty/rtmidi/rtmidi b/3rdparty/rtmidi/rtmidi deleted file mode 160000 index 1e5b49925a..0000000000 --- a/3rdparty/rtmidi/rtmidi +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1e5b49925aa60065db52de44c366d446a902547b diff --git a/3rdparty/stblib/CMakeLists.txt b/3rdparty/stblib/CMakeLists.txt deleted file mode 100644 index 3426459edb..0000000000 --- a/3rdparty/stblib/CMakeLists.txt +++ /dev/null @@ -1,2 +0,0 @@ -add_library(3rdparty_stblib INTERFACE) -target_include_directories(3rdparty_stblib INTERFACE stb) diff --git a/3rdparty/stblib/LICENSE b/3rdparty/stblib/LICENSE new file mode 100644 index 0000000000..8681adb3af --- /dev/null +++ b/3rdparty/stblib/LICENSE @@ -0,0 +1,16 @@ +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/3rdparty/stblib/include/stb_image.h b/3rdparty/stblib/include/stb_image.h new file mode 100644 index 0000000000..0f8459a799 --- /dev/null +++ b/3rdparty/stblib/include/stb_image.h @@ -0,0 +1,7890 @@ +/* stb_image - v2.27 - public domain image loader - http://nothings.org/stb + no warranty implied; use at your own risk + + Do this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. 
+ And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + +LICENSE + + See end of file for license information. + +RECENT REVISION HISTORY: + + 2.27 (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes + 2.26 (2020-07-13) many minor fixes + 2.25 (2020-02-02) fix warnings + 2.24 (2020-02-02) fix warnings; thread-local failure_reason and flip_vertically + 2.23 (2019-08-11) fix clang static analysis warning + 2.22 (2019-03-04) gif fixes, fix warnings + 2.21 (2019-02-25) fix typo in comment + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + + See end of file for full revision history. 
+ + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes Mikhail Morozov (1-bit BMP) + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine Simon Breuss (16-bit PNM) + John-Mark Allen + Carmelo J Fdez-Aguera + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph Thomson Blazej Dariusz Roszkowski + Phil Jordan Dave Moore Roy Eltham + Hayaki Saito Nathan Reed Won Chun + Luke Graham Johan Duparc Nick Verigakis the Horde3D community + Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Eugene Golushkov Laurent Gomila Cort Stratton github:snagar + Aruelien Pocheville Sergio Gonzalez Thibault Reuille github:Zelex + Cass Everitt Ryamond Barbiero github:grim210 + Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw + Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus + Josh Tobin Matthew Gregan github:poppolopoppo + Julian Raschke Gregory Mullen Christian Floisand github:darealshinji + Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 + Brad Weinberger Matvey Cherevko github:mosra + Luca Sas Alexander Veselov Zack Middleton [reserved] + Ryan C. Gordon [reserved] [reserved] + DO NOT ADD YOUR NAME HERE + + Jacko Dirks + + To add your name to the credits, pick a random blank space in the middle and fill it. + 80% of merge conflicts on stb PRs are due to people adding their name at the end + of the credits. +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data) +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. 
There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'desired_channels' if desired_channels is non-zero, or +// *channels_in_file otherwise. If desired_channels is non-zero, +// *channels_in_file has the number of components that _would_ have been +// output otherwise. E.g. if you set desired_channels to 4, you will always +// get RGBA output, but you can check *channels_in_file to see if it's trivially +// opaque because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *channels_in_file will be unchanged. The function +// stbi_failure_reason() can be queried for an extremely brief, end-user +// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS +// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// To query the width, height and component count of an image without having to +// decode the full file, you can use the stbi_info family of functions: +// +// int x,y,n,ok; +// ok = stbi_info(filename, &x, &y, &n); +// // returns ok=1 and sets x, y, n if image is a supported format, +// // 0 otherwise. +// +// Note that stb_image pervasively uses ints in its public API for sizes, +// including sizes of memory buffers. This is now part of the API and thus +// hard to change without causing breakage. As a result, the various image +// loaders all have certain limits on image size; these differ somewhat +// by format but generally boil down to either just under 2GB or just under +// 1GB. When the decoded image would be larger than this, stb_image decoding +// will fail. +// +// Additionally, stb_image will reject image files that have any of their +// dimensions set to a larger value than the configurable STBI_MAX_DIMENSIONS, +// which defaults to 2**24 = 16777216 pixels. Due to the above memory limit, +// the only way to have an image with such dimensions load correctly +// is for it to have a rather extreme aspect ratio. Either way, the +// assumption here is that such larger images are likely to be malformed +// or malicious. If you do need to load an image with individual dimensions +// larger than that, and it still fits in the overall size limit, you can +// #define STBI_MAX_DIMENSIONS on your own to be something larger. +// +// =========================================================================== +// +// UNICODE: +// +// If compiling for Windows and you wish to use Unicode filenames, compile +// with +// #define STBI_WINDOWS_UTF8 +// and pass utf8-encoded filenames. Call stbi_convert_wchar_to_utf8 to convert +// Windows wchar_t filenames to utf8. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy-to-use ones. 
Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// provide more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small source code footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image supports loading HDR images in general, and currently the Radiance +// .HDR file format specifically. You can still load any file through the existing +// interface; if you attempt to load an HDR file, it will be automatically remapped +// to LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). 
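+//
+// For example (an editor's sketch, not upstream text -- "photo.hdr" is a
+// hypothetical filename), an HDR file can be tone-mapped to 8-bit LDR at
+// load time through the regular interface using the two knobs above:
+//
+//     stbi_hdr_to_ldr_gamma(2.2f);
+//     stbi_hdr_to_ldr_scale(1.0f);
+//     int x, y, n;
+//     unsigned char *ldr = stbi_load("photo.hdr", &x, &y, &n, 0);
+//     // ... use the 8-bit pixel data, then free it ...
+//     stbi_image_free(ldr);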
+// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// We optionally support converting iPhone-formatted PNGs (which store +// premultiplied BGRA) back to RGB, even though they're internally encoded +// differently. To enable this conversion, call +// stbi_convert_iphone_png_to_rgb(1). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. +// +// STBI_NO_JPEG +// STBI_NO_PNG +// STBI_NO_BMP +// STBI_NO_PSD +// STBI_NO_TGA +// STBI_NO_GIF +// STBI_NO_HDR +// STBI_NO_PIC +// STBI_NO_PNM (.ppm and .pgm) +// +// - You can request *only* certain decoders and suppress all other ones +// (this will be more forward-compatible, as addition of new decoders +// doesn't require you to disable them explicitly): +// +// STBI_ONLY_JPEG +// STBI_ONLY_PNG +// STBI_ONLY_BMP +// STBI_ONLY_PSD +// STBI_ONLY_TGA +// STBI_ONLY_GIF +// STBI_ONLY_HDR +// STBI_ONLY_PIC +// STBI_ONLY_PNM (.ppm and .pgm) +// +// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still +// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB +// +// - If you define STBI_MAX_DIMENSIONS, stb_image will reject images greater +// than that size (in either width or height) without further processing. +// This is to let programs in the wild set an upper bound to prevent +// denial-of-service attacks on untrusted data, as one could generate a +// valid image of gigantic dimensions and force stb_image to allocate a +// huge block of memory and spend disproportionate time decoding it. By +// default this is set to (1 << 24), which is 16777216, but that's still +// very big. 
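+//
+// For instance (an editor's sketch, not upstream text -- my_malloc,
+// my_realloc and my_free are hypothetical application allocators), the
+// single implementation translation unit could raise that cap and plug in
+// custom allocation like so (note that STBI_MALLOC, STBI_REALLOC and
+// STBI_FREE must be defined all together or not at all):
+//
+//     #define STBI_MAX_DIMENSIONS (1 << 26)
+//     #define STBI_MALLOC(sz)        my_malloc(sz)
+//     #define STBI_REALLOC(p,newsz)  my_realloc(p,newsz)
+//     #define STBI_FREE(p)           my_free(p)
+//     #define STB_IMAGE_IMPLEMENTATION
+//     #include "stb_image.h"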
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif // STBI_NO_STDIO
+
+#define STBI_VERSION 1
+
+enum
+{
+   STBI_default = 0, // only used for desired_channels
+
+   STBI_grey       = 1,
+   STBI_grey_alpha = 2,
+   STBI_rgb        = 3,
+   STBI_rgb_alpha  = 4
+};
+
+#include <stdlib.h>
+typedef unsigned char stbi_uc;
+typedef unsigned short stbi_us;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef STBIDEF
+#ifdef STB_IMAGE_STATIC
+#define STBIDEF static
+#else
+#define STBIDEF extern
+#endif
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// PRIMARY API - works on images of any type
+//
+
+//
+// load image by filename, open file, or memory buffer
+//
+
+typedef struct
+{
+   int      (*read)  (void *user,char *data,int size);   // fill 'data' with 'size' bytes.  return number of bytes actually read
+   void     (*skip)  (void *user,int n);                 // skip the next 'n' bytes, or 'unget' the last -n bytes if negative
+   int      (*eof)   (void *user);                       // returns nonzero if we are at end of file/data
+} stbi_io_callbacks;
+
+////////////////////////////////////
+//
+// 8-bits-per-channel interface
+//
+
+STBIDEF stbi_uc *stbi_load_from_memory   (stbi_uc const *buffer, int len   , int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk  , void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_uc *stbi_load            (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_file  (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+// for stbi_load_from_file, file pointer is left pointing immediately after image
+#endif
+
+#ifndef STBI_NO_GIF
+STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp);
+#endif
+
+#ifdef STBI_WINDOWS_UTF8
+STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input);
+#endif
+
+////////////////////////////////////
+//
+// 16-bits-per-channel interface
+//
+
+STBIDEF stbi_us *stbi_load_16_from_memory   (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_us *stbi_load_16          (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+#endif
+
+////////////////////////////////////
+//
+// float-per-channel interface
+//
+#ifndef STBI_NO_LINEAR
+   STBIDEF float *stbi_loadf_from_memory     (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+   STBIDEF float *stbi_loadf_from_callbacks  (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+   #ifndef STBI_NO_STDIO
+   STBIDEF float *stbi_loadf            (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+   STBIDEF float *stbi_loadf_from_file  (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+   #endif
+#endif
+
+#ifndef STBI_NO_HDR
+   STBIDEF void   stbi_hdr_to_ldr_gamma(float gamma);
+   STBIDEF void   stbi_hdr_to_ldr_scale(float scale);
+#endif // STBI_NO_HDR
+
+#ifndef STBI_NO_LINEAR
+   STBIDEF void   stbi_ldr_to_hdr_gamma(float gamma);
+   STBIDEF void
stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// on most compilers (and ALL modern mainstream compilers) this is threadsafe +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len); +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); +STBIDEF int stbi_is_16_bit (char const *filename); +STBIDEF int stbi_is_16_bit_from_file(FILE *f); +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. results are undefined if the unpremultiply overflow. +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + +// indicate whether we should process iphone images back to canonical format, +// or just pass them through "as-is" +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + +// flip the image vertically, so the first pixel in the output array is the bottom left +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + +// as above, but only applies to images loaded on the thread that calls the function +// this function is only available if your compiler supports thread-local variables; +// calling it will fail to link if your compiler doesn't +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply); +STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert); +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip); + +// ZLIB client - used by PNG, available for other purposes + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); +STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + +STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || 
defined(STBI_ONLY_BMP) \
+  || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \
+  || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \
+  || defined(STBI_ONLY_ZLIB)
+   #ifndef STBI_ONLY_JPEG
+   #define STBI_NO_JPEG
+   #endif
+   #ifndef STBI_ONLY_PNG
+   #define STBI_NO_PNG
+   #endif
+   #ifndef STBI_ONLY_BMP
+   #define STBI_NO_BMP
+   #endif
+   #ifndef STBI_ONLY_PSD
+   #define STBI_NO_PSD
+   #endif
+   #ifndef STBI_ONLY_TGA
+   #define STBI_NO_TGA
+   #endif
+   #ifndef STBI_ONLY_GIF
+   #define STBI_NO_GIF
+   #endif
+   #ifndef STBI_ONLY_HDR
+   #define STBI_NO_HDR
+   #endif
+   #ifndef STBI_ONLY_PIC
+   #define STBI_NO_PIC
+   #endif
+   #ifndef STBI_ONLY_PNM
+   #define STBI_NO_PNM
+   #endif
+#endif
+
+#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB)
+#define STBI_NO_ZLIB
+#endif
+
+
+#include <stdarg.h>
+#include <stddef.h> // ptrdiff_t on osx
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
+#include <math.h>  // ldexp, pow
+#endif
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif
+
+#ifndef STBI_ASSERT
+#include <assert.h>
+#define STBI_ASSERT(x) assert(x)
+#endif
+
+#ifdef __cplusplus
+#define STBI_EXTERN extern "C"
+#else
+#define STBI_EXTERN extern
+#endif
+
+
+#ifndef _MSC_VER
+   #ifdef __cplusplus
+   #define stbi_inline inline
+   #else
+   #define stbi_inline
+   #endif
+#else
+   #define stbi_inline __forceinline
+#endif
+
+#ifndef STBI_NO_THREAD_LOCALS
+   #if defined(__cplusplus) &&  __cplusplus >= 201103L
+      #define STBI_THREAD_LOCAL       thread_local
+   #elif defined(__GNUC__) && __GNUC__ < 5
+      #define STBI_THREAD_LOCAL       __thread
+   #elif defined(_MSC_VER)
+      #define STBI_THREAD_LOCAL       __declspec(thread)
+   #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__)
+      #define STBI_THREAD_LOCAL       _Thread_local
+   #endif
+
+   #ifndef STBI_THREAD_LOCAL
+      #if defined(__GNUC__)
+        #define STBI_THREAD_LOCAL       __thread
+      #endif
+   #endif
+#endif
+
+#ifdef _MSC_VER
+typedef unsigned short stbi__uint16;
+typedef   signed short stbi__int16;
+typedef unsigned int   stbi__uint32;
+typedef   signed int   stbi__int32;
+#else
+#include <stdint.h>
+typedef uint16_t stbi__uint16;
+typedef int16_t  stbi__int16;
+typedef uint32_t stbi__uint32;
+typedef int32_t  stbi__int32;
+#endif
+
+// should produce compiler error if size is wrong
+typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1];
+
+#ifdef _MSC_VER
+#define STBI_NOTUSED(v)  (void)(v)
+#else
+#define STBI_NOTUSED(v)  (void)sizeof(v)
+#endif
+
+#ifdef _MSC_VER
+#define STBI_HAS_LROTL
+#endif
+
+#ifdef STBI_HAS_LROTL
+   #define stbi_lrot(x,y)  _lrotl(x,y)
+#else
+   #define stbi_lrot(x,y)  (((x) << (y)) | ((x) >> (-(y) & 31)))
+#endif
+
+#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED))
+// ok
+#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED)
+// ok
+#else
+#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)."
+#endif
+
+#ifndef STBI_MALLOC
+#define STBI_MALLOC(sz)           malloc(sz)
+#define STBI_REALLOC(p,newsz)     realloc(p,newsz)
+#define STBI_FREE(p)              free(p)
+#endif
+
+#ifndef STBI_REALLOC_SIZED
+#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz)
+#endif
+
+// x86/x64 detection
+#if defined(__x86_64__) || defined(_M_X64)
+#define STBI__X64_TARGET
+#elif defined(__i386) || defined(_M_IX86)
+#define STBI__X86_TARGET
+#endif
+
+#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD)
+// gcc doesn't support sse2 intrinsics unless you compile with -msse2,
+// which in turn means it gets to use SSE2 everywhere. This is unfortunate,
+// but previous attempts to provide the SSE2 functions with runtime
+// detection caused numerous issues. The way architecture extensions are
+// exposed in GCC/Clang is, sadly, not really suited for one-file libs.
+// New behavior: if compiled with -msse2, we use SSE2 without any
+// detection; if not, we don't use it at all.
+#define STBI_NO_SIMD
+#endif
+
+#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD)
+// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET
+//
+// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the
+// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant.
+// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not
+// simultaneously enabling "-mstackrealign".
+//
+// See https://github.com/nothings/stb/issues/81 for more information.
+//
+// So default to no SSE2 on 32-bit MinGW. If you've read this far and added
+// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2.
+#define STBI_NO_SIMD
+#endif
+
+#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET))
+#define STBI_SSE2
+#include <emmintrin.h>
+
+#ifdef _MSC_VER
+
+#if _MSC_VER >= 1400  // not VC6
+#include <intrin.h> // __cpuid
+static int stbi__cpuid3(void)
+{
+   int info[4];
+   __cpuid(info,1);
+   return info[3];
+}
+#else
+static int stbi__cpuid3(void)
+{
+   int res;
+   __asm {
+      mov  eax,1
+      cpuid
+      mov  res,edx
+   }
+   return res;
+}
+#endif
+
+#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
+
+#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2)
+static int stbi__sse2_available(void)
+{
+   int info3 = stbi__cpuid3();
+   return ((info3 >> 26) & 1) != 0;
+}
+#endif
+
+#else // assume GCC-style if not VC++
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+
+#if !defined(STBI_NO_JPEG) && defined(STBI_SSE2)
+static int stbi__sse2_available(void)
+{
+   // If we're even attempting to compile this on GCC/Clang, that means
+   // -msse2 is on, which means the compiler is allowed to use SSE2
+   // instructions at will, and so are we.
+ return 1;
+}
+#endif
+
+#endif
+#endif
+
+// ARM NEON
+#if defined(STBI_NO_SIMD) && defined(STBI_NEON)
+#undef STBI_NEON
+#endif
+
+#ifdef STBI_NEON
+#include <arm_neon.h>
+#ifdef _MSC_VER
+#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
+#else
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+#endif
+#endif
+
+#ifndef STBI_SIMD_ALIGN
+#define STBI_SIMD_ALIGN(type, name) type name
+#endif
+
+#ifndef STBI_MAX_DIMENSIONS
+#define STBI_MAX_DIMENSIONS (1 << 24)
+#endif
+
+///////////////////////////////////////////////
+//
+// stbi__context struct and start_xxx functions
+
+// stbi__context structure is our basic context used by all images, so it
+// contains all the IO context, plus some basic image information
+typedef struct
+{
+ stbi__uint32 img_x, img_y;
+ int img_n, img_out_n;
+
+ stbi_io_callbacks io;
+ void *io_user_data;
+
+ int read_from_callbacks;
+ int buflen;
+ stbi_uc buffer_start[128];
+ int callback_already_read;
+
+ stbi_uc *img_buffer, *img_buffer_end;
+ stbi_uc *img_buffer_original, *img_buffer_original_end;
+} stbi__context;
+
+
+static void stbi__refill_buffer(stbi__context *s);
+
+// initialize a memory-decode context
+static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len)
+{
+ s->io.read = NULL;
+ s->read_from_callbacks = 0;
+ s->callback_already_read = 0;
+ s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer;
+ s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len;
+}
+
+// initialize a callback-based context
+static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user)
+{
+ s->io = *c;
+ s->io_user_data = user;
+ s->buflen = sizeof(s->buffer_start);
+ s->read_from_callbacks = 1;
+ s->callback_already_read = 0;
+ s->img_buffer = s->img_buffer_original = s->buffer_start;
+ stbi__refill_buffer(s);
+ s->img_buffer_original_end = s->img_buffer_end;
+}
+
+#ifndef STBI_NO_STDIO
+
+static int stbi__stdio_read(void *user, char *data, int size)
+{
+ return (int) fread(data,1,size,(FILE*) user);
+}
+
+static void stbi__stdio_skip(void *user, int n)
+{
+ int ch;
+ fseek((FILE*) user, n, SEEK_CUR);
+ ch = fgetc((FILE*) user); /* have to read a byte to reset feof()'s flag */
+ if (ch != EOF) {
+ ungetc(ch, (FILE *) user); /* push byte back onto stream if valid.
*/ + } +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*) user) || ferror((FILE *) user); +} + +static stbi_io_callbacks stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__png_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +static int 
stbi__pnm_is16(stbi__context *s);
+#endif
+
+static
+#ifdef STBI_THREAD_LOCAL
+STBI_THREAD_LOCAL
+#endif
+const char *stbi__g_failure_reason;
+
+STBIDEF const char *stbi_failure_reason(void)
+{
+ return stbi__g_failure_reason;
+}
+
+#ifndef STBI_NO_FAILURE_STRINGS
+static int stbi__err(const char *str)
+{
+ stbi__g_failure_reason = str;
+ return 0;
+}
+#endif
+
+static void *stbi__malloc(size_t size)
+{
+ return STBI_MALLOC(size);
+}
+
+// stb_image uses ints pervasively, including for offset calculations.
+// therefore the largest decoded image size we can support with the
+// current code, even on 64-bit targets, is INT_MAX. this is not a
+// significant limitation for the intended use case.
+//
+// we do, however, need to make sure our size calculations don't
+// overflow. hence a few helper functions for size calculations that
+// multiply integers together, making sure that they're non-negative
+// and no overflow occurs.
+
+// return 1 if the sum is valid, 0 on overflow.
+// negative terms are considered invalid.
+static int stbi__addsizes_valid(int a, int b)
+{
+ if (b < 0) return 0;
+ // now 0 <= b <= INT_MAX, hence also
+ // 0 <= INT_MAX - b <= INT_MAX.
+ // And "a + b <= INT_MAX" (which might overflow) is the
+ // same as a <= INT_MAX - b (no overflow)
+ return a <= INT_MAX - b;
+}
+
+// returns 1 if the product is valid, 0 on overflow.
+// negative factors are considered invalid.
+static int stbi__mul2sizes_valid(int a, int b)
+{
+ if (a < 0 || b < 0) return 0;
+ if (b == 0) return 1; // mul-by-0 is always safe
+ // portable way to check for no overflows in a*b
+ return a <= INT_MAX/b;
+}
+
+#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR)
+// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow
+static int stbi__mad2sizes_valid(int a, int b, int add)
+{
+ return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add);
+}
+#endif
+
+// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow
+static int stbi__mad3sizes_valid(int a, int b, int c, int add)
+{
+ return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) &&
+ stbi__addsizes_valid(a*b*c, add);
+}
+
+// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
+static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add)
+{
+ return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) &&
+ stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add);
+}
+#endif
+
+#if !defined(STBI_NO_JPEG) || !defined(STBI_NO_PNG) || !defined(STBI_NO_TGA) || !defined(STBI_NO_HDR)
+// mallocs with size overflow checking
+static void *stbi__malloc_mad2(int a, int b, int add)
+{
+ if (!stbi__mad2sizes_valid(a, b, add)) return NULL;
+ return stbi__malloc(a*b + add);
+}
+#endif
+
+static void *stbi__malloc_mad3(int a, int b, int c, int add)
+{
+ if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL;
+ return stbi__malloc(a*b*c + add);
+}
+
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
+static void *stbi__malloc_mad4(int a, int b, int c, int d, int add)
+{
+ if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL;
+ return stbi__malloc(a*b*c*d + add);
+}
+#endif
+
+// stbi__err - error
+// stbi__errpf - error returning pointer to float
+// stbi__errpuc - error returning pointer to unsigned char
+
+#ifdef STBI_NO_FAILURE_STRINGS
+ #define stbi__err(x,y) 0
+#elif defined(STBI_FAILURE_USERMSG)
+ #define stbi__err(x,y)
stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load_global = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_global = flag_true_if_should_flip; +} + +#ifndef STBI_THREAD_LOCAL +#define stbi__vertically_flip_on_load stbi__vertically_flip_on_load_global +#else +static STBI_THREAD_LOCAL int stbi__vertically_flip_on_load_local, stbi__vertically_flip_on_load_set; + +STBIDEF void stbi_set_flip_vertically_on_load_thread(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load_local = flag_true_if_should_flip; + stbi__vertically_flip_on_load_set = 1; +} + +#define stbi__vertically_flip_on_load (stbi__vertically_flip_on_load_set \ + ? stbi__vertically_flip_on_load_local \ + : stbi__vertically_flip_on_load_global) +#endif // STBI_THREAD_LOCAL + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + // test the formats with a very explicit header first (at least a FOURCC + // or distinctive magic number first) + #ifndef STBI_NO_PNG + if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #else + STBI_NOTUSED(bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + + // then the formats that can end up attempting to load with just 1 or 2 + // bytes matching expectations; these are prone to false positives, so + // try them later + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! 
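+ // (TGA has no magic number in its header, so stbi__tga_test can only
+ // sanity-check a few header fields for plausible values.)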
+ if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } +} + +#ifndef STBI_NO_GIF +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } +} +#endif + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 8) { + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + // it is the responsibility of the loaders to make sure we get either 8 or 16 bit. + STBI_ASSERT(ri.bits_per_channel == 8 || ri.bits_per_channel == 16); + + if (ri.bits_per_channel != 16) { + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? 
*comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) && !defined(STBI_NO_LINEAR) +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } +} +#endif + +#ifndef STBI_NO_STDIO + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBI_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBI_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); +#endif + +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) +STBIDEF int stbi_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) +{ + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); +} +#endif + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_WIN32) && defined(STBI_WINDOWS_UTF8) + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) + return 0; + + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) + return 0; + +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; +#else + f = _wfopen(wFilename, wMode); +#endif + +#elif defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = 
stbi_load_from_file_16(f,x,y,comp,req_comp);
+ fclose(f);
+ return result;
+}
+
+
+#endif //!STBI_NO_STDIO
+
+STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels)
+{
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+ return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels);
+}
+
+STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels)
+{
+ stbi__context s;
+ stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user);
+ return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels);
+}
+
+STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+ return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
+}
+
+STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
+ return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
+}
+
+#ifndef STBI_NO_GIF
+STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp)
+{
+ unsigned char *result;
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+
+ result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp);
+ if (stbi__vertically_flip_on_load) {
+ stbi__vertical_flip_slices( result, *x, *y, *z, *comp );
+ }
+
+ return result;
+}
+#endif
+
+#ifndef STBI_NO_LINEAR
+static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp)
+{
+ unsigned char *data;
+ #ifndef STBI_NO_HDR
+ if (stbi__hdr_test(s)) {
+ stbi__result_info ri;
+ float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri);
+ if (hdr_data)
+ stbi__float_postprocess(hdr_data,x,y,comp,req_comp);
+ return hdr_data;
+ }
+ #endif
+ data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp);
+ if (data)
+ return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp);
+ return stbi__errpf("unknown image type", "Image not of any known type, or corrupt");
+}
+
+STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+ return stbi__loadf_main(&s,x,y,comp,req_comp);
+}
+
+STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
+ return stbi__loadf_main(&s,x,y,comp,req_comp);
+}
+
+#ifndef STBI_NO_STDIO
+STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp)
+{
+ float *result;
+ FILE *f = stbi__fopen(filename, "rb");
+ if (!f) return stbi__errpf("can't fopen", "Unable to open file");
+ result = stbi_loadf_from_file(f,x,y,comp,req_comp);
+ fclose(f);
+ return result;
+}
+
+STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_file(&s,f);
+ return stbi__loadf_main(&s,x,y,comp,req_comp);
+}
+#endif // !STBI_NO_STDIO
+
+#endif // !STBI_NO_LINEAR
+
+// these is-hdr-or-not queries are defined independent of whether
+// STBI_NO_LINEAR is defined, for API simplicity; if STBI_NO_LINEAR is
+// defined, they always report false!
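+//
+// Illustrative usage sketch (the file name is just an example): pick the
+// float API for Radiance HDR inputs and the 8-bit API for everything else:
+//
+//    int w, h, n;
+//    if (stbi_is_hdr("skybox.hdr")) {
+//       float *pix = stbi_loadf("skybox.hdr", &w, &h, &n, 0);
+//       /* ... use linear float data ... */
+//       stbi_image_free(pix);
+//    } else {
+//       unsigned char *pix = stbi_load("skybox.hdr", &w, &h, &n, 0);
+//       /* ... use 8-bit data ... */
+//       stbi_image_free(pix);
+//    }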
+ +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + long pos = ftell(f); + int res; + stbi__context s; + stbi__start_file(&s,f); + res = stbi__hdr_test(&s); + fseek(f, pos, SEEK_SET); + return res; + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + s->callback_already_read += (int) (s->img_buffer - s->img_buffer_original); + if (n == 0) { + // at end of file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_HDR) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) +// nothing +#else +static void stbi__skip(stbi__context *s, int n) +{ + if (n == 0) return; // already there! 
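+ // a negative skip is invalid; exhaust the buffer so later reads fail
+ // cleanly instead of attempting to seek backwards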
+ if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_TGA) && defined(STBI_NO_HDR) && defined(STBI_NO_PNM) +// nothing +#else +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} +#endif + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) && defined(STBI_NO_PIC) +// nothing +#else +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} +#endif + +#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + z += (stbi__uint32)stbi__get16le(s) << 16; + return z; +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + +#if defined(STBI_NO_JPEG) && defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). 
png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_BMP) && defined(STBI_NO_PSD) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) && defined(STBI_NO_PIC) && defined(STBI_NO_PNM) +// nothing +#else +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} +#endif + +#if defined(STBI_NO_PNG) && defined(STBI_NO_PSD) +// nothing +#else +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, 
so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0]; dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0]; dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2];dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0];dest[1]=src[1];dest[2]=src[2]; } break; + default: STBI_ASSERT(0); STBI_FREE(data); STBI_FREE(good); return (stbi__uint16*) stbi__errpuc("unsupported", "Unsupported format conversion"); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + } + if (n < comp) { + for (i=0; i < x*y; ++i) { + output[i*comp + n] = data[i*comp + n]/255.0f; + } + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 
'slow' IDCT
+// performance
+// - fast huffman; reasonable integer IDCT
+// - some SIMD kernels for common paths on targets with SSE2/NEON
+// - uses a lot of intermediate memory, could cache poorly
+
+#ifndef STBI_NO_JPEG
+
+// huffman decoding acceleration
+#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache
+
+typedef struct
+{
+ stbi_uc fast[1 << FAST_BITS];
+ // weirdly, repacking this into AoS is a 10% speed loss, instead of a win
+ stbi__uint16 code[256];
+ stbi_uc values[256];
+ stbi_uc size[257];
+ unsigned int maxcode[18];
+ int delta[17]; // old 'firstsymbol' - old 'firstcode'
+} stbi__huffman;
+
+typedef struct
+{
+ stbi__context *s;
+ stbi__huffman huff_dc[4];
+ stbi__huffman huff_ac[4];
+ stbi__uint16 dequant[4][64];
+ stbi__int16 fast_ac[4][1 << FAST_BITS];
+
+// sizes for components, interleaved MCUs
+ int img_h_max, img_v_max;
+ int img_mcu_x, img_mcu_y;
+ int img_mcu_w, img_mcu_h;
+
+// definition of jpeg image component
+ struct
+ {
+ int id;
+ int h,v;
+ int tq;
+ int hd,ha;
+ int dc_pred;
+
+ int x,y,w2,h2;
+ stbi_uc *data;
+ void *raw_data, *raw_coeff;
+ stbi_uc *linebuf;
+ short *coeff; // progressive only
+ int coeff_w, coeff_h; // number of 8x8 coefficient blocks
+ } img_comp[4];
+
+ stbi__uint32 code_buffer; // jpeg entropy-coded buffer
+ int code_bits; // number of valid bits
+ unsigned char marker; // marker seen while filling entropy buffer
+ int nomore; // flag if we saw a marker so must stop
+
+ int progressive;
+ int spec_start;
+ int spec_end;
+ int succ_high;
+ int succ_low;
+ int eob_run;
+ int jfif;
+ int app14_color_transform; // Adobe APP14 tag
+ int rgb;
+
+ int scan_n, order[4];
+ int restart_interval, todo;
+
+// kernels
+ void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]);
+ void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step);
+ stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs);
+} stbi__jpeg;
+
+static int stbi__build_huffman(stbi__huffman *h, int *count)
+{
+ int i,j,k=0;
+ unsigned int code;
+ // build size list for each symbol (from JPEG spec)
+ for (i=0; i < 16; ++i)
+ for (j=0; j < count[i]; ++j)
+ h->size[k++] = (stbi_uc) (i+1);
+ h->size[k] = 0;
+
+ // compute actual symbols (from jpeg spec)
+ code = 0;
+ k = 0;
+ for(j=1; j <= 16; ++j) {
+ // compute delta to add to code to compute symbol id
+ h->delta[j] = k - code;
+ if (h->size[k] == j) {
+ while (h->size[k] == j)
+ h->code[k++] = (stbi__uint16) (code++);
+ if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG");
+ }
+ // compute largest code + 1 for this size, preshifted as needed later
+ h->maxcode[j] = code << (16-j);
+ code <<= 1;
+ }
+ h->maxcode[j] = 0xffffffff;
+
+ // build non-spec acceleration table; 255 is flag for not-accelerated
+ memset(h->fast, 255, 1 << FAST_BITS);
+ for (i=0; i < k; ++i) {
+ int s = h->size[i];
+ if (s <= FAST_BITS) {
+ int c = h->code[i] << (FAST_BITS-s);
+ int m = 1 << (FAST_BITS-s);
+ for (j=0; j < m; ++j) {
+ h->fast[c+j] = (stbi_uc) i;
+ }
+ }
+ }
+ return 1;
+}
+
+// build a table that decodes both magnitude and value of small ACs in
+// one go.
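+// Each fast_ac entry packs three fields into one stbi__int16, matching the
+// shifts used below and in the decode loop: bits 8..15 hold the signed
+// coefficient value, bits 4..7 the zero-run length, and bits 0..3 the
+// combined number of bits consumed from the stream.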
+static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + unsigned int b = j->nomore ? 0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! 
code not found
+ j->code_bits -= 16;
+ return -1;
+ }
+
+ if (k > j->code_bits)
+ return -1;
+
+ // convert the huffman code to the symbol id
+ c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k];
+ STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]);
+
+ // convert the id to a symbol
+ j->code_bits -= k;
+ j->code_buffer <<= k;
+ return h->values[c];
+}
+
+// bias[n] = (-1<<n) + 1
+static const int stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767};
+
+// combined JPEG 'receive' and JPEG 'extend', since baseline
+// always extends everything it receives.
+stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n)
+{
+ unsigned int k;
+ int sgn;
+ if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
+
+ sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative)
+ k = stbi_lrot(j->code_buffer, n);
+ j->code_buffer = k & ~stbi__bmask[n];
+ k &= stbi__bmask[n];
+ j->code_bits -= n;
+ return k + (stbi__jbias[n] & (sgn - 1));
+}
+
+// get some unsigned bits
+stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n)
+{
+ unsigned int k;
+ if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
+ k = stbi_lrot(j->code_buffer, n);
+ j->code_buffer = k & ~stbi__bmask[n];
+ k &= stbi__bmask[n];
+ j->code_bits -= n;
+ return k;
+}
+
+stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j)
+{
+ unsigned int k;
+ if (j->code_bits < 1) stbi__grow_buffer_unsafe(j);
+ k = j->code_buffer;
+ j->code_buffer <<= 1;
+ --j->code_bits;
+ return k & 0x80000000;
+}
+
+// given a value that's at position X in the zigzag stream,
+// where does it appear in the 8x8 matrix coded as row-major?
+static const stbi_uc stbi__jpeg_dezigzag[64+15] =
+{
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63,
+ // let corrupt input sample past end
+ 63, 63, 63, 63, 63, 63, 63, 63,
+ 63, 63, 63, 63, 63, 63, 63
+};
+
+// decode one 64-entry block--
+static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant)
+{
+ int diff,dc,k;
+ int t;
+
+ if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
+ t = stbi__jpeg_huff_decode(j, hdc);
+ if (t < 0 || t > 15) return stbi__err("bad huffman code","Corrupt JPEG");
+
+ // 0 all the ac values now so we can do it 32-bits at a time
+ memset(data,0,64*sizeof(data[0]));
+
+ diff = t ?
stbi__extend_receive(j, t) : 0; + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0 || t > 15) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + diff = t ? stbi__extend_receive(j, t) : 0; + + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc * (1 << j->succ_low)); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * (1 << shift)); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * (1 << shift)); + } + } + } while (k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = 
j->spec_start; + do { + int r,s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2+p3) * stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds + int dcterm = d[0]*4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out 
+ STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. + __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + #define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + /* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + 
dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. + __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... 
+ + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + 
dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. + dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! 
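+      // the transpose below is the usual three-step NEON idiom: vtrn_u8
+      // swaps adjacent bytes, vtrn_u16 adjacent 16-bit pairs, and vtrn_u32
+      // adjacent 32-bit halves; composing the three passes transposes the
+      // full 8x8 byte matrix.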
+
+      // 8x8 8-bit transpose pass 1
+      dct_trn8_8(p0, p1);
+      dct_trn8_8(p2, p3);
+      dct_trn8_8(p4, p5);
+      dct_trn8_8(p6, p7);
+
+      // pass 2
+      dct_trn8_16(p0, p2);
+      dct_trn8_16(p1, p3);
+      dct_trn8_16(p4, p6);
+      dct_trn8_16(p5, p7);
+
+      // pass 3
+      dct_trn8_32(p0, p4);
+      dct_trn8_32(p1, p5);
+      dct_trn8_32(p2, p6);
+      dct_trn8_32(p3, p7);
+
+      // store
+      vst1_u8(out, p0); out += out_stride;
+      vst1_u8(out, p1); out += out_stride;
+      vst1_u8(out, p2); out += out_stride;
+      vst1_u8(out, p3); out += out_stride;
+      vst1_u8(out, p4); out += out_stride;
+      vst1_u8(out, p5); out += out_stride;
+      vst1_u8(out, p6); out += out_stride;
+      vst1_u8(out, p7);
+
+#undef dct_trn8_8
+#undef dct_trn8_16
+#undef dct_trn8_32
+   }
+
+#undef dct_long_mul
+#undef dct_long_mac
+#undef dct_widen
+#undef dct_wadd
+#undef dct_wsub
+#undef dct_bfly32o
+#undef dct_pass
+}
+
+#endif // STBI_NEON
+
+#define STBI__MARKER_none  0xff
+// if there's a pending marker from the entropy stream, return that
+// otherwise, fetch from the stream and get a marker. if there's no
+// marker, return 0xff, which is never a valid marker value
+static stbi_uc stbi__get_marker(stbi__jpeg *j)
+{
+   stbi_uc x;
+   if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; }
+   x = stbi__get8(j->s);
+   if (x != 0xff) return STBI__MARKER_none;
+   while (x == 0xff)
+      x = stbi__get8(j->s); // consume repeated 0xff fill bytes
+   return x;
+}
+
+// in each scan, we'll have scan_n components, and the order
+// of the components is specified by order[]
+#define STBI__RESTART(x)     ((x) >= 0xd0 && (x) <= 0xd7)
+
+// after a restart interval, stbi__jpeg_reset the entropy decoder and
+// the dc prediction
+static void stbi__jpeg_reset(stbi__jpeg *j)
+{
+   j->code_bits = 0;
+   j->code_buffer = 0;
+   j->nomore = 0;
+   j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0;
+   j->marker = STBI__MARKER_none;
+   j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff;
+   j->eob_run = 0;
+   // no more than 1<<31 MCUs if no restart_interval? that's plenty safe,
+   // since we don't even allow 1<<30 pixels
+}
+
+static int stbi__parse_entropy_coded_data(stbi__jpeg *z)
+{
+   stbi__jpeg_reset(z);
+   if (!z->progressive) {
+      if (z->scan_n == 1) {
+         int i,j;
+         STBI_SIMD_ALIGN(short, data[64]);
+         int n = z->order[0];
+         // non-interleaved data, we just need to process one block at a time,
+         // in trivial scanline order
+         // number of blocks to do just depends on how many actual "pixels" this
+         // component has, independent of interleaved MCU blocking and such
+         int w = (z->img_comp[n].x+7) >> 3;
+         int h = (z->img_comp[n].y+7) >> 3;
+         for (j=0; j < h; ++j) {
+            for (i=0; i < w; ++i) {
+               int ha = z->img_comp[n].ha;
+               if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
+               z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data);
+               // every data block is an MCU, so count down the restart interval
+               if (--z->todo <= 0) {
+                  if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
+                  // if it's NOT a restart, then just bail, so we get corrupt data
+                  // rather than no data
+                  if (!STBI__RESTART(z->marker)) return 1;
+                  stbi__jpeg_reset(z);
+               }
+            }
+         }
+         return 1;
+      } else { // interleaved
+         int i,j,k,x,y;
+         STBI_SIMD_ALIGN(short, data[64]);
+         for (j=0; j < z->img_mcu_y; ++j) {
+            for (i=0; i < z->img_mcu_x; ++i) {
+               // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 
129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = {'A','d','o','b','e','\0'}; + int ok = 1; + int i; + for (i=0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; + if (ok) { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } + + stbi__skip(z->s, L); + return 1; + } + + return stbi__err("unknown marker","Corrupt JPEG"); +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if (z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) { + 
STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + c = stbi__get8(s); + if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + z->rgb = 0; + for (i=0; i < s->img_n; ++i) { + static const unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. 
a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + // handle 0s at the end of image data from IP Kamera 9060 + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + if (x == 255) { + j->marker = stbi__get8(j->s); + break; + } + } + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + } else { + if (!stbi__process_marker(j, 
m)) return 0; + } + m = stbi__get_marker(j); + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. + for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. 
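+      // reading in_near[i+8]/in_far[i+8] here is safe: the loop bound
+      // ((w-1) & ~7) leaves at least one input pixel beyond each group of
+      // 8, which is also why the last output pixel of the row is produced
+      // by the scalar tail after this loop instead.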
+ __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. + __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
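+      // worked example: curr already holds the vertical filter result
+      // 3*near + far (computed as 4*near + (far - near)), so
+      // even = 4*curr + (prev - curr) = 3*curr + prev, matching the scalar
+      // stbi__div16(3*t1 + t0 + 8); the +8 rounding and >>4 are folded
+      // into vqrshrun_n_s16 below.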
+ int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
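+      // fixed-point scheme: the constants are round(c*4096) and cr/cb get
+      // unpacked into the high byte of each 16-bit lane (value*256), so
+      // _mm_mulhi_epi16 computes (value*256 * c*4096) >> 16 = value*c*16,
+      // i.e. the true contribution with 4 fractional bits; y is unpacked
+      // with the 128 bias in the low byte, so yws = y*16 + 8 carries the
+      // rounding term and the final >>4 returns to 8 bits.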
+ __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + _mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
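+      // same fixed-point idea as the SSE2 path, adapted to NEON: cr/cb are
+      // widened with vshll_n_s8(..., 7) (value*128) because vqdmulhq_s16
+      // doubles the product, so (value*128 * c*4096)*2 >> 16 is again
+      // value*c*16; vqrshrun_n_s16(..., 4) then rounds, shifts and
+      // saturates back to bytes in one step.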
+ uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +// fast 0..255 * 0..255 => 0..255 rounded multiplication +static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) +{ + unsigned int t = x*y + 128; + return (stbi_uc) ((t + (t >>8)) >> 8); +} + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n, is_rgb; + z->s->img_n = 0; // make 
stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; + + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + + if (z->s->img_n == 3 && n < 3 && !is_rgb) + decode_n = 1; + else + decode_n = z->s->img_n; + + // nothing to do if no components requested; check this now to avoid + // accessing uninitialized coutput[0] later + if (decode_n <= 0) { stbi__cleanup_jpeg(z); return NULL; } + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4] = { NULL, NULL, NULL, NULL }; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? 
r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (is_rgb) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else if (z->s->img_n == 4) { + if (z->app14_color_transform == 0) { // CMYK + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } else if (z->app14_color_transform == 2) { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + out += n; + } + } else { // YCbCr + alpha? Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + if (is_rgb) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) { *out++ = y[i]; *out++ = 255; } + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n >= 3 ? 
3 : 1; // report original components, not output
+      return output;
+   }
+}
+
+static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+   unsigned char* result;
+   stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg));
+   if (!j) return stbi__errpuc("outofmem", "Out of memory");
+   STBI_NOTUSED(ri);
+   j->s = s;
+   stbi__setup_jpeg(j);
+   result = load_jpeg_image(j, x,y,comp,req_comp);
+   STBI_FREE(j);
+   return result;
+}
+
+static int stbi__jpeg_test(stbi__context *s)
+{
+   int r;
+   stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg));
+   if (!j) return stbi__err("outofmem", "Out of memory");
+   j->s = s;
+   stbi__setup_jpeg(j);
+   r = stbi__decode_jpeg_header(j, STBI__SCAN_type);
+   stbi__rewind(s);
+   STBI_FREE(j);
+   return r;
+}
+
+static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp)
+{
+   if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) {
+      stbi__rewind( j->s );
+      return 0;
+   }
+   if (x) *x = j->s->img_x;
+   if (y) *y = j->s->img_y;
+   if (comp) *comp = j->s->img_n >= 3 ? 3 : 1;
+   return 1;
+}
+
+static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp)
+{
+   int result;
+   stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg)));
+   if (!j) return stbi__err("outofmem", "Out of memory");
+   j->s = s;
+   result = stbi__jpeg_info_raw(j, x, y, comp);
+   STBI_FREE(j);
+   return result;
+}
+#endif
+
+// public domain zlib decode    v0.2  Sean Barrett 2006-11-18
+//    simple implementation
+//      - all input must be provided in an upfront buffer
+//      - all output is written to a single output buffer (can malloc/realloc)
+//    performance
+//      - fast huffman
+
+#ifndef STBI_NO_ZLIB
+
+// fast-way is faster to check than jpeg huffman, but slow way is slower
+#define STBI__ZFAST_BITS  9 // accelerate all cases in default tables
+#define STBI__ZFAST_MASK  ((1 << STBI__ZFAST_BITS) - 1)
+#define STBI__ZNSYMS 288 // number of symbols in literal/length alphabet
+
+// zlib-style huffman encoding
+// (jpeg packs from left, zlib from right, so can't share code)
+typedef struct
+{
+   stbi__uint16 fast[1 << STBI__ZFAST_BITS];
+   stbi__uint16 firstcode[16];
+   int maxcode[17];
+   stbi__uint16 firstsymbol[16];
+   stbi_uc  size[STBI__ZNSYMS];
+   stbi__uint16 value[STBI__ZNSYMS];
+} stbi__zhuffman;
+
+stbi_inline static int stbi__bitreverse16(int n)
+{
+   n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1);
+   n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2);
+   n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4);
+   n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8);
+   return n;
+}
+
+stbi_inline static int stbi__bit_reverse(int v, int bits)
+{
+   STBI_ASSERT(bits <= 16);
+   // to bit reverse n bits, reverse 16 and shift
+   // e.g. 
11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static int stbi__zeof(stbi__zbuf *z) +{ + return (z->zbuffer >= z->zbuffer_end); +} + +stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) +{ + return stbi__zeof(z) ? 0 : *z->zbuffer++; +} + +static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + if (z->code_buffer >= (1U << z->num_bits)) { + z->zbuffer = z->zbuffer_end; /* treat this as EOF so we fail. */ + return; + } + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s >= 16) return -1; // invalid code! + // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + if (b >= STBI__ZNSYMS) return -1; // some data was corrupt somewhere! + if (z->size[b] != s) return -1; // was originally an assert, but report failure instead. + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) { + if (stbi__zeof(a)) { + return -1; /* report error for unexpected end of data. 
*/ + } + stbi__fill_bits(a); + } + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + unsigned int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (unsigned int) (z->zout - z->zout_start); + limit = old_limit = (unsigned) (z->zout_end - z->zout_start); + if (UINT_MAX - cur < (unsigned) n) return stbi__err("outofmem", "Out of memory"); + while (cur + n > limit) { + if(limit > UINT_MAX / 2) return stbi__err("outofmem", "Out of memory"); + limit *= 2; + } + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static const int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static const int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static const int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int len,dist; + if (z == 256) { + a->zout = zout; + return 1; + } + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (zout + len > a->zout_end) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. 
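+            // dist == 1 means "repeat the previous byte len times"; the
+            // byte-at-a-time else branch below also handles the remaining
+            // overlapping matches (dist < len) correctly, which is why a
+            // plain memcpy can't be used here.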
+ stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) { + c = stbi__zreceive(a,3)+3; + } else if (c == 18) { + c = stbi__zreceive(a,7)+11; + } else { + return stbi__err("bad codelengths", "Corrupt PNG"); + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + if (a->num_bits < 0) return stbi__err("zlib corrupt","Corrupt PNG"); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if (stbi__zeof(a)) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
but who cares, we fully buffer output + return 1; +} + +static const stbi_uc stbi__zdefault_length[STBI__ZNSYMS] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} +*/ + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , STBI__ZNSYMS)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + +static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + 
ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, int c) +{ + int p = a + b - c; + int pa = abs(p-a); + int pb = abs(p-b); + int pc = abs(p-c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) return b; + return c; +} + +static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16? 
2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. + if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior; + int filter = *raw++; + + if (filter > 4) + return stbi__err("invalid filter","Corrupt PNG"); + + if (depth < 8) { + if (img_width_bytes > x) return stbi__err("invalid width","Corrupt PNG"); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k=0; k < filter_bytes; ++k) { + switch (filter) { + case STBI__F_none : cur[k] = raw[k]; break; + case STBI__F_sub : cur[k] = raw[k]; break; + case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; + case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; + case STBI__F_avg_first : cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur += out_n; + prior += out_n; + } else if (depth == 16) { + if (img_n != out_n) { + cur[filter_bytes] = 255; // first pixel top byte + cur[filter_bytes+1] = 255; // first pixel bottom byte + } + raw += filter_bytes; + cur += output_bytes; + prior += output_bytes; + } else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*filter_bytes; + #define STBI__CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. 
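+            // (editor's note: for reference, one illustrative expansion of the
+            // macro above -- STBI__CASE(STBI__F_sub) { ... } break; becomes:
+            //    case STBI__F_sub:
+            //       for (k=0; k < nk; ++k)
+            //          { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break;
+            // i.e. each case is a tight per-byte loop with the filter math inlined.)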
+            case STBI__F_none:         memcpy(cur, raw, nk); break;
+            STBI__CASE(STBI__F_sub)          { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break;
+            STBI__CASE(STBI__F_up)           { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
+            STBI__CASE(STBI__F_avg)          { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break;
+            STBI__CASE(STBI__F_paeth)        { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break;
+            STBI__CASE(STBI__F_avg_first)    { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break;
+            STBI__CASE(STBI__F_paeth_first)  { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break;
+         }
+         #undef STBI__CASE
+         raw += nk;
+      } else {
+         STBI_ASSERT(img_n+1 == out_n);
+         #define STBI__CASE(f) \
+             case f:     \
+                for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \
+                   for (k=0; k < filter_bytes; ++k)
+         switch (filter) {
+            STBI__CASE(STBI__F_none)         { cur[k] = raw[k]; } break;
+            STBI__CASE(STBI__F_sub)          { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break;
+            STBI__CASE(STBI__F_up)           { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
+            STBI__CASE(STBI__F_avg)          { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break;
+            STBI__CASE(STBI__F_paeth)        { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break;
+            STBI__CASE(STBI__F_avg_first)    { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break;
+            STBI__CASE(STBI__F_paeth_first)  { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break;
+         }
+         #undef STBI__CASE
+
+         // the loop above sets the high byte of the pixels' alpha, but for
+         // 16 bit png files we also need the low byte set. we'll do that here.
+         if (depth == 16) {
+            cur = a->out + stride*j; // start at the beginning of the row again
+            for (i=0; i < x; ++i,cur+=output_bytes) {
+               cur[filter_bytes+1] = 255;
+            }
+         }
+      }
+   }
+
+   // we make a separate pass to expand bits to pixels; for performance,
+   // this could run two scanlines behind the above code, so it won't
+   // interfere with filtering but will still be in the cache.
+   if (depth < 8) {
+      for (j=0; j < y; ++j) {
+         stbi_uc *cur = a->out + stride*j;
+         stbi_uc *in  = a->out + stride*j + x*out_n - img_width_bytes;
+         // unpack 1/2/4-bit into an 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit
+         // png guarantees byte alignment; if width is not a multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop
+         stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range
+
+         // note that the final byte might overshoot and write more data than desired.
+         // we can allocate enough data that this never writes out of memory, but it
+         // could also overwrite the next scanline. can it overwrite non-empty data
+         // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel.
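+         // (worked example, editor's note: with x=1 at depth 1, the row holds a
+         // single sample but img_width_bytes is 1, and unpacking that whole byte
+         // would emit 8 samples -- 7 of them dummy bits that, unclamped, would
+         // land on top of the next scanline's already-decoded data.)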
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k=x*img_n; k >= 2; k-=2, ++in) { + *cur++ = scale * ((*in >> 4) ); + *cur++ = scale * ((*in ) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4) ); + } else if (depth == 2) { + for (k=x*img_n; k >= 4; k-=4, ++in) { + *cur++ = scale * ((*in >> 6) ); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in ) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6) ); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } else if (depth == 1) { + for (k=x*img_n; k >= 8; k-=8, ++in) { + *cur++ = scale * ((*in >> 7) ); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in ) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7) ); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q=x-1; q >= 0; --q) { + cur[q*2+1] = 255; + cur[q*2+0] = cur[q]; + } + } else { + STBI_ASSERT(img_n == 3); + for (q=x-1; q >= 0; --q) { + cur[q*4+3] = 255; + cur[q*4+2] = cur[q*3+2]; + cur[q*4+1] = cur[q*3+1]; + cur[q*4+0] = cur[q*3+0]; + } + } + } + } + } else if (depth == 16) { + // force the image data from big-endian to platform-native. + // this is done in a separate pass due to the decoding relying + // on the data being untouched, but could probably be done + // per-line during decode if care is taken. + stbi_uc *cur = a->out; + stbi__uint16 *cur16 = (stbi__uint16*)cur; + + for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { + *cur16 = (cur[0] << 8) | cur[1]; + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 
2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + if (!final) return stbi__err("outofmem", "Out of memory"); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*) z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 
0 : 65535);
+         p += 2;
+      }
+   } else {
+      for (i = 0; i < pixel_count; ++i) {
+         if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2])
+            p[3] = 0;
+         p += 4;
+      }
+   }
+   return 1;
+}
+
+static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n)
+{
+   stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y;
+   stbi_uc *p, *temp_out, *orig = a->out;
+
+   p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0);
+   if (p == NULL) return stbi__err("outofmem", "Out of memory");
+
+   // between here and free(out) below, exiting would leak
+   temp_out = p;
+
+   if (pal_img_n == 3) {
+      for (i=0; i < pixel_count; ++i) {
+         int n = orig[i]*4;
+         p[0] = palette[n  ];
+         p[1] = palette[n+1];
+         p[2] = palette[n+2];
+         p += 3;
+      }
+   } else {
+      for (i=0; i < pixel_count; ++i) {
+         int n = orig[i]*4;
+         p[0] = palette[n  ];
+         p[1] = palette[n+1];
+         p[2] = palette[n+2];
+         p[3] = palette[n+3];
+         p += 4;
+      }
+   }
+   STBI_FREE(a->out);
+   a->out = temp_out;
+
+   STBI_NOTUSED(len);
+
+   return 1;
+}
+
+static int stbi__unpremultiply_on_load_global = 0;
+static int stbi__de_iphone_flag_global = 0;
+
+STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply)
+{
+   stbi__unpremultiply_on_load_global = flag_true_if_should_unpremultiply;
+}
+
+STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert)
+{
+   stbi__de_iphone_flag_global = flag_true_if_should_convert;
+}
+
+#ifndef STBI_THREAD_LOCAL
+#define stbi__unpremultiply_on_load  stbi__unpremultiply_on_load_global
+#define stbi__de_iphone_flag  stbi__de_iphone_flag_global
+#else
+static STBI_THREAD_LOCAL int stbi__unpremultiply_on_load_local, stbi__unpremultiply_on_load_set;
+static STBI_THREAD_LOCAL int stbi__de_iphone_flag_local, stbi__de_iphone_flag_set;
+
+STBIDEF void stbi__unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply)
+{
+   stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply;
+   stbi__unpremultiply_on_load_set = 1;
+}
+
+STBIDEF void stbi_convert_iphone_png_to_rgb_thread(int flag_true_if_should_convert)
+{
+   stbi__de_iphone_flag_local = flag_true_if_should_convert;
+   stbi__de_iphone_flag_set = 1;
+}
+
+#define stbi__unpremultiply_on_load  (stbi__unpremultiply_on_load_set           \
+                                       ? stbi__unpremultiply_on_load_local      \
+                                       : stbi__unpremultiply_on_load_global)
+#define stbi__de_iphone_flag  (stbi__de_iphone_flag_set                         \
+                                       ?
stbi__de_iphone_flag_local \ + : stbi__de_iphone_flag_global) +#endif // STBI_THREAD_LOCAL + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i=0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + stbi_uc half = a / 2; + p[0] = (p[2] * 255 + half) / a; + p[1] = (p[1] * 255 + half) / a; + p[2] = ( t * 255 + half) / a; + } else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } else { + // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n=0; + stbi_uc has_trans=0, tc[3]={0}; + stbi__uint16 tc16[3]; + stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; + int first=1,k,interlace=0, color=0, is_iphone=0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C','g','B','I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I','H','D','R'): { + int comp,filter; + if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); + s->img_x = stbi__get32be(s); + s->img_y = stbi__get32be(s); + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); + filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + if (scan == STBI__SCAN_header) return 1; + } else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. 
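+            // (editor's note: concretely, a color-type-3 (paletted) PNG is
+            // deflated as one palette-index byte per pixel, so filtering runs
+            // with img_n=1 and the expansion to 3 or 4 channels happens only
+            // in the palette pass at the end of decoding.)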
+ s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); + // if SCAN_header, have to scan to see if we have a tRNS + } + break; + } + + case STBI__PNG_TYPE('P','L','T','E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); + for (i=0; i < pal_len; ++i) { + palette[i*4+0] = stbi__get8(s); + palette[i*4+1] = stbi__get8(s); + palette[i*4+2] = stbi__get8(s); + palette[i*4+3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t','R','N','S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); + pal_img_n = 4; + for (i=0; i < c.length; ++i) + palette[i*4+3] = stbi__get8(s); + } else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); + if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); + has_trans = 1; + if (z->depth == 16) { + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } else { + for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I','D','A','T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); + if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > idata_limit) { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? 
c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I','E','N','D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n+1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } else if (has_trans) { + // non-paletted image with tRNS -> source image has (constant) alpha + ++s->img_n; + } + STBI_FREE(z->expanded); z->expanded = NULL; + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { + #ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + #endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result=NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth <= 8) + ri->bits_per_channel = 8; + else if (p->depth == 16) + ri->bits_per_channel = 16; + else + return stbi__errpuc("bad bits_per_channel", "PNG not supported: unsupported color depth"); + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 
*) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
+         p->s->img_out_n = req_comp;
+         if (result == NULL) return result;
+      }
+      *x = p->s->img_x;
+      *y = p->s->img_y;
+      if (n) *n = p->s->img_n;
+   }
+   STBI_FREE(p->out);      p->out      = NULL;
+   STBI_FREE(p->expanded); p->expanded = NULL;
+   STBI_FREE(p->idata);    p->idata    = NULL;
+
+   return result;
+}
+
+static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+   stbi__png p;
+   p.s = s;
+   return stbi__do_png(&p, x,y,comp,req_comp, ri);
+}
+
+static int stbi__png_test(stbi__context *s)
+{
+   int r;
+   r = stbi__check_png_header(s);
+   stbi__rewind(s);
+   return r;
+}
+
+static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp)
+{
+   if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) {
+      stbi__rewind( p->s );
+      return 0;
+   }
+   if (x) *x = p->s->img_x;
+   if (y) *y = p->s->img_y;
+   if (comp) *comp = p->s->img_n;
+   return 1;
+}
+
+static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp)
+{
+   stbi__png p;
+   p.s = s;
+   return stbi__png_info_raw(&p, x, y, comp);
+}
+
+static int stbi__png_is16(stbi__context *s)
+{
+   stbi__png p;
+   p.s = s;
+   if (!stbi__png_info_raw(&p, NULL, NULL, NULL))
+       return 0;
+   if (p.depth != 16) {
+       stbi__rewind(p.s);
+       return 0;
+   }
+   return 1;
+}
+#endif
+
+// Microsoft/Windows BMP image
+
+#ifndef STBI_NO_BMP
+static int stbi__bmp_test_raw(stbi__context *s)
+{
+   int r;
+   int sz;
+   if (stbi__get8(s) != 'B') return 0;
+   if (stbi__get8(s) != 'M') return 0;
+   stbi__get32le(s); // discard filesize
+   stbi__get16le(s); // discard reserved
+   stbi__get16le(s); // discard reserved
+   stbi__get32le(s); // discard data offset
+   sz = stbi__get32le(s);
+   r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124);
+   return r;
+}
+
+static int stbi__bmp_test(stbi__context *s)
+{
+   int r = stbi__bmp_test_raw(s);
+   stbi__rewind(s);
+   return r;
+}
+
+
+// returns 0..31 for the highest set bit
+static int stbi__high_bit(unsigned int z)
+{
+   int n=0;
+   if (z == 0) return -1;
+   if (z >= 0x10000) { n += 16; z >>= 16; }
+   if (z >= 0x00100) { n +=  8; z >>=  8; }
+   if (z >= 0x00010) { n +=  4; z >>=  4; }
+   if (z >= 0x00004) { n +=  2; z >>=  2; }
+   if (z >= 0x00002) { n +=  1;/* >>=  1;*/ }
+   return n;
+}
+
+static int stbi__bitcount(unsigned int a)
+{
+   a = (a & 0x55555555) + ((a >>  1) & 0x55555555); // max 2
+   a = (a & 0x33333333) + ((a >>  2) & 0x33333333); // max 4
+   a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits
+   a = (a + (a >> 8)); // max 16 per 8 bits
+   a = (a + (a >> 16)); // max 32 per 8 bits
+   return a & 0xff;
+}
+
+// extract an arbitrarily-aligned N-bit value (N=bits)
+// from v, and then make it 8-bits long and fractionally
+// extend it to the full range.
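+// (worked example, editor's note: for bits=5 this computes v*0x21 >> 2, which
+// replicates the 5-bit field across 8 bits: v=31 -> 255 and v=16 -> 132, the
+// same results as the exact expansion (v<<3)|(v>>2).)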
+static int stbi__shiftsigned(unsigned int v, int shift, int bits) +{ + static unsigned int mul_table[9] = { + 0, + 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/, + 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/, + }; + static unsigned int shift_table[9] = { + 0, 0,0,1,0,2,4,6,0, + }; + if (shift < 0) + v <<= -shift; + else + v >>= shift; + STBI_ASSERT(v < 256); + v >>= (8-bits); + STBI_ASSERT(bits >= 0 && bits <= 8); + return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits]; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr,mg,mb,ma, all_a; + int extra_read; +} stbi__bmp_data; + +static int stbi__bmp_set_mask_defaults(stbi__bmp_data *info, int compress) +{ + // BI_BITFIELDS specifies masks explicitly, don't override + if (compress == 3) + return 1; + + if (compress == 0) { + if (info->bpp == 16) { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } else if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + // otherwise, use defaults, which is all-0 + info->mr = info->mg = info->mb = info->ma = 0; + } + return 1; + } + return 0; // error +} + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + info->extra_read = 14; + + if (info->offset < 0) return stbi__errpuc("bad BMP", "bad BMP"); + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + if (compress >= 4) return stbi__errpuc("BMP JPEG/PNG", "BMP type not supported: unsupported compression"); // this includes PNG/JPEG modes + if (compress == 3 && info->bpp != 16 && info->bpp != 32) return stbi__errpuc("bad BMP", "bad BMP"); // bitfields requires 16 or 32 bits/pixel + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + stbi__bmp_set_mask_defaults(info, compress); + } else if (compress == 3) { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->extra_read += 12; + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? 
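+               // (editor's note: three identical masks can't describe
+               // distinct R/G/B channels, so this is treated as corrupt
+               // rather than guessed at.)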
+ return stbi__errpuc("bad BMP", "bad BMP"); + } + } else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else { + // V4/V5 header + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + if (compress != 3) // override mr/mg/mb unless in BI_BITFIELDS mode, as per docs + stbi__bmp_set_mask_defaults(info, compress); + stbi__get32le(s); // discard color space + for (i=0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *) 1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr=0,mg=0,mb=0,ma=0, all_a; + stbi_uc pal[256][4]; + int psize=0,i,j,width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int) s->img_y) > 0; + s->img_y = abs((int) s->img_y); + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - info.extra_read - 24) / 3; + } else { + if (info.bpp < 16) + psize = (info.offset - info.extra_read - info.hsz) >> 2; + } + if (psize == 0) { + if (info.offset != s->callback_already_read + (s->img_buffer - s->img_buffer_original)) { + return stbi__errpuc("bad offset", "Corrupt BMP"); + } + } + + if (info.bpp == 24 && ma == 0xff000000) + s->img_n = 3; + else + s->img_n = ma ? 4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z=0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i=0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - info.extra_read - info.hsz - psize * (info.hsz == 12 ? 
3 : 4)); + if (info.bpp == 1) width = (s->img_x + 7) >> 3; + else if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width)&3; + if (info.bpp == 1) { + for (j=0; j < (int) s->img_y; ++j) { + int bit_offset = 7, v = stbi__get8(s); + for (i=0; i < (int) s->img_x; ++i) { + int color = (v>>bit_offset)&0x1; + out[z++] = pal[color][0]; + out[z++] = pal[color][1]; + out[z++] = pal[color][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + if((--bit_offset) < 0) { + bit_offset = 7; + v = stbi__get8(s); + } + } + stbi__skip(s, pad); + } + } else { + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } + } else { + int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; + int z = 0; + int easy=0; + stbi__skip(s, info.offset - info.extra_read - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2*s->img_x; + else /* bpp = 32 and pad = 0 */ width=0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); + if (rcount > 8 || gcount > 8 || bcount > 8 || acount > 8) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + } + for (j=0; j < (int) s->img_y; ++j) { + if (easy) { + for (i=0; i < (int) s->img_x; ++i) { + unsigned char a; + out[z+2] = stbi__get8(s); + out[z+1] = stbi__get8(s); + out[z+0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } else { + int bpp = info.bpp; + for (i=0; i < (int) s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); + unsigned int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? 
stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j=0; j < (int) s->img_y>>1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; + for (i=0; i < (int) s->img_x*target; ++i) { + t = p1[i]; p1[i] = p2[i]; p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 16bit) or grey allowed + if (is_rgb16) *is_rgb16 = 0; + switch(bits_per_pixel) { + case 8: return STBI_grey; + case 16: if(is_grey) return STBI_grey_alpha; + // fallthrough + case 15: if(is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fallthrough + case 32: return bits_per_pixel/8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if( tga_colormap_type > 1 ) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if ( tga_colormap_type == 1 ) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip image x and y origin + tga_colormap_bpp = sz; + } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s,9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if( tga_w < 1 ) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if( tga_h < 1 ) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a colormap, tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if(!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; 
// seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if ( tga_color_type == 1 ) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + stbi__skip(s,4); // skip image x and y origin + } else { // "normal" image w/o colormap + if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s,9); // skip colormap specification and image x/y origin + } + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height + sz = stbi__get8(s); // bits per pixel + if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB +static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255)/31); + out[1] = (stbi_uc)((g * 255)/31); + out[2] = (stbi_uc)((b * 255)/31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16=0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) 
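+   // (editor's note: bit 5 of that same descriptor byte is the top-to-bottom
+   // origin flag, which is what the tga_inverted computation below extracts.)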
+   // image data
+   unsigned char *tga_data;
+   unsigned char *tga_palette = NULL;
+   int i, j;
+   unsigned char raw_data[4] = {0};
+   int RLE_count = 0;
+   int RLE_repeating = 0;
+   int read_next_pixel = 1;
+   STBI_NOTUSED(ri);
+   STBI_NOTUSED(tga_x_origin); // @TODO
+   STBI_NOTUSED(tga_y_origin); // @TODO
+
+   if (tga_height > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
+   if (tga_width > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
+
+   //   do a tiny bit of processing
+   if ( tga_image_type >= 8 )
+   {
+      tga_image_type -= 8;
+      tga_is_RLE = 1;
+   }
+   tga_inverted = 1 - ((tga_inverted >> 5) & 1);
+
+   //   If I'm paletted, then I'll use the number of bits from the palette
+   if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16);
+   else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16);
+
+   if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency
+      return stbi__errpuc("bad format", "Can't find out TGA pixelformat");
+
+   // tga info
+   *x = tga_width;
+   *y = tga_height;
+   if (comp) *comp = tga_comp;
+
+   if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0))
+      return stbi__errpuc("too large", "Corrupt TGA");
+
+   tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0);
+   if (!tga_data) return stbi__errpuc("outofmem", "Out of memory");
+
+   // skip to the data's starting position (offset usually = 0)
+   stbi__skip(s, tga_offset );
+
+   if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) {
+      for (i=0; i < tga_height; ++i) {
+         int row = tga_inverted ? tga_height -i - 1 : i;
+         stbi_uc *tga_row = tga_data + row*tga_width*tga_comp;
+         stbi__getn(s, tga_row, tga_width * tga_comp);
+      }
+   } else  {
+      //   do I need to load a palette?
+      if ( tga_indexed)
+      {
+         if (tga_palette_len == 0) {  /* you have to have at least one entry! */
+            STBI_FREE(tga_data);
+            return stbi__errpuc("bad palette", "Corrupt TGA");
+         }
+
+         //   any data to skip? (offset usually = 0)
+         stbi__skip(s, tga_palette_start );
+         //   load the palette
+         tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0);
+         if (!tga_palette) {
+            STBI_FREE(tga_data);
+            return stbi__errpuc("outofmem", "Out of memory");
+         }
+         if (tga_rgb16) {
+            stbi_uc *pal_entry = tga_palette;
+            STBI_ASSERT(tga_comp == STBI_rgb);
+            for (i=0; i < tga_palette_len; ++i) {
+               stbi__tga_read_rgb16(s, pal_entry);
+               pal_entry += tga_comp;
+            }
+         } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) {
+               STBI_FREE(tga_data);
+               STBI_FREE(tga_palette);
+               return stbi__errpuc("bad palette", "Corrupt TGA");
+         }
+      }
+      // load the data
+      for (i=0; i < tga_width * tga_height; ++i)
+      {
+         // if I'm in RLE mode, do I need to get an RLE packet?
+         if ( tga_is_RLE )
+         {
+            if ( RLE_count == 0 )
+            {
+               // yep, get the next byte as an RLE command
+               int RLE_cmd = stbi__get8(s);
+               RLE_count = 1 + (RLE_cmd & 127);
+               RLE_repeating = RLE_cmd >> 7;
+               read_next_pixel = 1;
+            } else if ( !RLE_repeating )
+            {
+               read_next_pixel = 1;
+            }
+         } else
+         {
+            read_next_pixel = 1;
+         }
+         // OK, if I need to read a pixel, do it now
+         if ( read_next_pixel )
+         {
+            // load however much data we did have
+            if ( tga_indexed )
+            {
+               // read in index, then perform the lookup
+               int pal_idx = (tga_bits_per_pixel == 8) ?
stbi__get8(s) : stbi__get16le(s); + if ( pal_idx >= tga_palette_len ) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx+j]; + } + } else if(tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp+j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if ( tga_inverted ) + { + for (j = 0; j*2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if ( tga_palette != NULL ) + { + STBI_FREE( tga_palette ); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i=0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... [8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + STBI_NOTUSED(tga_palette_start); + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w,h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. 
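+   // (editor's note: a value of 2 here would indicate the large-document PSB
+   // variant of the format, which this loader does not handle.)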
+ if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6 ); + + // Read the number of channels (R, G, B, A, etc). + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + if (h > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (w > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. + + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. + //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. + if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. 
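+      // (worked example, editor's note: for a 2x1 RGB image the raw section is
+      // laid out planar as R0 R1 G0 G1 B0 B1; stepping the output pointer by 4
+      // per sample interleaves it into R0 G0 B0 A0 R1 G1 B1 A1.)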
+ for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ? 255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int 
act_comp=0,num_packets=0,y,chained;
+   stbi__pic_packet packets[10];
+
+   // this will (should...) cater for even some bizarre stuff like having data
+   // for the same channel in multiple packets.
+   do {
+      stbi__pic_packet *packet;
+
+      if (num_packets==sizeof(packets)/sizeof(packets[0]))
+         return stbi__errpuc("bad format","too many packets");
+
+      packet = &packets[num_packets++];
+
+      chained = stbi__get8(s);
+      packet->size    = stbi__get8(s);
+      packet->type    = stbi__get8(s);
+      packet->channel = stbi__get8(s);
+
+      act_comp |= packet->channel;
+
+      if (stbi__at_eof(s))    return stbi__errpuc("bad file","file too short (reading packets)");
+      if (packet->size != 8)  return stbi__errpuc("bad format","packet isn't 8bpp");
+   } while (chained);
+
+   *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel?
+
+   for(y=0; y<height; ++y) {
+      int packet_idx;
+
+      for(packet_idx=0; packet_idx < num_packets; ++packet_idx) {
+         stbi__pic_packet *packet = &packets[packet_idx];
+         stbi_uc *dest = result+y*width*4;
+
+         switch (packet->type) {
+            default:
+               return stbi__errpuc("bad format","packet has bad compression type");
+
+            case 0: {//uncompressed
+               int x;
+
+               for(x=0;x<width;++x, dest+=4)
+                  if (!stbi__readval(s,packet->channel,dest))
+                     return 0;
+               break;
+            }
+
+            case 1://Pure RLE
+               {
+                  int left=width, i;
+
+                  while (left>0) {
+                     stbi_uc count,value[4];
+
+                     count=stbi__get8(s);
+                     if (stbi__at_eof(s))   return stbi__errpuc("bad file","file too short (pure read count)");
+
+                     if (count > left)
+                        count = (stbi_uc) left;
+
+                     if (!stbi__readval(s,packet->channel,value))  return 0;
+
+                     for(i=0; i<count; ++i,dest+=4)
+                        stbi__copyval(packet->channel,dest,value);
+                     left -= count;
+                  }
+               }
+               break;
+
+            case 2: {//Mixed RLE
+               int left=width;
+               while (left>0) {
+                  int count = stbi__get8(s), i;
+                  if (stbi__at_eof(s))  return stbi__errpuc("bad file","file too short (mixed read count)");
+
+                  if (count >= 128) { // Repeated
+                     stbi_uc value[4];
+
+                     if (count==128)
+                        count = stbi__get16be(s);
+                     else
+                        count -= 127;
+                     if (count > left)
+                        return stbi__errpuc("bad file","scanline overrun");
+
+                     if (!stbi__readval(s,packet->channel,value))
+                        return 0;
+
+                     for(i=0;i<count;++i, dest += 4)
+                        stbi__copyval(packet->channel,dest,value);
+                  } else { // Raw
+                     ++count;
+                     if (count>left) return stbi__errpuc("bad file","scanline overrun");
+
+                     for(i=0;i<count;++i, dest+=4)
+                        if (!stbi__readval(s,packet->channel,dest))
+                           return 0;
+                  }
+                  left-=count;
+               }
+               break;
+            }
+         }
+      }
+   }
+
+   return result;
+}
+
+static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri)
+{
+   stbi_uc *result;
+   int i, x,y, internal_comp;
+   STBI_NOTUSED(ri);
+
+   if (!comp) comp = &internal_comp;
+
+   for (i=0; i<92; ++i)
+      stbi__get8(s);
+
+   x = stbi__get16be(s);
+   y = stbi__get16be(s);
+
+   if (y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
+   if (x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)");
+
+   if (stbi__at_eof(s))  return stbi__errpuc("bad file","file too short (pic header)");
+   if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode");
+
+   stbi__get32be(s); //skip `ratio'
+   stbi__get16be(s); //skip `fields'
+   stbi__get16be(s); //skip `pad'
+
+   // intermediate buffer is RGBA
+   result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0);
+   if (!result) return stbi__errpuc("outofmem", "Out of memory");
+   memset(result, 0xff, x*y*4);
+
+   if (!stbi__pic_load_core(s,x,y,comp, result)) {
+      STBI_FREE(result);
+      result=0;
+   }
+   *px = x;
+   *py = y;
+   if (req_comp == 0) req_comp = *comp;
+   result=stbi__convert_format(result,4,req_comp,x,y);
+
+   return result;
+}
+
+static int stbi__pic_test(stbi__context *s)
+{
+   int r = stbi__pic_test_core(s);
+   stbi__rewind(s);
+   return r;
+}
+#endif
+
+// *************************************************************************************************
+// GIF loader -- public domain by 
Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[8192]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; + int delay; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (g->w > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + if (g->h > STBI_MAX_DIMENSIONS) return stbi__err("too large","Very large image (corrupt?)"); + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!g) return stbi__err("outofmem", "Out of memory"); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind( s ); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + int idx; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; + + c = &g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << 
g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? + if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) +{ + int dispose; + int first_frame; + int pi; + int pcount; + STBI_NOTUSED(req_comp); + + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + if (!stbi__mad3sizes_valid(4, g->w, g->h, 0)) + return stbi__errpuc("too large", "GIF image is too large"); + pcount = g->w * g->h; + g->out = (stbi_uc *) stbi__malloc(4 * pcount); + g->background = (stbi_uc *) stbi__malloc(4 * pcount); + g->history = (stbi_uc *) stbi__malloc(pcount); + if (!g->out || !g->background || !g->history) + return stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "transparent" at the start - ie, nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to the color that was there the previous frame. 
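+      // For reference: `out` is the composited RGBA frame, `background` is what
+      // "restore to background" disposal reverts to, and `history` marks (one
+      // byte per pixel) which pixels the current frame has written.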
+      memset(g->out, 0x00, 4 * pcount);
+      memset(g->background, 0x00, 4 * pcount); // state of the background (starts transparent)
+      memset(g->history, 0x00, pcount);        // pixels that were affected previous frame
+      first_frame = 1;
+   } else {
+      // second frame - how do we dispose of the previous one?
+      dispose = (g->eflags & 0x1C) >> 2;
+      pcount = g->w * g->h;
+
+      if ((dispose == 3) && (two_back == 0)) {
+         dispose = 2; // if I don't have an image to revert back to, default to the old background
+      }
+
+      if (dispose == 3) { // use previous graphic
+         for (pi = 0; pi < pcount; ++pi) {
+            if (g->history[pi]) {
+               memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 );
+            }
+         }
+      } else if (dispose == 2) {
+         // restore what was changed last frame to background before that frame;
+         for (pi = 0; pi < pcount; ++pi) {
+            if (g->history[pi]) {
+               memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 );
+            }
+         }
+      } else {
+         // This is a non-disposal case either way, so just
+         // leave the pixels as is, and they will become the new background
+         // 1: do not dispose
+         // 0: not specified.
+      }
+
+      // background is what out is after the undoing of the previous frame;
+      memcpy( g->background, g->out, 4 * g->w * g->h );
+   }
+
+   // clear my history;
+   memset( g->history, 0x00, g->w * g->h );        // pixels that were affected previous frame
+
+   for (;;) {
+      int tag = stbi__get8(s);
+      switch (tag) {
+         case 0x2C: /* Image Descriptor */
+         {
+            stbi__int32 x, y, w, h;
+            stbi_uc *o;
+
+            x = stbi__get16le(s);
+            y = stbi__get16le(s);
+            w = stbi__get16le(s);
+            h = stbi__get16le(s);
+            if (((x + w) > (g->w)) || ((y + h) > (g->h)))
+               return stbi__errpuc("bad Image Descriptor", "Corrupt GIF");
+
+            g->line_size = g->w * 4;
+            g->start_x = x * 4;
+            g->start_y = y * g->line_size;
+            g->max_x   = g->start_x + w * 4;
+            g->max_y   = g->start_y + h * g->line_size;
+            g->cur_x   = g->start_x;
+            g->cur_y   = g->start_y;
+
+            // if the width of the specified rectangle is 0, that means
+            // we may not see *any* pixels or the image is malformed;
+            // to make sure this is caught, move the current y down to
+            // max_y (which is what out_gif_code checks).
+            if (w == 0)
+               g->cur_y = g->max_y;
+
+            g->lflags = stbi__get8(s);
+
+            if (g->lflags & 0x40) {
+               g->step = 8 * g->line_size; // first interlaced spacing
+               g->parse = 3;
+            } else {
+               g->step = g->line_size;
+               g->parse = 0;
+            }
+
+            if (g->lflags & 0x80) {
+               stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1);
+               g->color_table = (stbi_uc *) g->lpal;
+            } else if (g->flags & 0x80) {
+               g->color_table = (stbi_uc *) g->pal;
+            } else
+               return stbi__errpuc("missing color table", "Corrupt GIF");
+
+            o = stbi__process_gif_raster(s, g);
+            if (!o) return NULL;
+
+            // if this was the first frame,
+            pcount = g->w * g->h;
+            if (first_frame && (g->bgindex > 0)) {
+               // if first frame, any pixel not drawn to gets the background color
+               for (pi = 0; pi < pcount; ++pi) {
+                  if (g->history[pi] == 0) {
+                     g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be;
+                     memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 );
+                  }
+               }
+            }
+
+            return o;
+         }
+
+         case 0x21: // Comment Extension.
+         {
+            int len;
+            int ext = stbi__get8(s);
+            if (ext == 0xF9) { // Graphic Control Extension.
+               len = stbi__get8(s);
+               if (len == 4) {
+                  g->eflags = stbi__get8(s);
+                  g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths.
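+                  // Graphic Control Extension payload: one packed byte (disposal
+                  // method in bits 2-4, transparent-color flag in bit 0), a 2-byte
+                  // delay in 1/100 s, then the transparent palette index read below.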
+
+                  // unset old transparent
+                  if (g->transparent >= 0) {
+                     g->pal[g->transparent][3] = 255;
+                  }
+                  if (g->eflags & 0x01) {
+                     g->transparent = stbi__get8(s);
+                     if (g->transparent >= 0) {
+                        g->pal[g->transparent][3] = 0;
+                     }
+                  } else {
+                     // don't need transparent
+                     stbi__skip(s, 1);
+                     g->transparent = -1;
+                  }
+               } else {
+                  stbi__skip(s, len);
+                  break;
+               }
+            }
+            while ((len = stbi__get8(s)) != 0) {
+               stbi__skip(s, len);
+            }
+            break;
+         }
+
+         case 0x3B: // gif stream termination code
+            return (stbi_uc *) s; // using '1' causes warning on some compilers
+
+         default:
+            return stbi__errpuc("unknown code", "Corrupt GIF");
+      }
+   }
+}
+
+static void *stbi__load_gif_main_outofmem(stbi__gif *g, stbi_uc *out, int **delays)
+{
+   STBI_FREE(g->out);
+   STBI_FREE(g->history);
+   STBI_FREE(g->background);
+
+   if (out) STBI_FREE(out);
+   if (delays && *delays) STBI_FREE(*delays);
+   return stbi__errpuc("outofmem", "Out of memory");
+}
+
+static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp)
+{
+   if (stbi__gif_test(s)) {
+      int layers = 0;
+      stbi_uc *u = 0;
+      stbi_uc *out = 0;
+      stbi_uc *two_back = 0;
+      stbi__gif g;
+      int stride;
+      int out_size = 0;
+      int delays_size = 0;
+
+      STBI_NOTUSED(out_size);
+      STBI_NOTUSED(delays_size);
+
+      memset(&g, 0, sizeof(g));
+      if (delays) {
+         *delays = 0;
+      }
+
+      do {
+         u = stbi__gif_load_next(s, &g, comp, req_comp, two_back);
+         if (u == (stbi_uc *) s) u = 0;  // end of animated gif marker
+
+         if (u) {
+            *x = g.w;
+            *y = g.h;
+            ++layers;
+            stride = g.w * g.h * 4;
+
+            if (out) {
+               void *tmp = (stbi_uc*) STBI_REALLOC_SIZED( out, out_size, layers * stride );
+               if (!tmp)
+                  return stbi__load_gif_main_outofmem(&g, out, delays);
+               else {
+                  out = (stbi_uc*) tmp;
+                  out_size = layers * stride;
+               }
+
+               if (delays) {
+                  int *new_delays = (int*) STBI_REALLOC_SIZED( *delays, delays_size, sizeof(int) * layers );
+                  if (!new_delays)
+                     return stbi__load_gif_main_outofmem(&g, out, delays);
+                  *delays = new_delays;
+                  delays_size = layers * sizeof(int);
+               }
+            } else {
+               out = (stbi_uc*)stbi__malloc( layers * stride );
+               if (!out)
+                  return stbi__load_gif_main_outofmem(&g, out, delays);
+               out_size = layers * stride;
+               if (delays) {
+                  *delays = (int*) stbi__malloc( layers * sizeof(int) );
+                  if (!*delays)
+                     return stbi__load_gif_main_outofmem(&g, out, delays);
+                  delays_size = layers * sizeof(int);
+               }
+            }
+            memcpy( out + ((layers - 1) * stride), u, stride );
+            if (layers >= 2) {
+               two_back = out + (layers - 2) * stride; // frame two before the next one; offset from `out`, not before it
+            }
+
+            if (delays) {
+               (*delays)[layers - 1U] = g.delay;
+            }
+         }
+      } while (u != 0);
+
+      // free temp buffer;
+      STBI_FREE(g.out);
+      STBI_FREE(g.history);
+      STBI_FREE(g.background);
+
+      // do the final conversion after loading everything;
+      if (req_comp && req_comp != 4)
+         out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h);
+
+      *z = layers;
+      return out;
+   } else {
+      return stbi__errpuc("not GIF", "Image was not a GIF type.");
+   }
+}
+
+static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+   stbi_uc *u = 0;
+   stbi__gif g;
+   memset(&g, 0, sizeof(g));
+   STBI_NOTUSED(ri);
+
+   u = stbi__gif_load_next(s, &g, comp, req_comp, 0);
+   if (u == (stbi_uc *) s) u = 0; // end of animated gif marker
+   if (u) {
+      *x = g.w;
+      *y = g.h;
+
+      // moved conversion to after successful load so that the same
+      // can be done for multiple frames.
+ if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } else if (g.out) { + // if there was an error and we allocated an image buffer, free it! + STBI_FREE(g.out); + } + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; + // Exponent + f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! 
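+   // The resolution line parsed below looks like "-Y 768 +X 1024": height first,
+   // then width. Only this (flipped-Y, left-to-right X) layout is supported.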
+ token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + if (height > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + if (width > STBI_MAX_DIMENSIONS) return stbi__errpf("too large","Very large image (corrupt?)"); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of sca + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + 
+ if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} +#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + if (p == NULL) { + stbi__rewind( s ); + return 0; + } + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) { + if (info.bpp == 24 && info.ma == 0xff000000) + *comp = 3; + else + *comp = info.ma ? 4 : 3; + } + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + STBI_NOTUSED(stbi__get32be(s)); + STBI_NOTUSED(stbi__get32be(s)); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained,dummy; + stbi__pic_packet packets[10]; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 
4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + ri->bits_per_channel = stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n); + if (ri->bits_per_channel == 0) + return 0; + + if (s->img_y > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + if (s->img_x > STBI_MAX_DIMENSIONS) return stbi__errpuc("too large","Very large image (corrupt?)"); + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + + if (!stbi__mad4sizes_valid(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad4(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8)); + + if (req_comp && req_comp != s->img_n) { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv, dummy; + char c, p, t; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + stbi__rewind(s); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 
3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + if (maxv > 65535) + return stbi__err("max value > 65535", "PPM image supports only 8-bit and 16-bit images"); + else if (maxv > 255) + return 16; + else + return 8; +} + +static int stbi__pnm_is16(stbi__context *s) +{ + if (stbi__pnm_info(s, NULL, NULL, NULL) == 16) + return 1; + return 0; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! + #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_is16(s)) return 1; + #endif + return 0; +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__is_16_main(&s); +} + +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + 
stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__is_16_main(&s); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.20 (2019-02-07) support utf8 filenames in Windows; fix warnings and platform ifdefs + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) 
+      fix to TGA optimization when req_comp != number of components in TGA;
+      fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite)
+      add support for BMP version 5 (more ignored fields)
+ 1.38 (2014-06-06)
+      suppress MSVC warnings on integer casts truncating values
+      fix accidental rename of 'skip' field of I/O
+ 1.37 (2014-06-04)
+      remove duplicate typedef
+ 1.36 (2014-06-03)
+      convert to header file single-file library
+      if de-iphone isn't set, load iphone images color-swapped instead of returning NULL
+ 1.35 (2014-05-27)
+      various warnings
+      fix broken STBI_SIMD path
+      fix bug where stbi_load_from_file no longer left file pointer in correct place
+      fix broken non-easy path for 32-bit BMP (possibly never used)
+      TGA optimization by Arseny Kapoulkine
+ 1.34 (unknown)
+      use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case
+ 1.33 (2011-07-14)
+      make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements
+ 1.32 (2011-07-13)
+      support for "info" function for all supported filetypes (SpartanJ)
+ 1.31 (2011-06-20)
+      a few more leak fixes, bug in PNG handling (SpartanJ)
+ 1.30 (2011-06-11)
+      added ability to load files via callbacks to accommodate custom input streams (Ben Wenger)
+      removed deprecated format-specific test/load functions
+      removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway
+      error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha)
+      fix inefficiency in decoding 32-bit BMP (David Woo)
+ 1.29 (2010-08-16)
+      various warning fixes from Aurelien Pocheville
+ 1.28 (2010-08-01)
+      fix bug in GIF palette transparency (SpartanJ)
+ 1.27 (2010-08-01)
+      cast-to-stbi_uc to fix warnings
+ 1.26 (2010-07-24)
+      fix bug in file buffering for PNG reported by SpartanJ
+ 1.25 (2010-07-17)
+      refix trans_data warning (Won Chun)
+ 1.24 (2010-07-12)
+      perf improvements reading from files on platforms with lock-heavy fgetc()
+      minor perf improvements for jpeg
+      deprecated type-specific functions so we'll get feedback if they're needed
+      attempt to fix trans_data warning (Won Chun)
+ 1.23   fixed bug in iPhone support
+ 1.22 (2010-07-10)
+      removed image *writing* support
+      stbi_info support from Jetro Lauha
+      GIF support from Jean-Marc Lienher
+      iPhone PNG-extensions from James Brown
+      warning-fixes from Nicolas Schulz and Janez Zemva (i.e. Janez Žemva)
+ 1.21   fix use of 'stbi_uc' in header (reported by jon blow)
+ 1.20   added support for Softimage PIC, by Tom Seddon
+ 1.19   bug in interlaced PNG corruption check (found by ryg)
+ 1.18 (2008-08-02)
+      fix a threading bug (local mutable static)
+ 1.17   support interlaced PNG
+ 1.16   major bugfix - stbi__convert_format converted one too many pixels
+ 1.15   initialize some fields for thread safety
+ 1.14   fix threadsafe conversion bug
+      header-file-only version (#define STBI_HEADER_FILE_ONLY before including)
+ 1.13   threadsafe
+ 1.12   const qualifiers in the API
+ 1.11   Support installable IDCT, colorspace conversion routines
+ 1.10   Fixes for 64-bit (don't use "unsigned long")
+      optimized upsampling by Fabian "ryg" Giesen
+ 1.09   Fix format-conversion for PSD code (bad global variables!) 
+ 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. 
+Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/3rdparty/stblib/include/stb_truetype.h b/3rdparty/stblib/include/stb_truetype.h new file mode 100644 index 0000000000..9414a63412 --- /dev/null +++ b/3rdparty/stblib/include/stb_truetype.h @@ -0,0 +1,5017 @@ +// stb_truetype.h - v1.25 - public domain +// authored from 2009-2021 by Sean Barrett / RAD Game Tools +// +// ======================================================================= +// +// NO SECURITY GUARANTEE -- DO NOT USE THIS ON UNTRUSTED FONT FILES +// +// This library does no range checking of the offsets found in the file, +// meaning an attacker can use it to read arbitrary memory. +// +// ======================================================================= +// +// This library processes TrueType files: +// parse files +// extract glyph metrics +// extract glyph shapes +// render glyphs to one-channel bitmaps with antialiasing (box filter) +// render glyphs to one-channel SDF bitmaps (signed-distance field/function) +// +// Todo: +// non-MS cmaps +// crashproof on bad data +// hinting? (no longer patented) +// cleartype-style AA? +// optimize: use simple memory allocator for intermediates +// optimize: build edge-list directly from curves +// optimize: rasterize directly from curves? +// +// ADDITIONAL CONTRIBUTORS +// +// Mikko Mononen: compound shape support, more cmap formats +// Tor Andersson: kerning, subpixel rendering +// Dougall Johnson: OpenType / Type 2 font handling +// Daniel Ribeiro Maciel: basic GPOS-based kerning +// +// Misc other: +// Ryan Gordon +// Simon Glass +// github:IntellectualKitty +// Imanol Celaya +// Daniel Ribeiro Maciel +// +// Bug/warning reports/fixes: +// "Zer" on mollyrocket Fabian "ryg" Giesen github:NiLuJe +// Cass Everitt Martins Mozeiko github:aloucks +// stoiko (Haemimont Games) Cap Petschulat github:oyvindjam +// Brian Hook Omar Cornut github:vassvik +// Walter van Niftrik Ryan Griege +// David Gow Peter LaValle +// David Given Sergey Popov +// Ivan-Assen Ivanov Giumo X. Clanjor +// Anthony Pesch Higor Euripedes +// Johan Duparc Thomas Fields +// Hou Qiming Derek Vinyard +// Rob Loach Cort Stratton +// Kenney Phillis Jr. 
Brian Costabile +// Ken Voskuil (kaesve) +// +// VERSION HISTORY +// +// 1.25 (2021-07-11) many fixes +// 1.24 (2020-02-05) fix warning +// 1.23 (2020-02-02) query SVG data for glyphs; query whole kerning table (but only kern not GPOS) +// 1.22 (2019-08-11) minimize missing-glyph duplication; fix kerning if both 'GPOS' and 'kern' are defined +// 1.21 (2019-02-25) fix warning +// 1.20 (2019-02-07) PackFontRange skips missing codepoints; GetScaleFontVMetrics() +// 1.19 (2018-02-11) GPOS kerning, STBTT_fmod +// 1.18 (2018-01-29) add missing function +// 1.17 (2017-07-23) make more arguments const; doc fix +// 1.16 (2017-07-12) SDF support +// 1.15 (2017-03-03) make more arguments const +// 1.14 (2017-01-16) num-fonts-in-TTC function +// 1.13 (2017-01-02) support OpenType fonts, certain Apple fonts +// 1.12 (2016-10-25) suppress warnings about casting away const with -Wcast-qual +// 1.11 (2016-04-02) fix unused-variable warning +// 1.10 (2016-04-02) user-defined fabs(); rare memory leak; remove duplicate typedef +// 1.09 (2016-01-16) warning fix; avoid crash on outofmem; use allocation userdata properly +// 1.08 (2015-09-13) document stbtt_Rasterize(); fixes for vertical & horizontal edges +// 1.07 (2015-08-01) allow PackFontRanges to accept arrays of sparse codepoints; +// variant PackFontRanges to pack and render in separate phases; +// fix stbtt_GetFontOFfsetForIndex (never worked for non-0 input?); +// fixed an assert() bug in the new rasterizer +// replace assert() with STBTT_assert() in new rasterizer +// +// Full history can be found at the end of this file. +// +// LICENSE +// +// See end of file for license information. +// +// USAGE +// +// Include this file in whatever places need to refer to it. In ONE C/C++ +// file, write: +// #define STB_TRUETYPE_IMPLEMENTATION +// before the #include of this file. This expands out the actual +// implementation into that C/C++ file. +// +// To make the implementation private to the file that generates the implementation, +// #define STBTT_STATIC +// +// Simple 3D API (don't ship this, but it's fine for tools and quick start) +// stbtt_BakeFontBitmap() -- bake a font to a bitmap for use as texture +// stbtt_GetBakedQuad() -- compute quad to draw for a given char +// +// Improved 3D API (more shippable): +// #include "stb_rect_pack.h" -- optional, but you really want it +// stbtt_PackBegin() +// stbtt_PackSetOversampling() -- for improved quality on small fonts +// stbtt_PackFontRanges() -- pack and renders +// stbtt_PackEnd() +// stbtt_GetPackedQuad() +// +// "Load" a font file from a memory buffer (you have to keep the buffer loaded) +// stbtt_InitFont() +// stbtt_GetFontOffsetForIndex() -- indexing for TTC font collections +// stbtt_GetNumberOfFonts() -- number of fonts for TTC font collections +// +// Render a unicode codepoint to a bitmap +// stbtt_GetCodepointBitmap() -- allocates and returns a bitmap +// stbtt_MakeCodepointBitmap() -- renders into bitmap you provide +// stbtt_GetCodepointBitmapBox() -- how big the bitmap must be +// +// Character advance/positioning +// stbtt_GetCodepointHMetrics() +// stbtt_GetFontVMetrics() +// stbtt_GetFontVMetricsOS2() +// stbtt_GetCodepointKernAdvance() +// +// Starting with version 1.06, the rasterizer was replaced with a new, +// faster and generally-more-precise rasterizer. The new rasterizer more +// accurately measures pixel coverage for anti-aliasing, except in the case +// where multiple shapes overlap, in which case it overestimates the AA pixel +// coverage. 
Thus, anti-aliasing of intersecting shapes may look wrong. If +// this turns out to be a problem, you can re-enable the old rasterizer with +// #define STBTT_RASTERIZER_VERSION 1 +// which will incur about a 15% speed hit. +// +// ADDITIONAL DOCUMENTATION +// +// Immediately after this block comment are a series of sample programs. +// +// After the sample programs is the "header file" section. This section +// includes documentation for each API function. +// +// Some important concepts to understand to use this library: +// +// Codepoint +// Characters are defined by unicode codepoints, e.g. 65 is +// uppercase A, 231 is lowercase c with a cedilla, 0x7e30 is +// the hiragana for "ma". +// +// Glyph +// A visual character shape (every codepoint is rendered as +// some glyph) +// +// Glyph index +// A font-specific integer ID representing a glyph +// +// Baseline +// Glyph shapes are defined relative to a baseline, which is the +// bottom of uppercase characters. Characters extend both above +// and below the baseline. +// +// Current Point +// As you draw text to the screen, you keep track of a "current point" +// which is the origin of each character. The current point's vertical +// position is the baseline. Even "baked fonts" use this model. +// +// Vertical Font Metrics +// The vertical qualities of the font, used to vertically position +// and space the characters. See docs for stbtt_GetFontVMetrics. +// +// Font Size in Pixels or Points +// The preferred interface for specifying font sizes in stb_truetype +// is to specify how tall the font's vertical extent should be in pixels. +// If that sounds good enough, skip the next paragraph. +// +// Most font APIs instead use "points", which are a common typographic +// measurement for describing font size, defined as 72 points per inch. +// stb_truetype provides a point API for compatibility. However, true +// "per inch" conventions don't make much sense on computer displays +// since different monitors have different number of pixels per +// inch. For example, Windows traditionally uses a convention that +// there are 96 pixels per inch, thus making 'inch' measurements have +// nothing to do with inches, and thus effectively defining a point to +// be 1.333 pixels. Additionally, the TrueType font data provides +// an explicit scale factor to scale a given font's glyphs to points, +// but the author has observed that this scale factor is often wrong +// for non-commercial fonts, thus making fonts scaled in points +// according to the TrueType spec incoherently sized in practice. +// +// DETAILED USAGE: +// +// Scale: +// Select how high you want the font to be, in points or pixels. +// Call ScaleForPixelHeight or ScaleForMappingEmToPixels to compute +// a scale factor SF that will be used by all other functions. +// +// Baseline: +// You need to select a y-coordinate that is the baseline of where +// your text will appear. Call GetFontBoundingBox to get the baseline-relative +// bounding box for all characters. SF*-y0 will be the distance in pixels +// that the worst-case character could extend above the baseline, so if +// you want the top edge of characters to appear at the top of the +// screen where y=0, then you would set the baseline to SF*-y0. +// +// Current point: +// Set the current point where the first character will appear. The +// first character could extend left of the current point; this is font +// dependent. 
You can either choose a current point that is the leftmost
+// point and hope, or add some padding, or check the bounding box or
+// left-side-bearing of the first character to be displayed and set
+// the current point based on that.
+//
+// Displaying a character:
+//    Compute the bounding box of the character. It will contain signed values
+//    relative to <current_point, baseline>. I.e. if it returns x0,y0,x1,y1,
+//    then the character should be displayed in the rectangle from
+//    <current_point+SF*x0, baseline+SF*y0> to <current_point+SF*x1,baseline+SF*y1>.
+//
+// Advancing for the next character:
+//    Call GlyphHMetrics, and compute 'current_point += SF * advance'.
+//
+//
+// ADVANCED USAGE
+//
+//   Quality:
+//
+//    - Use the functions with Subpixel at the end to allow your characters
+//      to have subpixel positioning. Since the font is anti-aliased, not
+//      hinted, this is very important for quality. (This is not possible with
+//      baked fonts.)
+//
+//    - Kerning is now supported, and if you're supporting subpixel rendering
+//      then kerning is worth using to give your text a polished look.
+//
+//   Performance:
+//
+//    - Convert Unicode codepoints to glyph indexes and operate on the glyphs;
+//      if you don't do this, stb_truetype is forced to do the conversion on
+//      every call.
+//
+//    - There are a lot of memory allocations. We should modify it to take
+//      a temp buffer and allocate from the temp buffer (without freeing),
+//      should help performance a lot.
+//
+// NOTES
+//
+//   The system uses the raw data found in the .ttf file without changing it
+//   and without building auxiliary data structures. This is a bit inefficient
+//   on little-endian systems (the data is big-endian), but assuming you're
+//   caching the bitmaps or glyph shapes this shouldn't be a big deal.
+//
+//   It appears to be very hard to programmatically determine what font a
+//   given file is in a general way. I provide an API for this, but I don't
+//   document it because it's not fast or reliable. (The string API is probably
+//   better, but absolutely doesn't match the system font APIs.)
+//
+// PERFORMANCE MEASUREMENTS FOR 1.06:
+//
+//                      32-bit     64-bit
+//   Previous release:  8.83 s     7.68 s
+//   Pool allocations:  7.72 s     6.34 s
+//   Inline sort     :  6.54 s     5.65 s
+//   New rasterizer  :  5.63 s     5.00 s
+
+//////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////
+////
+////   SAMPLE PROGRAMS
+////
+//
+//  Incomplete text-in-3d-api example, which draws quads properly aligned to be lossless.
+//  See "tests/truetype_demo_win32.c" for a complete version.
+#if 0
+#define STB_TRUETYPE_IMPLEMENTATION  // force following include to generate implementation
+#include "stb_truetype.h"
+
+unsigned char ttf_buffer[1<<20];
+unsigned char temp_bitmap[512*512];
+
+stbtt_bakedchar cdata[96]; // ASCII 32..126 is 95 glyphs
+GLuint ftex;
+
+void my_stbtt_initfont(void)
+{
+   fread(ttf_buffer, 1, 1<<20, fopen("c:/windows/fonts/times.ttf", "rb"));
+   stbtt_BakeFontBitmap(ttf_buffer,0, 32.0, temp_bitmap,512,512, 32,96, cdata); // no guarantee this fits!
+   // can free ttf_buffer at this point
+   glGenTextures(1, &ftex);
+   glBindTexture(GL_TEXTURE_2D, ftex);
+   glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 512,512, 0, GL_ALPHA, GL_UNSIGNED_BYTE, temp_bitmap);
+   // can free temp_bitmap at this point
+   glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+}
+
+void my_stbtt_print(float x, float y, char *text)
+{
+   // assume orthographic projection with units = screen pixels, origin at top left
+   glEnable(GL_BLEND);
+   glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+   glEnable(GL_TEXTURE_2D);
+   glBindTexture(GL_TEXTURE_2D, ftex);
+   glBegin(GL_QUADS);
+   while (*text) {
+      if (*text >= 32 && *text < 128) {
+         stbtt_aligned_quad q;
+         stbtt_GetBakedQuad(cdata, 512,512, *text-32, &x,&y,&q,1);//1=opengl & d3d10+,0=d3d9
+         glTexCoord2f(q.s0,q.t1); glVertex2f(q.x0,q.y0);
+         glTexCoord2f(q.s1,q.t1); glVertex2f(q.x1,q.y0);
+         glTexCoord2f(q.s1,q.t0); glVertex2f(q.x1,q.y1);
+         glTexCoord2f(q.s0,q.t0); glVertex2f(q.x0,q.y1);
+      }
+      ++text;
+   }
+   glEnd();
+}
+#endif
+//
+//
+//////////////////////////////////////////////////////////////////////////////
+//
+// Complete program (this compiles): get a single bitmap, print as ASCII art
+//
+#if 0
+#include <stdio.h>
+#define STB_TRUETYPE_IMPLEMENTATION  // force following include to generate implementation
+#include "stb_truetype.h"
+
+char ttf_buffer[1<<25];
+
+int main(int argc, char **argv)
+{
+   stbtt_fontinfo font;
+   unsigned char *bitmap;
+   int w,h,i,j,c = (argc > 1 ? atoi(argv[1]) : 'a'), s = (argc > 2 ? atoi(argv[2]) : 20);
+
+   fread(ttf_buffer, 1, 1<<25, fopen(argc > 3 ? argv[3] : "c:/windows/fonts/arialbd.ttf", "rb"));
+
+   stbtt_InitFont(&font, ttf_buffer, stbtt_GetFontOffsetForIndex(ttf_buffer,0));
+   bitmap = stbtt_GetCodepointBitmap(&font, 0,stbtt_ScaleForPixelHeight(&font, s), c, &w, &h, 0,0);
+
+   for (j=0; j < h; ++j) {
+      for (i=0; i < w; ++i)
+         putchar(" .:ioVM@"[bitmap[j*w+i]>>5]);
+      putchar('\n');
+   }
+   return 0;
+}
+#endif
+//
+// Output:
+//
+//     .ii.
+//    @@@@@@.
+//   V@Mio@@o
+//   :i.  V@V
+//     :oM@@M
+//   :@@@MM@M
+//   @@o  o@M
+//  :@@.  M@M
+//   @@@o@@@@
+//   :M@@V:@@.
+//
+//////////////////////////////////////////////////////////////////////////////
+//
+// Complete program: print "Hello World!" banner, with bugs
+//
+#if 0
+char buffer[24<<20];
+unsigned char screen[20][79];
+
+int main(int arg, char **argv)
+{
+   stbtt_fontinfo font;
+   int i,j,ascent,baseline,ch=0;
+   float scale, xpos=2; // leave a little padding in case the character extends left
+   char *text = "Heljo World!"; // intentionally misspelled to show 'lj' brokenness
+
+   fread(buffer, 1, 1000000, fopen("c:/windows/fonts/arialbd.ttf", "rb"));
+   stbtt_InitFont(&font, buffer, 0);
+
+   scale = stbtt_ScaleForPixelHeight(&font, 15);
+   stbtt_GetFontVMetrics(&font, &ascent,0,0);
+   baseline = (int) (ascent*scale);
+
+   while (text[ch]) {
+      int advance,lsb,x0,y0,x1,y1;
+      float x_shift = xpos - (float) floor(xpos);
+      stbtt_GetCodepointHMetrics(&font, text[ch], &advance, &lsb);
+      stbtt_GetCodepointBitmapBoxSubpixel(&font, text[ch], scale,scale,x_shift,0, &x0,&y0,&x1,&y1);
+      stbtt_MakeCodepointBitmapSubpixel(&font, &screen[baseline + y0][(int) xpos + x0], x1-x0,y1-y0, 79, scale,scale,x_shift,0, text[ch]);
+      // note that this stomps the old data, so where character boxes overlap (e.g. 'lj') it's wrong
+      // because this API is really for baking character bitmaps into textures. if you want to render
if you want to render
+      // a sequence of characters, you really need to render each bitmap to a temp buffer, then
+      // "alpha blend" that into the working buffer
+      xpos += (advance * scale);
+      if (text[ch+1])
+         xpos += scale*stbtt_GetCodepointKernAdvance(&font, text[ch],text[ch+1]);
+      ++ch;
+   }
+
+   for (j=0; j < 20; ++j) {
+      for (i=0; i < 78; ++i)
+         putchar(" .:ioVM@"[screen[j][i]>>5]);
+      putchar('\n');
+   }
+
+   return 0;
+}
+#endif
+
+
+//////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////
+////
+////   INTEGRATION WITH YOUR CODEBASE
+////
+////   The following sections allow you to supply alternate definitions
+////   of C library functions used by stb_truetype, e.g. if you don't
+////   link with the C runtime library.
+
+#ifdef STB_TRUETYPE_IMPLEMENTATION
+   // #define your own (u)stbtt_int8/16/32 before including to override this
+   #ifndef stbtt_uint8
+   typedef unsigned char   stbtt_uint8;
+   typedef signed   char   stbtt_int8;
+   typedef unsigned short  stbtt_uint16;
+   typedef signed   short  stbtt_int16;
+   typedef unsigned int    stbtt_uint32;
+   typedef signed   int    stbtt_int32;
+   #endif
+
+   typedef char stbtt__check_size32[sizeof(stbtt_int32)==4 ? 1 : -1];
+   typedef char stbtt__check_size16[sizeof(stbtt_int16)==2 ? 1 : -1];
+
+   // e.g. #define your own STBTT_ifloor/STBTT_iceil() to avoid math.h
+   #ifndef STBTT_ifloor
+   #include <math.h>
+   #define STBTT_ifloor(x)   ((int) floor(x))
+   #define STBTT_iceil(x)    ((int) ceil(x))
+   #endif
+
+   #ifndef STBTT_sqrt
+   #include <math.h>
+   #define STBTT_sqrt(x)      sqrt(x)
+   #define STBTT_pow(x,y)     pow(x,y)
+   #endif
+
+   #ifndef STBTT_fmod
+   #include <math.h>
+   #define STBTT_fmod(x,y)    fmod(x,y)
+   #endif
+
+   #ifndef STBTT_cos
+   #include <math.h>
+   #define STBTT_cos(x)       cos(x)
+   #define STBTT_acos(x)      acos(x)
+   #endif
+
+   #ifndef STBTT_fabs
+   #include <math.h>
+   #define STBTT_fabs(x)      fabs(x)
+   #endif
+
+   // #define your own functions "STBTT_malloc" / "STBTT_free" to avoid malloc.h
+   #ifndef STBTT_malloc
+   #include <stdlib.h>
+   #define STBTT_malloc(x,u)  ((void)(u),malloc(x))
+   #define STBTT_free(x,u)    ((void)(u),free(x))
+   #endif
+
+   #ifndef STBTT_assert
+   #include <assert.h>
+   #define STBTT_assert(x)    assert(x)
+   #endif
+
+   #ifndef STBTT_strlen
+   #include <string.h>
+   #define STBTT_strlen(x)    strlen(x)
+   #endif
+
+   #ifndef STBTT_memcpy
+   #include <string.h>
+   #define STBTT_memcpy       memcpy
+   #define STBTT_memset       memset
+   #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+////
+////   INTERFACE
+////
+////
+
+#ifndef __STB_INCLUDE_STB_TRUETYPE_H__
+#define __STB_INCLUDE_STB_TRUETYPE_H__
+
+#ifdef STBTT_STATIC
+#define STBTT_DEF static
+#else
+#define STBTT_DEF extern
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// private structure
+typedef struct
+{
+   unsigned char *data;
+   int cursor;
+   int size;
+} stbtt__buf;
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// TEXTURE BAKING API
+//
+// If you use this API, you only have to call two functions ever.
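+//
+// (Editor's note, not part of the upstream docs: the two functions are
+// stbtt_BakeFontBitmap and stbtt_GetBakedQuad, declared just below. A minimal
+// sketch, assuming ttf_buffer has already been filled with the font file:)
+//
+#if 0
+unsigned char ttf_buffer[1<<20];   // hypothetical: filled by fread elsewhere
+unsigned char atlas[512*512];      // 1-channel bitmap the bake call fills in
+stbtt_bakedchar cdata[96];         // ASCII 32..126
+
+void bake_once(void)
+{
+   stbtt_BakeFontBitmap(ttf_buffer, 0, 32.0f, atlas, 512,512, 32,96, cdata);
+}
+
+void quad_for_char(char c, float *x, float *y, stbtt_aligned_quad *q)
+{
+   stbtt_GetBakedQuad(cdata, 512,512, c - 32, x, y, q, 1); // advances *x past the char
+}
+#endif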
+// + +typedef struct +{ + unsigned short x0,y0,x1,y1; // coordinates of bbox in bitmap + float xoff,yoff,xadvance; +} stbtt_bakedchar; + +STBTT_DEF int stbtt_BakeFontBitmap(const unsigned char *data, int offset, // font location (use offset=0 for plain .ttf) + float pixel_height, // height of font in pixels + unsigned char *pixels, int pw, int ph, // bitmap to be filled in + int first_char, int num_chars, // characters to bake + stbtt_bakedchar *chardata); // you allocate this, it's num_chars long +// if return is positive, the first unused row of the bitmap +// if return is negative, returns the negative of the number of characters that fit +// if return is 0, no characters fit and no rows were used +// This uses a very crappy packing. + +typedef struct +{ + float x0,y0,s0,t0; // top-left + float x1,y1,s1,t1; // bottom-right +} stbtt_aligned_quad; + +STBTT_DEF void stbtt_GetBakedQuad(const stbtt_bakedchar *chardata, int pw, int ph, // same data as above + int char_index, // character to display + float *xpos, float *ypos, // pointers to current position in screen pixel space + stbtt_aligned_quad *q, // output: quad to draw + int opengl_fillrule); // true if opengl fill rule; false if DX9 or earlier +// Call GetBakedQuad with char_index = 'character - first_char', and it +// creates the quad you need to draw and advances the current position. +// +// The coordinate system used assumes y increases downwards. +// +// Characters will extend both above and below the current position; +// see discussion of "BASELINE" above. +// +// It's inefficient; you might want to c&p it and optimize it. + +STBTT_DEF void stbtt_GetScaledFontVMetrics(const unsigned char *fontdata, int index, float size, float *ascent, float *descent, float *lineGap); +// Query the font vertical metrics without having to create a font first. + + +////////////////////////////////////////////////////////////////////////////// +// +// NEW TEXTURE BAKING API +// +// This provides options for packing multiple fonts into one atlas, not +// perfectly but better than nothing. + +typedef struct +{ + unsigned short x0,y0,x1,y1; // coordinates of bbox in bitmap + float xoff,yoff,xadvance; + float xoff2,yoff2; +} stbtt_packedchar; + +typedef struct stbtt_pack_context stbtt_pack_context; +typedef struct stbtt_fontinfo stbtt_fontinfo; +#ifndef STB_RECT_PACK_VERSION +typedef struct stbrp_rect stbrp_rect; +#endif + +STBTT_DEF int stbtt_PackBegin(stbtt_pack_context *spc, unsigned char *pixels, int width, int height, int stride_in_bytes, int padding, void *alloc_context); +// Initializes a packing context stored in the passed-in stbtt_pack_context. +// Future calls using this context will pack characters into the bitmap passed +// in here: a 1-channel bitmap that is width * height. stride_in_bytes is +// the distance from one row to the next (or 0 to mean they are packed tightly +// together). "padding" is the amount of padding to leave between each +// character (normally you want '1' for bitmaps you'll use as textures with +// bilinear filtering). +// +// Returns 0 on failure, 1 on success. + +STBTT_DEF void stbtt_PackEnd (stbtt_pack_context *spc); +// Cleans up the packing context and frees all memory. 
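+
+// (Editor's note: a minimal sketch of the usual begin/range/end sequence,
+// using stbtt_PackFontRange and stbtt_PackSetOversampling declared below;
+// ttf_data is assumed to already contain the whole font file:)
+#if 0
+unsigned char atlas[1024*1024];
+stbtt_packedchar chars[96];
+
+int pack_ascii(const unsigned char *ttf_data)
+{
+   stbtt_pack_context pc;
+   if (!stbtt_PackBegin(&pc, atlas, 1024,1024, 0, 1, NULL)) // padding=1 for bilinear filtering
+      return 0;
+   stbtt_PackSetOversampling(&pc, 2, 2);  // optional; see docs below
+   stbtt_PackFontRange(&pc, ttf_data, 0, 24.0f, 32, 96, chars);
+   stbtt_PackEnd(&pc);
+   return 1;
+}
+#endif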
+
+#define STBTT_POINT_SIZE(x)   (-(x))
+
+STBTT_DEF int  stbtt_PackFontRange(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, float font_size,
+                                int first_unicode_char_in_range, int num_chars_in_range, stbtt_packedchar *chardata_for_range);
+// Creates character bitmaps from the font_index'th font found in fontdata (use
+// font_index=0 if you don't know what that is). It creates num_chars_in_range
+// bitmaps for characters with unicode values starting at first_unicode_char_in_range
+// and increasing. Data for how to render them is stored in chardata_for_range;
+// pass these to stbtt_GetPackedQuad to get back renderable quads.
+//
+// font_size is the full height of the character from ascender to descender,
+// as computed by stbtt_ScaleForPixelHeight. To use a point size as computed
+// by stbtt_ScaleForMappingEmToPixels, wrap the point size in STBTT_POINT_SIZE()
+// and pass that result as 'font_size':
+//       ...,                  20 , ... // font max minus min y is 20 pixels tall
+//       ..., STBTT_POINT_SIZE(20), ... // 'M' is 20 pixels tall
+
+typedef struct
+{
+   float font_size;
+   int first_unicode_codepoint_in_range;  // if non-zero, then the chars are continuous, and this is the first codepoint
+   int *array_of_unicode_codepoints;      // if non-zero, then this is an array of unicode codepoints
+   int num_chars;
+   stbtt_packedchar *chardata_for_range;  // output
+   unsigned char h_oversample, v_oversample; // don't set these, they're used internally
+} stbtt_pack_range;
+
+STBTT_DEF int  stbtt_PackFontRanges(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, stbtt_pack_range *ranges, int num_ranges);
+// Creates character bitmaps from multiple ranges of characters stored in
+// ranges. This will usually create a better-packed bitmap than multiple
+// calls to stbtt_PackFontRange. Note that you can call this multiple
+// times within a single PackBegin/PackEnd.
+
+STBTT_DEF void stbtt_PackSetOversampling(stbtt_pack_context *spc, unsigned int h_oversample, unsigned int v_oversample);
+// Oversampling a font increases the quality by allowing higher-quality subpixel
+// positioning, and is especially valuable at smaller text sizes.
+//
+// This function sets the amount of oversampling for all following calls to
+// stbtt_PackFontRange(s) or stbtt_PackFontRangesGatherRects for a given
+// pack context. The default (no oversampling) is achieved by h_oversample=1
+// and v_oversample=1. The total number of pixels required is
+// h_oversample*v_oversample larger than the default; for example, 2x2
+// oversampling requires 4x the storage of 1x1. For best results, render
+// oversampled textures with bilinear filtering. Look at the readme in
+// stb/tests/oversample for information about oversampled fonts.
+//
+// To use with PackFontRangesGather etc., you must set it before calls to
+// PackFontRangesGatherRects.
+
+STBTT_DEF void stbtt_PackSetSkipMissingCodepoints(stbtt_pack_context *spc, int skip);
+// If skip != 0, this tells stb_truetype to skip any codepoints for which
+// there is no corresponding glyph. If skip=0, which is the default, then
+// codepoints without a glyph receive the font's "missing character" glyph,
+// typically an empty box by convention.
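+
+// (Editor's note: a minimal sketch of packing two discontiguous ranges in one
+// stbtt_PackFontRanges call, which usually packs tighter than two separate
+// stbtt_PackFontRange calls; pc, ttf_data and the output arrays are hypothetical:)
+#if 0
+stbtt_packedchar ascii_chars[95], latin1_chars[96];
+
+void pack_two_ranges(stbtt_pack_context *pc, const unsigned char *ttf_data)
+{
+   stbtt_pack_range ranges[2] = {{0}};  // zero-initialize all fields first
+   ranges[0].font_size = 20.0f;
+   ranges[0].first_unicode_codepoint_in_range = 32;    // ASCII, 32..126
+   ranges[0].num_chars = 95;
+   ranges[0].chardata_for_range = ascii_chars;
+   ranges[1].font_size = 20.0f;
+   ranges[1].first_unicode_codepoint_in_range = 0xA0;  // Latin-1 supplement
+   ranges[1].num_chars = 96;
+   ranges[1].chardata_for_range = latin1_chars;
+   stbtt_PackFontRanges(pc, ttf_data, 0, ranges, 2);
+}
+#endif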
+
+STBTT_DEF void stbtt_GetPackedQuad(const stbtt_packedchar *chardata, int pw, int ph,  // same data as above
+                               int char_index,             // character to display
+                               float *xpos, float *ypos,   // pointers to current position in screen pixel space
+                               stbtt_aligned_quad *q,      // output: quad to draw
+                               int align_to_integer);
+
+STBTT_DEF int  stbtt_PackFontRangesGatherRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects);
+STBTT_DEF void stbtt_PackFontRangesPackRects(stbtt_pack_context *spc, stbrp_rect *rects, int num_rects);
+STBTT_DEF int  stbtt_PackFontRangesRenderIntoRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects);
+// Calling these functions in sequence is roughly equivalent to calling
+// stbtt_PackFontRanges(). If you want more control over the packing of multiple
+// fonts, or if you want to pack custom data into a font texture, take a look
+// at the source of stbtt_PackFontRanges() and create a custom version
+// using these functions, e.g. call GatherRects multiple times,
+// building up a single array of rects, then call PackRects once,
+// then call RenderIntoRects repeatedly. This may result in a
+// better packing than calling PackFontRanges multiple times
+// (or it may not).
+
+// this is an opaque structure that you shouldn't mess with which holds
+// all the context needed from PackBegin to PackEnd.
+struct stbtt_pack_context {
+   void *user_allocator_context;
+   void *pack_info;
+   int   width;
+   int   height;
+   int   stride_in_bytes;
+   int   padding;
+   int   skip_missing;
+   unsigned int   h_oversample, v_oversample;
+   unsigned char *pixels;
+   void  *nodes;
+};
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// FONT LOADING
+//
+//
+
+STBTT_DEF int stbtt_GetNumberOfFonts(const unsigned char *data);
+// This function will determine the number of fonts in a font file. TrueType
+// collection (.ttc) files may contain multiple fonts, while TrueType font
+// (.ttf) files only contain one font. The number of fonts can be used for
+// indexing with stbtt_GetFontOffsetForIndex below, where the index is between
+// zero and one less than the total number of fonts. If an error occurs, -1 is returned.
+
+STBTT_DEF int stbtt_GetFontOffsetForIndex(const unsigned char *data, int index);
+// Each .ttf/.ttc file may have more than one font. Each font has a sequential
+// index number starting from 0. Call this function to get the font offset for
+// a given index; it returns -1 if the index is out of range. A regular .ttf
+// file will only define one font and it will always be at offset 0, so it will
+// return '0' for index 0, and -1 for all other indices.
+
+// The following structure is defined publicly so you can declare one on
+// the stack or as a global or etc, but you should treat it as opaque.
+struct stbtt_fontinfo
+{
+   void           * userdata;
+   unsigned char  * data;         // pointer to .ttf file
+   int              fontstart;    // offset of start of font
+
+   int numGlyphs;                 // number of glyphs, needed for range checking
+
+   int loca,head,glyf,hhea,hmtx,kern,gpos,svg; // table locations as offset from start of .ttf
+   int index_map;                 // a cmap mapping for our chosen character encoding
+   int indexToLocFormat;          // format needed to map from glyph index to glyph
+
+   stbtt__buf cff;                // cff font data
+   stbtt__buf charstrings;        // the charstring index
+   stbtt__buf gsubrs;             // global charstring subroutines index
+   stbtt__buf subrs;              // private charstring subroutines index
+   stbtt__buf fontdicts;          // array of font dicts
+   stbtt__buf fdselect;           // map from glyph to fontdict
+};
+
+STBTT_DEF int stbtt_InitFont(stbtt_fontinfo *info, const unsigned char *data, int offset);
+// Given an offset into the file that defines a font, this function builds
+// the necessary cached info for the rest of the system. You must allocate
+// the stbtt_fontinfo yourself, and stbtt_InitFont will fill it out. You don't
+// need to do anything special to free it, because the contents are pure
+// value data with no additional data structures. Returns 0 on failure.
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// CHARACTER TO GLYPH-INDEX CONVERSION
+
+STBTT_DEF int stbtt_FindGlyphIndex(const stbtt_fontinfo *info, int unicode_codepoint);
+// If you're going to perform multiple operations on the same character
+// and you want a speed-up, call this function with the character you're
+// going to process, then use glyph-based functions instead of the
+// codepoint-based functions.
+// Returns 0 if the character codepoint is not defined in the font.
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// CHARACTER PROPERTIES
+//
+
+STBTT_DEF float stbtt_ScaleForPixelHeight(const stbtt_fontinfo *info, float pixels);
+// computes a scale factor to produce a font whose "height" is 'pixels' tall.
+// Height is measured as the distance from the highest ascender to the lowest
+// descender; in other words, it's equivalent to calling stbtt_GetFontVMetrics
+// and computing:
+//       scale = pixels / (ascent - descent)
+// so if you prefer to measure height by the ascent only, use a similar calculation.
+
+STBTT_DEF float stbtt_ScaleForMappingEmToPixels(const stbtt_fontinfo *info, float pixels);
+// computes a scale factor to produce a font whose EM size is mapped to
+// 'pixels' tall. This is probably what traditional APIs compute, but
+// I'm not positive.
+
+STBTT_DEF void stbtt_GetFontVMetrics(const stbtt_fontinfo *info, int *ascent, int *descent, int *lineGap);
+// ascent is the coordinate above the baseline the font extends; descent
+// is the coordinate below the baseline the font extends (i.e. it is typically negative)
+// lineGap is the spacing between one row's descent and the next row's ascent...
+// so you should advance the vertical position by "*ascent - *descent + *lineGap"
+// these are expressed in unscaled coordinates, so you must multiply by
+// the scale factor for a given size
+
+STBTT_DEF int  stbtt_GetFontVMetricsOS2(const stbtt_fontinfo *info, int *typoAscent, int *typoDescent, int *typoLineGap);
+// analogous to GetFontVMetrics, but returns the "typographic" values from the OS/2
+// table (specific to MS/Windows TTF files).
+//
+// Returns 1 on success (table present), 0 on failure.
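+
+// (Editor's note: a minimal sketch of the metric arithmetic described above,
+// computing the baseline-to-baseline advance for a 20-pixel-tall font;
+// 'data' is assumed to hold the whole font file:)
+#if 0
+float line_advance_for_20px(const unsigned char *data)
+{
+   stbtt_fontinfo font;
+   int ascent, descent, line_gap;
+   float scale;
+   if (!stbtt_InitFont(&font, data, stbtt_GetFontOffsetForIndex(data, 0)))
+      return 0.0f;
+   scale = stbtt_ScaleForPixelHeight(&font, 20.0f);  // 20 = scale*(ascent - descent)
+   stbtt_GetFontVMetrics(&font, &ascent, &descent, &line_gap);
+   return scale * (ascent - descent + line_gap);     // per the doc comment above
+}
+#endif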
+ +STBTT_DEF void stbtt_GetFontBoundingBox(const stbtt_fontinfo *info, int *x0, int *y0, int *x1, int *y1); +// the bounding box around all possible characters + +STBTT_DEF void stbtt_GetCodepointHMetrics(const stbtt_fontinfo *info, int codepoint, int *advanceWidth, int *leftSideBearing); +// leftSideBearing is the offset from the current horizontal position to the left edge of the character +// advanceWidth is the offset from the current horizontal position to the next horizontal position +// these are expressed in unscaled coordinates + +STBTT_DEF int stbtt_GetCodepointKernAdvance(const stbtt_fontinfo *info, int ch1, int ch2); +// an additional amount to add to the 'advance' value between ch1 and ch2 + +STBTT_DEF int stbtt_GetCodepointBox(const stbtt_fontinfo *info, int codepoint, int *x0, int *y0, int *x1, int *y1); +// Gets the bounding box of the visible part of the glyph, in unscaled coordinates + +STBTT_DEF void stbtt_GetGlyphHMetrics(const stbtt_fontinfo *info, int glyph_index, int *advanceWidth, int *leftSideBearing); +STBTT_DEF int stbtt_GetGlyphKernAdvance(const stbtt_fontinfo *info, int glyph1, int glyph2); +STBTT_DEF int stbtt_GetGlyphBox(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1); +// as above, but takes one or more glyph indices for greater efficiency + +typedef struct stbtt_kerningentry +{ + int glyph1; // use stbtt_FindGlyphIndex + int glyph2; + int advance; +} stbtt_kerningentry; + +STBTT_DEF int stbtt_GetKerningTableLength(const stbtt_fontinfo *info); +STBTT_DEF int stbtt_GetKerningTable(const stbtt_fontinfo *info, stbtt_kerningentry* table, int table_length); +// Retrieves a complete list of all of the kerning pairs provided by the font +// stbtt_GetKerningTable never writes more than table_length entries and returns how many entries it did write. +// The table will be sorted by (a.glyph1 == b.glyph1)?(a.glyph2 < b.glyph2):(a.glyph1 < b.glyph1) + +////////////////////////////////////////////////////////////////////////////// +// +// GLYPH SHAPES (you probably don't need these, but they have to go before +// the bitmaps for C declaration-order reasons) +// + +#ifndef STBTT_vmove // you can predefine these to use different values (but why?) + enum { + STBTT_vmove=1, + STBTT_vline, + STBTT_vcurve, + STBTT_vcubic + }; +#endif + +#ifndef stbtt_vertex // you can predefine this to use different values + // (we share this with other code at RAD) + #define stbtt_vertex_type short // can't use stbtt_int16 because that's not visible in the header file + typedef struct + { + stbtt_vertex_type x,y,cx,cy,cx1,cy1; + unsigned char type,padding; + } stbtt_vertex; +#endif + +STBTT_DEF int stbtt_IsGlyphEmpty(const stbtt_fontinfo *info, int glyph_index); +// returns non-zero if nothing is drawn for this glyph + +STBTT_DEF int stbtt_GetCodepointShape(const stbtt_fontinfo *info, int unicode_codepoint, stbtt_vertex **vertices); +STBTT_DEF int stbtt_GetGlyphShape(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **vertices); +// returns # of vertices and fills *vertices with the pointer to them +// these are expressed in "unscaled" coordinates +// +// The shape is a series of contours. Each one starts with +// a STBTT_moveto, then consists of a series of mixed +// STBTT_lineto and STBTT_curveto segments. A lineto +// draws a line from previous endpoint to its x,y; a curveto +// draws a quadratic bezier from previous endpoint to +// its x,y, using cx,cy as the bezier control point. 
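+
+// (Editor's note: a minimal sketch of walking the contours returned by
+// stbtt_GetCodepointShape; stbtt_FreeShape, declared just below, releases
+// the vertex array. Assumes <stdio.h> for printf:)
+#if 0
+#include <stdio.h>
+
+void dump_shape(const stbtt_fontinfo *font, int codepoint)
+{
+   stbtt_vertex *v;
+   int i, n = stbtt_GetCodepointShape(font, codepoint, &v);
+   for (i=0; i < n; ++i) {
+      switch (v[i].type) {
+         case STBTT_vmove:  printf("move  %d,%d\n", v[i].x, v[i].y); break;
+         case STBTT_vline:  printf("line  %d,%d\n", v[i].x, v[i].y); break;
+         case STBTT_vcurve: printf("quad  %d,%d  ctl %d,%d\n", v[i].x, v[i].y, v[i].cx, v[i].cy); break;
+         case STBTT_vcubic: printf("cubic %d,%d\n", v[i].x, v[i].y); break; // CFF fonts only
+      }
+   }
+   stbtt_FreeShape(font, v);
+}
+#endif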
+
+STBTT_DEF void stbtt_FreeShape(const stbtt_fontinfo *info, stbtt_vertex *vertices);
+// frees the data allocated above
+
+STBTT_DEF unsigned char *stbtt_FindSVGDoc(const stbtt_fontinfo *info, int gl);
+STBTT_DEF int stbtt_GetCodepointSVG(const stbtt_fontinfo *info, int unicode_codepoint, const char **svg);
+STBTT_DEF int stbtt_GetGlyphSVG(const stbtt_fontinfo *info, int gl, const char **svg);
+// fills svg with the character's SVG data.
+// returns data size or 0 if SVG not found.
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// BITMAP RENDERING
+//
+
+STBTT_DEF void stbtt_FreeBitmap(unsigned char *bitmap, void *userdata);
+// frees the bitmap allocated below
+
+STBTT_DEF unsigned char *stbtt_GetCodepointBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int codepoint, int *width, int *height, int *xoff, int *yoff);
+// allocates a large-enough single-channel 8bpp bitmap and renders the
+// specified character/glyph at the specified scale into it, with
+// antialiasing. 0 is no coverage (transparent), 255 is fully covered (opaque).
+// *width & *height are filled out with the width & height of the bitmap,
+// which is stored left-to-right, top-to-bottom.
+//
+// xoff/yoff are the offset in pixel space from the glyph origin to the top-left of the bitmap
+
+STBTT_DEF unsigned char *stbtt_GetCodepointBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint, int *width, int *height, int *xoff, int *yoff);
+// the same as stbtt_GetCodepointBitmap, but you can specify a subpixel
+// shift for the character
+
+STBTT_DEF void stbtt_MakeCodepointBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int codepoint);
+// the same as stbtt_GetCodepointBitmap, but you pass in storage for the bitmap
+// in the form of 'output', with row spacing of 'out_stride' bytes. the bitmap
+// is clipped to out_w/out_h bytes. Call stbtt_GetCodepointBitmapBox to get the
+// width and height and positioning info for it first.
+
+STBTT_DEF void stbtt_MakeCodepointBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint);
+// same as stbtt_MakeCodepointBitmap, but you can specify a subpixel
+// shift for the character
+
+STBTT_DEF void stbtt_MakeCodepointBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int oversample_x, int oversample_y, float *sub_x, float *sub_y, int codepoint);
+// same as stbtt_MakeCodepointBitmapSubpixel, but prefiltering
+// is performed (see stbtt_PackSetOversampling)
+
+STBTT_DEF void stbtt_GetCodepointBitmapBox(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1);
+// get the bbox of the bitmap centered around the glyph origin; so the
+// bitmap width is ix1-ix0, height is iy1-iy0, and location to place
+// the bitmap top left is (leftSideBearing*scale,iy0).
+// (Note that the bitmap uses y-increases-down, but the shape uses
+// y-increases-up, so CodepointBitmapBox and CodepointBox are inverted.)
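+
+// (Editor's note: a minimal sketch of the Make*/BitmapBox pairing described
+// above, rendering one codepoint into caller-owned storage; the caller must
+// guarantee 'out' holds at least (x1-x0) x (y1-y0) pixels at 'out_stride':)
+#if 0
+void render_into(const stbtt_fontinfo *font, int codepoint, float scale,
+                 unsigned char *out, int out_stride)
+{
+   int x0,y0,x1,y1;
+   stbtt_GetCodepointBitmapBox(font, codepoint, scale, scale, &x0,&y0,&x1,&y1);
+   stbtt_MakeCodepointBitmap(font, out, x1-x0, y1-y0, out_stride, scale, scale, codepoint);
+}
+#endif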
+
+STBTT_DEF void stbtt_GetCodepointBitmapBoxSubpixel(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1);
+// same as stbtt_GetCodepointBitmapBox, but you can specify a subpixel
+// shift for the character
+
+// the following functions are equivalent to the above functions, but operate
+// on glyph indices instead of Unicode codepoints (for efficiency)
+STBTT_DEF unsigned char *stbtt_GetGlyphBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int glyph, int *width, int *height, int *xoff, int *yoff);
+STBTT_DEF unsigned char *stbtt_GetGlyphBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int glyph, int *width, int *height, int *xoff, int *yoff);
+STBTT_DEF void stbtt_MakeGlyphBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int glyph);
+STBTT_DEF void stbtt_MakeGlyphBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int glyph);
+STBTT_DEF void stbtt_MakeGlyphBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int oversample_x, int oversample_y, float *sub_x, float *sub_y, int glyph);
+STBTT_DEF void stbtt_GetGlyphBitmapBox(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1);
+STBTT_DEF void stbtt_GetGlyphBitmapBoxSubpixel(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y, float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1);
+
+
+// @TODO: don't expose this structure
+typedef struct
+{
+   int w,h,stride;
+   unsigned char *pixels;
+} stbtt__bitmap;
+
+// rasterize a shape with quadratic beziers into a bitmap
+STBTT_DEF void stbtt_Rasterize(stbtt__bitmap *result,        // 1-channel bitmap to draw into
+                               float flatness_in_pixels,     // allowable error of curve in pixels
+                               stbtt_vertex *vertices,       // array of vertices defining shape
+                               int num_verts,                // number of vertices in above array
+                               float scale_x, float scale_y, // scale applied to input vertices
+                               float shift_x, float shift_y, // translation applied to input vertices
+                               int x_off, int y_off,         // another translation applied to input
+                               int invert,                   // if non-zero, vertically flip shape
+                               void *userdata);              // context for STBTT_MALLOC
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Signed Distance Function (or Field) rendering
+
+STBTT_DEF void stbtt_FreeSDF(unsigned char *bitmap, void *userdata);
+// frees the SDF bitmap allocated below
+
+STBTT_DEF unsigned char * stbtt_GetGlyphSDF(const stbtt_fontinfo *info, float scale, int glyph, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff);
+STBTT_DEF unsigned char * stbtt_GetCodepointSDF(const stbtt_fontinfo *info, float scale, int codepoint, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff);
+// These functions compute a discretized SDF field for a single character, suitable for storing
+// in a single-channel texture, sampling with bilinear filtering, and testing whether the sampled
+// value is larger than some threshold to produce scalable fonts.
+//        info              --  the font
+//        scale             --  controls the size of the resulting SDF bitmap, same as it would be creating a regular bitmap
+//        glyph/codepoint   --  the character to generate the SDF for
+//        padding           --  extra "pixels" around the character which are filled with the distance to the character (not 0),
+//                                 which allows effects like bit outlines
+//        onedge_value      --  value 0-255 to test the SDF against to reconstruct the character (i.e. the isocontour of the character)
+//        pixel_dist_scale  --  what value the SDF should increase by when moving one SDF "pixel" away from the edge (on the 0..255 scale)
+//                                 if positive, > onedge_value is inside; if negative, < onedge_value is inside
+//        width,height      --  output height & width of the SDF bitmap (including padding)
+//        xoff,yoff         --  output origin of the character
+//        return value      --  a 2D array of bytes 0..255, width*height in size
+//
+// pixel_dist_scale & onedge_value are a scale & bias that allows you to make
+// optimal use of the limited 0..255 for your application, trading off precision
+// and special effects. SDF values outside the range 0..255 are clamped to 0..255.
+//
+// Example:
+//      scale = stbtt_ScaleForPixelHeight(22)
+//      padding = 5
+//      onedge_value = 180
+//      pixel_dist_scale = 180/5.0 = 36.0
+//
+//      This will create an SDF bitmap in which the character is about 22 pixels
+//      high but the whole bitmap is about 22+5+5=32 pixels high. To produce a filled
+//      shape, sample the SDF at each pixel and fill the pixel if the SDF value
+//      is greater than or equal to 180/255. (You'll actually want to antialias,
+//      which is beyond the scope of this example.) Additionally, you can compute
+//      offset outlines (e.g. to stroke the character border inside & outside,
+//      or only outside). For example, to fill outside the character up to 3 SDF
+//      pixels, you would compare against (180-36.0*3)/255 = 72/255. The above
+//      choice of variables maps a range from 5 pixels outside the shape to
+//      2 pixels inside the shape to 0..255; this is intended primarily for applying
+//      outside effects only (the interior range is needed to allow proper
+//      antialiasing of the font at *smaller* sizes)
+//
+// The function computes the SDF analytically at each SDF pixel, not by e.g.
+// building a higher-res bitmap and approximating it. In theory the quality
+// should be as high as possible for an SDF of this size & representation, but
+// it is unclear whether this is true in practice (perhaps building a higher-res
+// bitmap and computing from that can allow drop-out prevention).
+//
+// The algorithm has not been optimized at all, so expect it to be slow
+// if computing lots of characters or very large sizes.
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Finding the right font...
+//
+// You should really just solve this offline, keep your own tables
+// of what font is what, and don't try to get it out of the .ttf file.
+// That's because getting it out of the .ttf file is really hard, because
+// the names in the file can appear in many possible encodings, in many
+// possible languages, and e.g. if you need a case-insensitive comparison,
+// the details of that depend on the encoding & language in a complex way
+// (actually underspecified in truetype, but also gigantic).
+// +// But you can use the provided functions in two possible ways: +// stbtt_FindMatchingFont() will use *case-sensitive* comparisons on +// unicode-encoded names to try to find the font you want; +// you can run this before calling stbtt_InitFont() +// +// stbtt_GetFontNameString() lets you get any of the various strings +// from the file yourself and do your own comparisons on them. +// You have to have called stbtt_InitFont() first. + + +STBTT_DEF int stbtt_FindMatchingFont(const unsigned char *fontdata, const char *name, int flags); +// returns the offset (not index) of the font that matches, or -1 if none +// if you use STBTT_MACSTYLE_DONTCARE, use a font name like "Arial Bold". +// if you use any other flag, use a font name like "Arial"; this checks +// the 'macStyle' header field; i don't know if fonts set this consistently +#define STBTT_MACSTYLE_DONTCARE 0 +#define STBTT_MACSTYLE_BOLD 1 +#define STBTT_MACSTYLE_ITALIC 2 +#define STBTT_MACSTYLE_UNDERSCORE 4 +#define STBTT_MACSTYLE_NONE 8 // <= not same as 0, this makes us check the bitfield is 0 + +STBTT_DEF int stbtt_CompareUTF8toUTF16_bigendian(const char *s1, int len1, const char *s2, int len2); +// returns 1/0 whether the first string interpreted as utf8 is identical to +// the second string interpreted as big-endian utf16... useful for strings from next func + +STBTT_DEF const char *stbtt_GetFontNameString(const stbtt_fontinfo *font, int *length, int platformID, int encodingID, int languageID, int nameID); +// returns the string (which may be big-endian double byte, e.g. for unicode) +// and puts the length in bytes in *length. +// +// some of the values for the IDs are below; for more see the truetype spec: +// http://developer.apple.com/textfonts/TTRefMan/RM06/Chap6name.html +// http://www.microsoft.com/typography/otspec/name.htm + +enum { // platformID + STBTT_PLATFORM_ID_UNICODE =0, + STBTT_PLATFORM_ID_MAC =1, + STBTT_PLATFORM_ID_ISO =2, + STBTT_PLATFORM_ID_MICROSOFT =3 +}; + +enum { // encodingID for STBTT_PLATFORM_ID_UNICODE + STBTT_UNICODE_EID_UNICODE_1_0 =0, + STBTT_UNICODE_EID_UNICODE_1_1 =1, + STBTT_UNICODE_EID_ISO_10646 =2, + STBTT_UNICODE_EID_UNICODE_2_0_BMP=3, + STBTT_UNICODE_EID_UNICODE_2_0_FULL=4 +}; + +enum { // encodingID for STBTT_PLATFORM_ID_MICROSOFT + STBTT_MS_EID_SYMBOL =0, + STBTT_MS_EID_UNICODE_BMP =1, + STBTT_MS_EID_SHIFTJIS =2, + STBTT_MS_EID_UNICODE_FULL =10 +}; + +enum { // encodingID for STBTT_PLATFORM_ID_MAC; same as Script Manager codes + STBTT_MAC_EID_ROMAN =0, STBTT_MAC_EID_ARABIC =4, + STBTT_MAC_EID_JAPANESE =1, STBTT_MAC_EID_HEBREW =5, + STBTT_MAC_EID_CHINESE_TRAD =2, STBTT_MAC_EID_GREEK =6, + STBTT_MAC_EID_KOREAN =3, STBTT_MAC_EID_RUSSIAN =7 +}; + +enum { // languageID for STBTT_PLATFORM_ID_MICROSOFT; same as LCID... + // problematic because there are e.g. 
16 english LCIDs and 16 arabic LCIDs + STBTT_MS_LANG_ENGLISH =0x0409, STBTT_MS_LANG_ITALIAN =0x0410, + STBTT_MS_LANG_CHINESE =0x0804, STBTT_MS_LANG_JAPANESE =0x0411, + STBTT_MS_LANG_DUTCH =0x0413, STBTT_MS_LANG_KOREAN =0x0412, + STBTT_MS_LANG_FRENCH =0x040c, STBTT_MS_LANG_RUSSIAN =0x0419, + STBTT_MS_LANG_GERMAN =0x0407, STBTT_MS_LANG_SPANISH =0x0409, + STBTT_MS_LANG_HEBREW =0x040d, STBTT_MS_LANG_SWEDISH =0x041D +}; + +enum { // languageID for STBTT_PLATFORM_ID_MAC + STBTT_MAC_LANG_ENGLISH =0 , STBTT_MAC_LANG_JAPANESE =11, + STBTT_MAC_LANG_ARABIC =12, STBTT_MAC_LANG_KOREAN =23, + STBTT_MAC_LANG_DUTCH =4 , STBTT_MAC_LANG_RUSSIAN =32, + STBTT_MAC_LANG_FRENCH =1 , STBTT_MAC_LANG_SPANISH =6 , + STBTT_MAC_LANG_GERMAN =2 , STBTT_MAC_LANG_SWEDISH =5 , + STBTT_MAC_LANG_HEBREW =10, STBTT_MAC_LANG_CHINESE_SIMPLIFIED =33, + STBTT_MAC_LANG_ITALIAN =3 , STBTT_MAC_LANG_CHINESE_TRAD =19 +}; + +#ifdef __cplusplus +} +#endif + +#endif // __STB_INCLUDE_STB_TRUETYPE_H__ + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// +//// +//// IMPLEMENTATION +//// +//// + +#ifdef STB_TRUETYPE_IMPLEMENTATION + +#ifndef STBTT_MAX_OVERSAMPLE +#define STBTT_MAX_OVERSAMPLE 8 +#endif + +#if STBTT_MAX_OVERSAMPLE > 255 +#error "STBTT_MAX_OVERSAMPLE cannot be > 255" +#endif + +typedef int stbtt__test_oversample_pow2[(STBTT_MAX_OVERSAMPLE & (STBTT_MAX_OVERSAMPLE-1)) == 0 ? 1 : -1]; + +#ifndef STBTT_RASTERIZER_VERSION +#define STBTT_RASTERIZER_VERSION 2 +#endif + +#ifdef _MSC_VER +#define STBTT__NOTUSED(v) (void)(v) +#else +#define STBTT__NOTUSED(v) (void)sizeof(v) +#endif + +////////////////////////////////////////////////////////////////////////// +// +// stbtt__buf helpers to parse data from file +// + +static stbtt_uint8 stbtt__buf_get8(stbtt__buf *b) +{ + if (b->cursor >= b->size) + return 0; + return b->data[b->cursor++]; +} + +static stbtt_uint8 stbtt__buf_peek8(stbtt__buf *b) +{ + if (b->cursor >= b->size) + return 0; + return b->data[b->cursor]; +} + +static void stbtt__buf_seek(stbtt__buf *b, int o) +{ + STBTT_assert(!(o > b->size || o < 0)); + b->cursor = (o > b->size || o < 0) ? 
b->size : o; +} + +static void stbtt__buf_skip(stbtt__buf *b, int o) +{ + stbtt__buf_seek(b, b->cursor + o); +} + +static stbtt_uint32 stbtt__buf_get(stbtt__buf *b, int n) +{ + stbtt_uint32 v = 0; + int i; + STBTT_assert(n >= 1 && n <= 4); + for (i = 0; i < n; i++) + v = (v << 8) | stbtt__buf_get8(b); + return v; +} + +static stbtt__buf stbtt__new_buf(const void *p, size_t size) +{ + stbtt__buf r; + STBTT_assert(size < 0x40000000); + r.data = (stbtt_uint8*) p; + r.size = (int) size; + r.cursor = 0; + return r; +} + +#define stbtt__buf_get16(b) stbtt__buf_get((b), 2) +#define stbtt__buf_get32(b) stbtt__buf_get((b), 4) + +static stbtt__buf stbtt__buf_range(const stbtt__buf *b, int o, int s) +{ + stbtt__buf r = stbtt__new_buf(NULL, 0); + if (o < 0 || s < 0 || o > b->size || s > b->size - o) return r; + r.data = b->data + o; + r.size = s; + return r; +} + +static stbtt__buf stbtt__cff_get_index(stbtt__buf *b) +{ + int count, start, offsize; + start = b->cursor; + count = stbtt__buf_get16(b); + if (count) { + offsize = stbtt__buf_get8(b); + STBTT_assert(offsize >= 1 && offsize <= 4); + stbtt__buf_skip(b, offsize * count); + stbtt__buf_skip(b, stbtt__buf_get(b, offsize) - 1); + } + return stbtt__buf_range(b, start, b->cursor - start); +} + +static stbtt_uint32 stbtt__cff_int(stbtt__buf *b) +{ + int b0 = stbtt__buf_get8(b); + if (b0 >= 32 && b0 <= 246) return b0 - 139; + else if (b0 >= 247 && b0 <= 250) return (b0 - 247)*256 + stbtt__buf_get8(b) + 108; + else if (b0 >= 251 && b0 <= 254) return -(b0 - 251)*256 - stbtt__buf_get8(b) - 108; + else if (b0 == 28) return stbtt__buf_get16(b); + else if (b0 == 29) return stbtt__buf_get32(b); + STBTT_assert(0); + return 0; +} + +static void stbtt__cff_skip_operand(stbtt__buf *b) { + int v, b0 = stbtt__buf_peek8(b); + STBTT_assert(b0 >= 28); + if (b0 == 30) { + stbtt__buf_skip(b, 1); + while (b->cursor < b->size) { + v = stbtt__buf_get8(b); + if ((v & 0xF) == 0xF || (v >> 4) == 0xF) + break; + } + } else { + stbtt__cff_int(b); + } +} + +static stbtt__buf stbtt__dict_get(stbtt__buf *b, int key) +{ + stbtt__buf_seek(b, 0); + while (b->cursor < b->size) { + int start = b->cursor, end, op; + while (stbtt__buf_peek8(b) >= 28) + stbtt__cff_skip_operand(b); + end = b->cursor; + op = stbtt__buf_get8(b); + if (op == 12) op = stbtt__buf_get8(b) | 0x100; + if (op == key) return stbtt__buf_range(b, start, end-start); + } + return stbtt__buf_range(b, 0, 0); +} + +static void stbtt__dict_get_ints(stbtt__buf *b, int key, int outcount, stbtt_uint32 *out) +{ + int i; + stbtt__buf operands = stbtt__dict_get(b, key); + for (i = 0; i < outcount && operands.cursor < operands.size; i++) + out[i] = stbtt__cff_int(&operands); +} + +static int stbtt__cff_index_count(stbtt__buf *b) +{ + stbtt__buf_seek(b, 0); + return stbtt__buf_get16(b); +} + +static stbtt__buf stbtt__cff_index_get(stbtt__buf b, int i) +{ + int count, offsize, start, end; + stbtt__buf_seek(&b, 0); + count = stbtt__buf_get16(&b); + offsize = stbtt__buf_get8(&b); + STBTT_assert(i >= 0 && i < count); + STBTT_assert(offsize >= 1 && offsize <= 4); + stbtt__buf_skip(&b, i*offsize); + start = stbtt__buf_get(&b, offsize); + end = stbtt__buf_get(&b, offsize); + return stbtt__buf_range(&b, 2+(count+1)*offsize+start, end - start); +} + +////////////////////////////////////////////////////////////////////////// +// +// accessors to parse data from file +// + +// on platforms that don't allow misaligned reads, if we want to allow +// truetype fonts that aren't padded to alignment, define ALLOW_UNALIGNED_TRUETYPE + +#define 
ttBYTE(p) (* (stbtt_uint8 *) (p)) +#define ttCHAR(p) (* (stbtt_int8 *) (p)) +#define ttFixed(p) ttLONG(p) + +static stbtt_uint16 ttUSHORT(stbtt_uint8 *p) { return p[0]*256 + p[1]; } +static stbtt_int16 ttSHORT(stbtt_uint8 *p) { return p[0]*256 + p[1]; } +static stbtt_uint32 ttULONG(stbtt_uint8 *p) { return (p[0]<<24) + (p[1]<<16) + (p[2]<<8) + p[3]; } +static stbtt_int32 ttLONG(stbtt_uint8 *p) { return (p[0]<<24) + (p[1]<<16) + (p[2]<<8) + p[3]; } + +#define stbtt_tag4(p,c0,c1,c2,c3) ((p)[0] == (c0) && (p)[1] == (c1) && (p)[2] == (c2) && (p)[3] == (c3)) +#define stbtt_tag(p,str) stbtt_tag4(p,str[0],str[1],str[2],str[3]) + +static int stbtt__isfont(stbtt_uint8 *font) +{ + // check the version number + if (stbtt_tag4(font, '1',0,0,0)) return 1; // TrueType 1 + if (stbtt_tag(font, "typ1")) return 1; // TrueType with type 1 font -- we don't support this! + if (stbtt_tag(font, "OTTO")) return 1; // OpenType with CFF + if (stbtt_tag4(font, 0,1,0,0)) return 1; // OpenType 1.0 + if (stbtt_tag(font, "true")) return 1; // Apple specification for TrueType fonts + return 0; +} + +// @OPTIMIZE: binary search +static stbtt_uint32 stbtt__find_table(stbtt_uint8 *data, stbtt_uint32 fontstart, const char *tag) +{ + stbtt_int32 num_tables = ttUSHORT(data+fontstart+4); + stbtt_uint32 tabledir = fontstart + 12; + stbtt_int32 i; + for (i=0; i < num_tables; ++i) { + stbtt_uint32 loc = tabledir + 16*i; + if (stbtt_tag(data+loc+0, tag)) + return ttULONG(data+loc+8); + } + return 0; +} + +static int stbtt_GetFontOffsetForIndex_internal(unsigned char *font_collection, int index) +{ + // if it's just a font, there's only one valid index + if (stbtt__isfont(font_collection)) + return index == 0 ? 0 : -1; + + // check if it's a TTC + if (stbtt_tag(font_collection, "ttcf")) { + // version 1? + if (ttULONG(font_collection+4) == 0x00010000 || ttULONG(font_collection+4) == 0x00020000) { + stbtt_int32 n = ttLONG(font_collection+8); + if (index >= n) + return -1; + return ttULONG(font_collection+12+index*4); + } + } + return -1; +} + +static int stbtt_GetNumberOfFonts_internal(unsigned char *font_collection) +{ + // if it's just a font, there's only one valid font + if (stbtt__isfont(font_collection)) + return 1; + + // check if it's a TTC + if (stbtt_tag(font_collection, "ttcf")) { + // version 1? 
+ if (ttULONG(font_collection+4) == 0x00010000 || ttULONG(font_collection+4) == 0x00020000) { + return ttLONG(font_collection+8); + } + } + return 0; +} + +static stbtt__buf stbtt__get_subrs(stbtt__buf cff, stbtt__buf fontdict) +{ + stbtt_uint32 subrsoff = 0, private_loc[2] = { 0, 0 }; + stbtt__buf pdict; + stbtt__dict_get_ints(&fontdict, 18, 2, private_loc); + if (!private_loc[1] || !private_loc[0]) return stbtt__new_buf(NULL, 0); + pdict = stbtt__buf_range(&cff, private_loc[1], private_loc[0]); + stbtt__dict_get_ints(&pdict, 19, 1, &subrsoff); + if (!subrsoff) return stbtt__new_buf(NULL, 0); + stbtt__buf_seek(&cff, private_loc[1]+subrsoff); + return stbtt__cff_get_index(&cff); +} + +// since most people won't use this, find this table the first time it's needed +static int stbtt__get_svg(stbtt_fontinfo *info) +{ + stbtt_uint32 t; + if (info->svg < 0) { + t = stbtt__find_table(info->data, info->fontstart, "SVG "); + if (t) { + stbtt_uint32 offset = ttULONG(info->data + t + 2); + info->svg = t + offset; + } else { + info->svg = 0; + } + } + return info->svg; +} + +static int stbtt_InitFont_internal(stbtt_fontinfo *info, unsigned char *data, int fontstart) +{ + stbtt_uint32 cmap, t; + stbtt_int32 i,numTables; + + info->data = data; + info->fontstart = fontstart; + info->cff = stbtt__new_buf(NULL, 0); + + cmap = stbtt__find_table(data, fontstart, "cmap"); // required + info->loca = stbtt__find_table(data, fontstart, "loca"); // required + info->head = stbtt__find_table(data, fontstart, "head"); // required + info->glyf = stbtt__find_table(data, fontstart, "glyf"); // required + info->hhea = stbtt__find_table(data, fontstart, "hhea"); // required + info->hmtx = stbtt__find_table(data, fontstart, "hmtx"); // required + info->kern = stbtt__find_table(data, fontstart, "kern"); // not required + info->gpos = stbtt__find_table(data, fontstart, "GPOS"); // not required + + if (!cmap || !info->head || !info->hhea || !info->hmtx) + return 0; + if (info->glyf) { + // required for truetype + if (!info->loca) return 0; + } else { + // initialization for CFF / Type2 fonts (OTF) + stbtt__buf b, topdict, topdictidx; + stbtt_uint32 cstype = 2, charstrings = 0, fdarrayoff = 0, fdselectoff = 0; + stbtt_uint32 cff; + + cff = stbtt__find_table(data, fontstart, "CFF "); + if (!cff) return 0; + + info->fontdicts = stbtt__new_buf(NULL, 0); + info->fdselect = stbtt__new_buf(NULL, 0); + + // @TODO this should use size from table (not 512MB) + info->cff = stbtt__new_buf(data+cff, 512*1024*1024); + b = info->cff; + + // read the header + stbtt__buf_skip(&b, 2); + stbtt__buf_seek(&b, stbtt__buf_get8(&b)); // hdrsize + + // @TODO the name INDEX could list multiple fonts, + // but we just use the first one. 
+ stbtt__cff_get_index(&b); // name INDEX + topdictidx = stbtt__cff_get_index(&b); + topdict = stbtt__cff_index_get(topdictidx, 0); + stbtt__cff_get_index(&b); // string INDEX + info->gsubrs = stbtt__cff_get_index(&b); + + stbtt__dict_get_ints(&topdict, 17, 1, &charstrings); + stbtt__dict_get_ints(&topdict, 0x100 | 6, 1, &cstype); + stbtt__dict_get_ints(&topdict, 0x100 | 36, 1, &fdarrayoff); + stbtt__dict_get_ints(&topdict, 0x100 | 37, 1, &fdselectoff); + info->subrs = stbtt__get_subrs(b, topdict); + + // we only support Type 2 charstrings + if (cstype != 2) return 0; + if (charstrings == 0) return 0; + + if (fdarrayoff) { + // looks like a CID font + if (!fdselectoff) return 0; + stbtt__buf_seek(&b, fdarrayoff); + info->fontdicts = stbtt__cff_get_index(&b); + info->fdselect = stbtt__buf_range(&b, fdselectoff, b.size-fdselectoff); + } + + stbtt__buf_seek(&b, charstrings); + info->charstrings = stbtt__cff_get_index(&b); + } + + t = stbtt__find_table(data, fontstart, "maxp"); + if (t) + info->numGlyphs = ttUSHORT(data+t+4); + else + info->numGlyphs = 0xffff; + + info->svg = -1; + + // find a cmap encoding table we understand *now* to avoid searching + // later. (todo: could make this installable) + // the same regardless of glyph. + numTables = ttUSHORT(data + cmap + 2); + info->index_map = 0; + for (i=0; i < numTables; ++i) { + stbtt_uint32 encoding_record = cmap + 4 + 8 * i; + // find an encoding we understand: + switch(ttUSHORT(data+encoding_record)) { + case STBTT_PLATFORM_ID_MICROSOFT: + switch (ttUSHORT(data+encoding_record+2)) { + case STBTT_MS_EID_UNICODE_BMP: + case STBTT_MS_EID_UNICODE_FULL: + // MS/Unicode + info->index_map = cmap + ttULONG(data+encoding_record+4); + break; + } + break; + case STBTT_PLATFORM_ID_UNICODE: + // Mac/iOS has these + // all the encodingIDs are unicode, so we don't bother to check it + info->index_map = cmap + ttULONG(data+encoding_record+4); + break; + } + } + if (info->index_map == 0) + return 0; + + info->indexToLocFormat = ttUSHORT(data+info->head + 50); + return 1; +} + +STBTT_DEF int stbtt_FindGlyphIndex(const stbtt_fontinfo *info, int unicode_codepoint) +{ + stbtt_uint8 *data = info->data; + stbtt_uint32 index_map = info->index_map; + + stbtt_uint16 format = ttUSHORT(data + index_map + 0); + if (format == 0) { // apple byte encoding + stbtt_int32 bytes = ttUSHORT(data + index_map + 2); + if (unicode_codepoint < bytes-6) + return ttBYTE(data + index_map + 6 + unicode_codepoint); + return 0; + } else if (format == 6) { + stbtt_uint32 first = ttUSHORT(data + index_map + 6); + stbtt_uint32 count = ttUSHORT(data + index_map + 8); + if ((stbtt_uint32) unicode_codepoint >= first && (stbtt_uint32) unicode_codepoint < first+count) + return ttUSHORT(data + index_map + 10 + (unicode_codepoint - first)*2); + return 0; + } else if (format == 2) { + STBTT_assert(0); // @TODO: high-byte mapping for japanese/chinese/korean + return 0; + } else if (format == 4) { // standard mapping for windows fonts: binary search collection of ranges + stbtt_uint16 segcount = ttUSHORT(data+index_map+6) >> 1; + stbtt_uint16 searchRange = ttUSHORT(data+index_map+8) >> 1; + stbtt_uint16 entrySelector = ttUSHORT(data+index_map+10); + stbtt_uint16 rangeShift = ttUSHORT(data+index_map+12) >> 1; + + // do a binary search of the segments + stbtt_uint32 endCount = index_map + 14; + stbtt_uint32 search = endCount; + + if (unicode_codepoint > 0xffff) + return 0; + + // they lie from endCount .. endCount + segCount + // but searchRange is the nearest power of two, so... 
+ if (unicode_codepoint >= ttUSHORT(data + search + rangeShift*2)) + search += rangeShift*2; + + // now decrement to bias correctly to find smallest + search -= 2; + while (entrySelector) { + stbtt_uint16 end; + searchRange >>= 1; + end = ttUSHORT(data + search + searchRange*2); + if (unicode_codepoint > end) + search += searchRange*2; + --entrySelector; + } + search += 2; + + { + stbtt_uint16 offset, start, last; + stbtt_uint16 item = (stbtt_uint16) ((search - endCount) >> 1); + + start = ttUSHORT(data + index_map + 14 + segcount*2 + 2 + 2*item); + last = ttUSHORT(data + endCount + 2*item); + if (unicode_codepoint < start || unicode_codepoint > last) + return 0; + + offset = ttUSHORT(data + index_map + 14 + segcount*6 + 2 + 2*item); + if (offset == 0) + return (stbtt_uint16) (unicode_codepoint + ttSHORT(data + index_map + 14 + segcount*4 + 2 + 2*item)); + + return ttUSHORT(data + offset + (unicode_codepoint-start)*2 + index_map + 14 + segcount*6 + 2 + 2*item); + } + } else if (format == 12 || format == 13) { + stbtt_uint32 ngroups = ttULONG(data+index_map+12); + stbtt_int32 low,high; + low = 0; high = (stbtt_int32)ngroups; + // Binary search the right group. + while (low < high) { + stbtt_int32 mid = low + ((high-low) >> 1); // rounds down, so low <= mid < high + stbtt_uint32 start_char = ttULONG(data+index_map+16+mid*12); + stbtt_uint32 end_char = ttULONG(data+index_map+16+mid*12+4); + if ((stbtt_uint32) unicode_codepoint < start_char) + high = mid; + else if ((stbtt_uint32) unicode_codepoint > end_char) + low = mid+1; + else { + stbtt_uint32 start_glyph = ttULONG(data+index_map+16+mid*12+8); + if (format == 12) + return start_glyph + unicode_codepoint-start_char; + else // format == 13 + return start_glyph; + } + } + return 0; // not found + } + // @TODO + STBTT_assert(0); + return 0; +} + +STBTT_DEF int stbtt_GetCodepointShape(const stbtt_fontinfo *info, int unicode_codepoint, stbtt_vertex **vertices) +{ + return stbtt_GetGlyphShape(info, stbtt_FindGlyphIndex(info, unicode_codepoint), vertices); +} + +static void stbtt_setvertex(stbtt_vertex *v, stbtt_uint8 type, stbtt_int32 x, stbtt_int32 y, stbtt_int32 cx, stbtt_int32 cy) +{ + v->type = type; + v->x = (stbtt_int16) x; + v->y = (stbtt_int16) y; + v->cx = (stbtt_int16) cx; + v->cy = (stbtt_int16) cy; +} + +static int stbtt__GetGlyfOffset(const stbtt_fontinfo *info, int glyph_index) +{ + int g1,g2; + + STBTT_assert(!info->cff.size); + + if (glyph_index >= info->numGlyphs) return -1; // glyph index out of range + if (info->indexToLocFormat >= 2) return -1; // unknown index->glyph map format + + if (info->indexToLocFormat == 0) { + g1 = info->glyf + ttUSHORT(info->data + info->loca + glyph_index * 2) * 2; + g2 = info->glyf + ttUSHORT(info->data + info->loca + glyph_index * 2 + 2) * 2; + } else { + g1 = info->glyf + ttULONG (info->data + info->loca + glyph_index * 4); + g2 = info->glyf + ttULONG (info->data + info->loca + glyph_index * 4 + 4); + } + + return g1==g2 ? 
-1 : g1; // if length is 0, return -1 +} + +static int stbtt__GetGlyphInfoT2(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1); + +STBTT_DEF int stbtt_GetGlyphBox(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1) +{ + if (info->cff.size) { + stbtt__GetGlyphInfoT2(info, glyph_index, x0, y0, x1, y1); + } else { + int g = stbtt__GetGlyfOffset(info, glyph_index); + if (g < 0) return 0; + + if (x0) *x0 = ttSHORT(info->data + g + 2); + if (y0) *y0 = ttSHORT(info->data + g + 4); + if (x1) *x1 = ttSHORT(info->data + g + 6); + if (y1) *y1 = ttSHORT(info->data + g + 8); + } + return 1; +} + +STBTT_DEF int stbtt_GetCodepointBox(const stbtt_fontinfo *info, int codepoint, int *x0, int *y0, int *x1, int *y1) +{ + return stbtt_GetGlyphBox(info, stbtt_FindGlyphIndex(info,codepoint), x0,y0,x1,y1); +} + +STBTT_DEF int stbtt_IsGlyphEmpty(const stbtt_fontinfo *info, int glyph_index) +{ + stbtt_int16 numberOfContours; + int g; + if (info->cff.size) + return stbtt__GetGlyphInfoT2(info, glyph_index, NULL, NULL, NULL, NULL) == 0; + g = stbtt__GetGlyfOffset(info, glyph_index); + if (g < 0) return 1; + numberOfContours = ttSHORT(info->data + g); + return numberOfContours == 0; +} + +static int stbtt__close_shape(stbtt_vertex *vertices, int num_vertices, int was_off, int start_off, + stbtt_int32 sx, stbtt_int32 sy, stbtt_int32 scx, stbtt_int32 scy, stbtt_int32 cx, stbtt_int32 cy) +{ + if (start_off) { + if (was_off) + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, (cx+scx)>>1, (cy+scy)>>1, cx,cy); + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, sx,sy,scx,scy); + } else { + if (was_off) + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve,sx,sy,cx,cy); + else + stbtt_setvertex(&vertices[num_vertices++], STBTT_vline,sx,sy,0,0); + } + return num_vertices; +} + +static int stbtt__GetGlyphShapeTT(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **pvertices) +{ + stbtt_int16 numberOfContours; + stbtt_uint8 *endPtsOfContours; + stbtt_uint8 *data = info->data; + stbtt_vertex *vertices=0; + int num_vertices=0; + int g = stbtt__GetGlyfOffset(info, glyph_index); + + *pvertices = NULL; + + if (g < 0) return 0; + + numberOfContours = ttSHORT(data + g); + + if (numberOfContours > 0) { + stbtt_uint8 flags=0,flagcount; + stbtt_int32 ins, i,j=0,m,n, next_move, was_off=0, off, start_off=0; + stbtt_int32 x,y,cx,cy,sx,sy, scx,scy; + stbtt_uint8 *points; + endPtsOfContours = (data + g + 10); + ins = ttUSHORT(data + g + 10 + numberOfContours * 2); + points = data + g + 10 + numberOfContours * 2 + 2 + ins; + + n = 1+ttUSHORT(endPtsOfContours + numberOfContours*2-2); + + m = n + 2*numberOfContours; // a loose bound on how many vertices we might need + vertices = (stbtt_vertex *) STBTT_malloc(m * sizeof(vertices[0]), info->userdata); + if (vertices == 0) + return 0; + + next_move = 0; + flagcount=0; + + // in first pass, we load uninterpreted data into the allocated array + // above, shifted to the end of the array so we won't overwrite it when + // we create our final data starting from the front + + off = m - n; // starting offset for uninterpreted data, regardless of how m ends up being calculated + + // first load flags + + for (i=0; i < n; ++i) { + if (flagcount == 0) { + flags = *points++; + if (flags & 8) + flagcount = *points++; + } else + --flagcount; + vertices[off+i].type = flags; + } + + // now load x coordinates + x=0; + for (i=0; i < n; ++i) { + flags = vertices[off+i].type; + if (flags & 2) { + stbtt_int16 dx = *points++; + x += 
(flags & 16) ? dx : -dx; // ??? + } else { + if (!(flags & 16)) { + x = x + (stbtt_int16) (points[0]*256 + points[1]); + points += 2; + } + } + vertices[off+i].x = (stbtt_int16) x; + } + + // now load y coordinates + y=0; + for (i=0; i < n; ++i) { + flags = vertices[off+i].type; + if (flags & 4) { + stbtt_int16 dy = *points++; + y += (flags & 32) ? dy : -dy; // ??? + } else { + if (!(flags & 32)) { + y = y + (stbtt_int16) (points[0]*256 + points[1]); + points += 2; + } + } + vertices[off+i].y = (stbtt_int16) y; + } + + // now convert them to our format + num_vertices=0; + sx = sy = cx = cy = scx = scy = 0; + for (i=0; i < n; ++i) { + flags = vertices[off+i].type; + x = (stbtt_int16) vertices[off+i].x; + y = (stbtt_int16) vertices[off+i].y; + + if (next_move == i) { + if (i != 0) + num_vertices = stbtt__close_shape(vertices, num_vertices, was_off, start_off, sx,sy,scx,scy,cx,cy); + + // now start the new one + start_off = !(flags & 1); + if (start_off) { + // if we start off with an off-curve point, then when we need to find a point on the curve + // where we can start, and we need to save some state for when we wraparound. + scx = x; + scy = y; + if (!(vertices[off+i+1].type & 1)) { + // next point is also a curve point, so interpolate an on-point curve + sx = (x + (stbtt_int32) vertices[off+i+1].x) >> 1; + sy = (y + (stbtt_int32) vertices[off+i+1].y) >> 1; + } else { + // otherwise just use the next point as our start point + sx = (stbtt_int32) vertices[off+i+1].x; + sy = (stbtt_int32) vertices[off+i+1].y; + ++i; // we're using point i+1 as the starting point, so skip it + } + } else { + sx = x; + sy = y; + } + stbtt_setvertex(&vertices[num_vertices++], STBTT_vmove,sx,sy,0,0); + was_off = 0; + next_move = 1 + ttUSHORT(endPtsOfContours+j*2); + ++j; + } else { + if (!(flags & 1)) { // if it's a curve + if (was_off) // two off-curve control points in a row means interpolate an on-curve midpoint + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, (cx+x)>>1, (cy+y)>>1, cx, cy); + cx = x; + cy = y; + was_off = 1; + } else { + if (was_off) + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, x,y, cx, cy); + else + stbtt_setvertex(&vertices[num_vertices++], STBTT_vline, x,y,0,0); + was_off = 0; + } + } + } + num_vertices = stbtt__close_shape(vertices, num_vertices, was_off, start_off, sx,sy,scx,scy,cx,cy); + } else if (numberOfContours < 0) { + // Compound shapes. 
+ int more = 1; + stbtt_uint8 *comp = data + g + 10; + num_vertices = 0; + vertices = 0; + while (more) { + stbtt_uint16 flags, gidx; + int comp_num_verts = 0, i; + stbtt_vertex *comp_verts = 0, *tmp = 0; + float mtx[6] = {1,0,0,1,0,0}, m, n; + + flags = ttSHORT(comp); comp+=2; + gidx = ttSHORT(comp); comp+=2; + + if (flags & 2) { // XY values + if (flags & 1) { // shorts + mtx[4] = ttSHORT(comp); comp+=2; + mtx[5] = ttSHORT(comp); comp+=2; + } else { + mtx[4] = ttCHAR(comp); comp+=1; + mtx[5] = ttCHAR(comp); comp+=1; + } + } + else { + // @TODO handle matching point + STBTT_assert(0); + } + if (flags & (1<<3)) { // WE_HAVE_A_SCALE + mtx[0] = mtx[3] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[1] = mtx[2] = 0; + } else if (flags & (1<<6)) { // WE_HAVE_AN_X_AND_YSCALE + mtx[0] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[1] = mtx[2] = 0; + mtx[3] = ttSHORT(comp)/16384.0f; comp+=2; + } else if (flags & (1<<7)) { // WE_HAVE_A_TWO_BY_TWO + mtx[0] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[1] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[2] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[3] = ttSHORT(comp)/16384.0f; comp+=2; + } + + // Find transformation scales. + m = (float) STBTT_sqrt(mtx[0]*mtx[0] + mtx[1]*mtx[1]); + n = (float) STBTT_sqrt(mtx[2]*mtx[2] + mtx[3]*mtx[3]); + + // Get indexed glyph. + comp_num_verts = stbtt_GetGlyphShape(info, gidx, &comp_verts); + if (comp_num_verts > 0) { + // Transform vertices. + for (i = 0; i < comp_num_verts; ++i) { + stbtt_vertex* v = &comp_verts[i]; + stbtt_vertex_type x,y; + x=v->x; y=v->y; + v->x = (stbtt_vertex_type)(m * (mtx[0]*x + mtx[2]*y + mtx[4])); + v->y = (stbtt_vertex_type)(n * (mtx[1]*x + mtx[3]*y + mtx[5])); + x=v->cx; y=v->cy; + v->cx = (stbtt_vertex_type)(m * (mtx[0]*x + mtx[2]*y + mtx[4])); + v->cy = (stbtt_vertex_type)(n * (mtx[1]*x + mtx[3]*y + mtx[5])); + } + // Append vertices. + tmp = (stbtt_vertex*)STBTT_malloc((num_vertices+comp_num_verts)*sizeof(stbtt_vertex), info->userdata); + if (!tmp) { + if (vertices) STBTT_free(vertices, info->userdata); + if (comp_verts) STBTT_free(comp_verts, info->userdata); + return 0; + } + if (num_vertices > 0 && vertices) STBTT_memcpy(tmp, vertices, num_vertices*sizeof(stbtt_vertex)); + STBTT_memcpy(tmp+num_vertices, comp_verts, comp_num_verts*sizeof(stbtt_vertex)); + if (vertices) STBTT_free(vertices, info->userdata); + vertices = tmp; + STBTT_free(comp_verts, info->userdata); + num_vertices += comp_num_verts; + } + // More components ? 
+ more = flags & (1<<5);
+ }
+ } else {
+ // numberOfContours == 0, do nothing
+ }
+
+ *pvertices = vertices;
+ return num_vertices;
+}
+
+typedef struct
+{
+ int bounds;
+ int started;
+ float first_x, first_y;
+ float x, y;
+ stbtt_int32 min_x, max_x, min_y, max_y;
+
+ stbtt_vertex *pvertices;
+ int num_vertices;
+} stbtt__csctx;
+
+#define STBTT__CSCTX_INIT(bounds) {bounds,0, 0,0, 0,0, 0,0,0,0, NULL, 0}
+
+static void stbtt__track_vertex(stbtt__csctx *c, stbtt_int32 x, stbtt_int32 y)
+{
+ if (x > c->max_x || !c->started) c->max_x = x;
+ if (y > c->max_y || !c->started) c->max_y = y;
+ if (x < c->min_x || !c->started) c->min_x = x;
+ if (y < c->min_y || !c->started) c->min_y = y;
+ c->started = 1;
+}
+
+static void stbtt__csctx_v(stbtt__csctx *c, stbtt_uint8 type, stbtt_int32 x, stbtt_int32 y, stbtt_int32 cx, stbtt_int32 cy, stbtt_int32 cx1, stbtt_int32 cy1)
+{
+ if (c->bounds) {
+ stbtt__track_vertex(c, x, y);
+ if (type == STBTT_vcubic) {
+ stbtt__track_vertex(c, cx, cy);
+ stbtt__track_vertex(c, cx1, cy1);
+ }
+ } else {
+ stbtt_setvertex(&c->pvertices[c->num_vertices], type, x, y, cx, cy);
+ c->pvertices[c->num_vertices].cx1 = (stbtt_int16) cx1;
+ c->pvertices[c->num_vertices].cy1 = (stbtt_int16) cy1;
+ }
+ c->num_vertices++;
+}
+
+static void stbtt__csctx_close_shape(stbtt__csctx *ctx)
+{
+ if (ctx->first_x != ctx->x || ctx->first_y != ctx->y)
+ stbtt__csctx_v(ctx, STBTT_vline, (int)ctx->first_x, (int)ctx->first_y, 0, 0, 0, 0);
+}
+
+static void stbtt__csctx_rmove_to(stbtt__csctx *ctx, float dx, float dy)
+{
+ stbtt__csctx_close_shape(ctx);
+ ctx->first_x = ctx->x = ctx->x + dx;
+ ctx->first_y = ctx->y = ctx->y + dy;
+ stbtt__csctx_v(ctx, STBTT_vmove, (int)ctx->x, (int)ctx->y, 0, 0, 0, 0);
+}
+
+static void stbtt__csctx_rline_to(stbtt__csctx *ctx, float dx, float dy)
+{
+ ctx->x += dx;
+ ctx->y += dy;
+ stbtt__csctx_v(ctx, STBTT_vline, (int)ctx->x, (int)ctx->y, 0, 0, 0, 0);
+}
+
+static void stbtt__csctx_rccurve_to(stbtt__csctx *ctx, float dx1, float dy1, float dx2, float dy2, float dx3, float dy3)
+{
+ float cx1 = ctx->x + dx1;
+ float cy1 = ctx->y + dy1;
+ float cx2 = cx1 + dx2;
+ float cy2 = cy1 + dy2;
+ ctx->x = cx2 + dx3;
+ ctx->y = cy2 + dy3;
+ stbtt__csctx_v(ctx, STBTT_vcubic, (int)ctx->x, (int)ctx->y, (int)cx1, (int)cy1, (int)cx2, (int)cy2);
+}
+
+static stbtt__buf stbtt__get_subr(stbtt__buf idx, int n)
+{
+ int count = stbtt__cff_index_count(&idx);
+ int bias = 107;
+ if (count >= 33900)
+ bias = 32768;
+ else if (count >= 1240)
+ bias = 1131;
+ n += bias;
+ if (n < 0 || n >= count)
+ return stbtt__new_buf(NULL, 0);
+ return stbtt__cff_index_get(idx, n);
+}
+
+static stbtt__buf stbtt__cid_get_glyph_subrs(const stbtt_fontinfo *info, int glyph_index)
+{
+ stbtt__buf fdselect = info->fdselect;
+ int nranges, start, end, v, fmt, fdselector = -1, i;
+
+ stbtt__buf_seek(&fdselect, 0);
+ fmt = stbtt__buf_get8(&fdselect);
+ if (fmt == 0) {
+ // untested
+ stbtt__buf_skip(&fdselect, glyph_index);
+ fdselector = stbtt__buf_get8(&fdselect);
+ } else if (fmt == 3) {
+ nranges = stbtt__buf_get16(&fdselect);
+ start = stbtt__buf_get16(&fdselect);
+ for (i = 0; i < nranges; i++) {
+ v = stbtt__buf_get8(&fdselect);
+ end = stbtt__buf_get16(&fdselect);
+ if (glyph_index >= start && glyph_index < end) {
+ fdselector = v;
+ break;
+ }
+ start = end;
+ }
+ }
+ if (fdselector == -1) return stbtt__new_buf(NULL, 0); // glyph not covered by any range: return an empty buf rather than indexing fontdicts with -1
+ return stbtt__get_subrs(info->cff, stbtt__cff_index_get(info->fontdicts, fdselector));
+}
+
+static int stbtt__run_charstring(const stbtt_fontinfo *info, int glyph_index, stbtt__csctx *c)
+{ + int in_header = 1, maskbits = 0, subr_stack_height = 0, sp = 0, v, i, b0; + int has_subrs = 0, clear_stack; + float s[48]; + stbtt__buf subr_stack[10], subrs = info->subrs, b; + float f; + +#define STBTT__CSERR(s) (0) + + // this currently ignores the initial width value, which isn't needed if we have hmtx + b = stbtt__cff_index_get(info->charstrings, glyph_index); + while (b.cursor < b.size) { + i = 0; + clear_stack = 1; + b0 = stbtt__buf_get8(&b); + switch (b0) { + // @TODO implement hinting + case 0x13: // hintmask + case 0x14: // cntrmask + if (in_header) + maskbits += (sp / 2); // implicit "vstem" + in_header = 0; + stbtt__buf_skip(&b, (maskbits + 7) / 8); + break; + + case 0x01: // hstem + case 0x03: // vstem + case 0x12: // hstemhm + case 0x17: // vstemhm + maskbits += (sp / 2); + break; + + case 0x15: // rmoveto + in_header = 0; + if (sp < 2) return STBTT__CSERR("rmoveto stack"); + stbtt__csctx_rmove_to(c, s[sp-2], s[sp-1]); + break; + case 0x04: // vmoveto + in_header = 0; + if (sp < 1) return STBTT__CSERR("vmoveto stack"); + stbtt__csctx_rmove_to(c, 0, s[sp-1]); + break; + case 0x16: // hmoveto + in_header = 0; + if (sp < 1) return STBTT__CSERR("hmoveto stack"); + stbtt__csctx_rmove_to(c, s[sp-1], 0); + break; + + case 0x05: // rlineto + if (sp < 2) return STBTT__CSERR("rlineto stack"); + for (; i + 1 < sp; i += 2) + stbtt__csctx_rline_to(c, s[i], s[i+1]); + break; + + // hlineto/vlineto and vhcurveto/hvcurveto alternate horizontal and vertical + // starting from a different place. + + case 0x07: // vlineto + if (sp < 1) return STBTT__CSERR("vlineto stack"); + goto vlineto; + case 0x06: // hlineto + if (sp < 1) return STBTT__CSERR("hlineto stack"); + for (;;) { + if (i >= sp) break; + stbtt__csctx_rline_to(c, s[i], 0); + i++; + vlineto: + if (i >= sp) break; + stbtt__csctx_rline_to(c, 0, s[i]); + i++; + } + break; + + case 0x1F: // hvcurveto + if (sp < 4) return STBTT__CSERR("hvcurveto stack"); + goto hvcurveto; + case 0x1E: // vhcurveto + if (sp < 4) return STBTT__CSERR("vhcurveto stack"); + for (;;) { + if (i + 3 >= sp) break; + stbtt__csctx_rccurve_to(c, 0, s[i], s[i+1], s[i+2], s[i+3], (sp - i == 5) ? s[i + 4] : 0.0f); + i += 4; + hvcurveto: + if (i + 3 >= sp) break; + stbtt__csctx_rccurve_to(c, s[i], 0, s[i+1], s[i+2], (sp - i == 5) ? 
s[i+4] : 0.0f, s[i+3]);
+ i += 4;
+ }
+ break;
+
+ case 0x08: // rrcurveto
+ if (sp < 6) return STBTT__CSERR("rrcurveto stack");
+ for (; i + 5 < sp; i += 6)
+ stbtt__csctx_rccurve_to(c, s[i], s[i+1], s[i+2], s[i+3], s[i+4], s[i+5]);
+ break;
+
+ case 0x18: // rcurveline
+ if (sp < 8) return STBTT__CSERR("rcurveline stack");
+ for (; i + 5 < sp - 2; i += 6)
+ stbtt__csctx_rccurve_to(c, s[i], s[i+1], s[i+2], s[i+3], s[i+4], s[i+5]);
+ if (i + 1 >= sp) return STBTT__CSERR("rcurveline stack");
+ stbtt__csctx_rline_to(c, s[i], s[i+1]);
+ break;
+
+ case 0x19: // rlinecurve
+ if (sp < 8) return STBTT__CSERR("rlinecurve stack");
+ for (; i + 1 < sp - 6; i += 2)
+ stbtt__csctx_rline_to(c, s[i], s[i+1]);
+ if (i + 5 >= sp) return STBTT__CSERR("rlinecurve stack");
+ stbtt__csctx_rccurve_to(c, s[i], s[i+1], s[i+2], s[i+3], s[i+4], s[i+5]);
+ break;
+
+ case 0x1A: // vvcurveto
+ case 0x1B: // hhcurveto
+ if (sp < 4) return STBTT__CSERR("(vv|hh)curveto stack");
+ f = 0.0;
+ if (sp & 1) { f = s[i]; i++; }
+ for (; i + 3 < sp; i += 4) {
+ if (b0 == 0x1B)
+ stbtt__csctx_rccurve_to(c, s[i], f, s[i+1], s[i+2], s[i+3], 0.0);
+ else
+ stbtt__csctx_rccurve_to(c, f, s[i], s[i+1], s[i+2], 0.0, s[i+3]);
+ f = 0.0;
+ }
+ break;
+
+ case 0x0A: // callsubr
+ if (!has_subrs) {
+ if (info->fdselect.size)
+ subrs = stbtt__cid_get_glyph_subrs(info, glyph_index);
+ has_subrs = 1;
+ }
+ // FALLTHROUGH
+ case 0x1D: // callgsubr
+ if (sp < 1) return STBTT__CSERR("call(g|)subr stack");
+ v = (int) s[--sp];
+ if (subr_stack_height >= 10) return STBTT__CSERR("recursion limit");
+ subr_stack[subr_stack_height++] = b;
+ b = stbtt__get_subr(b0 == 0x0A ? subrs : info->gsubrs, v);
+ if (b.size == 0) return STBTT__CSERR("subr not found");
+ b.cursor = 0;
+ clear_stack = 0;
+ break;
+
+ case 0x0B: // return
+ if (subr_stack_height <= 0) return STBTT__CSERR("return outside subr");
+ b = subr_stack[--subr_stack_height];
+ clear_stack = 0;
+ break;
+
+ case 0x0E: // endchar
+ stbtt__csctx_close_shape(c);
+ return 1;
+
+ case 0x0C: { // two-byte escape
+ float dx1, dx2, dx3, dx4, dx5, dx6, dy1, dy2, dy3, dy4, dy5, dy6;
+ float dx, dy;
+ int b1 = stbtt__buf_get8(&b);
+ switch (b1) {
+ // @TODO These "flex" implementations ignore the flex-depth and resolution,
+ // and always draw beziers.
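+ // Each flex variant below decodes into two joined cubic beziers; the
+ // operands that the shorter forms (hflex, hflex1) omit are implied zeros
+ // or values chosen so the curve ends back on its starting horizontal.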
+ case 0x22: // hflex + if (sp < 7) return STBTT__CSERR("hflex stack"); + dx1 = s[0]; + dx2 = s[1]; + dy2 = s[2]; + dx3 = s[3]; + dx4 = s[4]; + dx5 = s[5]; + dx6 = s[6]; + stbtt__csctx_rccurve_to(c, dx1, 0, dx2, dy2, dx3, 0); + stbtt__csctx_rccurve_to(c, dx4, 0, dx5, -dy2, dx6, 0); + break; + + case 0x23: // flex + if (sp < 13) return STBTT__CSERR("flex stack"); + dx1 = s[0]; + dy1 = s[1]; + dx2 = s[2]; + dy2 = s[3]; + dx3 = s[4]; + dy3 = s[5]; + dx4 = s[6]; + dy4 = s[7]; + dx5 = s[8]; + dy5 = s[9]; + dx6 = s[10]; + dy6 = s[11]; + //fd is s[12] + stbtt__csctx_rccurve_to(c, dx1, dy1, dx2, dy2, dx3, dy3); + stbtt__csctx_rccurve_to(c, dx4, dy4, dx5, dy5, dx6, dy6); + break; + + case 0x24: // hflex1 + if (sp < 9) return STBTT__CSERR("hflex1 stack"); + dx1 = s[0]; + dy1 = s[1]; + dx2 = s[2]; + dy2 = s[3]; + dx3 = s[4]; + dx4 = s[5]; + dx5 = s[6]; + dy5 = s[7]; + dx6 = s[8]; + stbtt__csctx_rccurve_to(c, dx1, dy1, dx2, dy2, dx3, 0); + stbtt__csctx_rccurve_to(c, dx4, 0, dx5, dy5, dx6, -(dy1+dy2+dy5)); + break; + + case 0x25: // flex1 + if (sp < 11) return STBTT__CSERR("flex1 stack"); + dx1 = s[0]; + dy1 = s[1]; + dx2 = s[2]; + dy2 = s[3]; + dx3 = s[4]; + dy3 = s[5]; + dx4 = s[6]; + dy4 = s[7]; + dx5 = s[8]; + dy5 = s[9]; + dx6 = dy6 = s[10]; + dx = dx1+dx2+dx3+dx4+dx5; + dy = dy1+dy2+dy3+dy4+dy5; + if (STBTT_fabs(dx) > STBTT_fabs(dy)) + dy6 = -dy; + else + dx6 = -dx; + stbtt__csctx_rccurve_to(c, dx1, dy1, dx2, dy2, dx3, dy3); + stbtt__csctx_rccurve_to(c, dx4, dy4, dx5, dy5, dx6, dy6); + break; + + default: + return STBTT__CSERR("unimplemented"); + } + } break; + + default: + if (b0 != 255 && b0 != 28 && b0 < 32) + return STBTT__CSERR("reserved operator"); + + // push immediate + if (b0 == 255) { + f = (float)(stbtt_int32)stbtt__buf_get32(&b) / 0x10000; + } else { + stbtt__buf_skip(&b, -1); + f = (float)(stbtt_int16)stbtt__cff_int(&b); + } + if (sp >= 48) return STBTT__CSERR("push stack overflow"); + s[sp++] = f; + clear_stack = 0; + break; + } + if (clear_stack) sp = 0; + } + return STBTT__CSERR("no endchar"); + +#undef STBTT__CSERR +} + +static int stbtt__GetGlyphShapeT2(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **pvertices) +{ + // runs the charstring twice, once to count and once to output (to avoid realloc) + stbtt__csctx count_ctx = STBTT__CSCTX_INIT(1); + stbtt__csctx output_ctx = STBTT__CSCTX_INIT(0); + if (stbtt__run_charstring(info, glyph_index, &count_ctx)) { + *pvertices = (stbtt_vertex*)STBTT_malloc(count_ctx.num_vertices*sizeof(stbtt_vertex), info->userdata); + output_ctx.pvertices = *pvertices; + if (stbtt__run_charstring(info, glyph_index, &output_ctx)) { + STBTT_assert(output_ctx.num_vertices == count_ctx.num_vertices); + return output_ctx.num_vertices; + } + } + *pvertices = NULL; + return 0; +} + +static int stbtt__GetGlyphInfoT2(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1) +{ + stbtt__csctx c = STBTT__CSCTX_INIT(1); + int r = stbtt__run_charstring(info, glyph_index, &c); + if (x0) *x0 = r ? c.min_x : 0; + if (y0) *y0 = r ? c.min_y : 0; + if (x1) *x1 = r ? c.max_x : 0; + if (y1) *y1 = r ? c.max_y : 0; + return r ? 
c.num_vertices : 0; +} + +STBTT_DEF int stbtt_GetGlyphShape(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **pvertices) +{ + if (!info->cff.size) + return stbtt__GetGlyphShapeTT(info, glyph_index, pvertices); + else + return stbtt__GetGlyphShapeT2(info, glyph_index, pvertices); +} + +STBTT_DEF void stbtt_GetGlyphHMetrics(const stbtt_fontinfo *info, int glyph_index, int *advanceWidth, int *leftSideBearing) +{ + stbtt_uint16 numOfLongHorMetrics = ttUSHORT(info->data+info->hhea + 34); + if (glyph_index < numOfLongHorMetrics) { + if (advanceWidth) *advanceWidth = ttSHORT(info->data + info->hmtx + 4*glyph_index); + if (leftSideBearing) *leftSideBearing = ttSHORT(info->data + info->hmtx + 4*glyph_index + 2); + } else { + if (advanceWidth) *advanceWidth = ttSHORT(info->data + info->hmtx + 4*(numOfLongHorMetrics-1)); + if (leftSideBearing) *leftSideBearing = ttSHORT(info->data + info->hmtx + 4*numOfLongHorMetrics + 2*(glyph_index - numOfLongHorMetrics)); + } +} + +STBTT_DEF int stbtt_GetKerningTableLength(const stbtt_fontinfo *info) +{ + stbtt_uint8 *data = info->data + info->kern; + + // we only look at the first table. it must be 'horizontal' and format 0. + if (!info->kern) + return 0; + if (ttUSHORT(data+2) < 1) // number of tables, need at least 1 + return 0; + if (ttUSHORT(data+8) != 1) // horizontal flag must be set in format + return 0; + + return ttUSHORT(data+10); +} + +STBTT_DEF int stbtt_GetKerningTable(const stbtt_fontinfo *info, stbtt_kerningentry* table, int table_length) +{ + stbtt_uint8 *data = info->data + info->kern; + int k, length; + + // we only look at the first table. it must be 'horizontal' and format 0. + if (!info->kern) + return 0; + if (ttUSHORT(data+2) < 1) // number of tables, need at least 1 + return 0; + if (ttUSHORT(data+8) != 1) // horizontal flag must be set in format + return 0; + + length = ttUSHORT(data+10); + if (table_length < length) + length = table_length; + + for (k = 0; k < length; k++) + { + table[k].glyph1 = ttUSHORT(data+18+(k*6)); + table[k].glyph2 = ttUSHORT(data+20+(k*6)); + table[k].advance = ttSHORT(data+22+(k*6)); + } + + return length; +} + +static int stbtt__GetGlyphKernInfoAdvance(const stbtt_fontinfo *info, int glyph1, int glyph2) +{ + stbtt_uint8 *data = info->data + info->kern; + stbtt_uint32 needle, straw; + int l, r, m; + + // we only look at the first table. it must be 'horizontal' and format 0. + if (!info->kern) + return 0; + if (ttUSHORT(data+2) < 1) // number of tables, need at least 1 + return 0; + if (ttUSHORT(data+8) != 1) // horizontal flag must be set in format + return 0; + + l = 0; + r = ttUSHORT(data+10) - 1; + needle = glyph1 << 16 | glyph2; + while (l <= r) { + m = (l + r) >> 1; + straw = ttULONG(data+18+(m*6)); // note: unaligned read + if (needle < straw) + r = m - 1; + else if (needle > straw) + l = m + 1; + else + return ttSHORT(data+22+(m*6)); + } + return 0; +} + +static stbtt_int32 stbtt__GetCoverageIndex(stbtt_uint8 *coverageTable, int glyph) +{ + stbtt_uint16 coverageFormat = ttUSHORT(coverageTable); + switch (coverageFormat) { + case 1: { + stbtt_uint16 glyphCount = ttUSHORT(coverageTable + 2); + + // Binary search. 
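+ // format 1 coverage is a sorted array of 2-byte glyph IDs, so the
+ // coverage index is simply the array position found by binary search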
+ stbtt_int32 l=0, r=glyphCount-1, m;
+ int straw, needle=glyph;
+ while (l <= r) {
+ stbtt_uint8 *glyphArray = coverageTable + 4;
+ stbtt_uint16 glyphID;
+ m = (l + r) >> 1;
+ glyphID = ttUSHORT(glyphArray + 2 * m);
+ straw = glyphID;
+ if (needle < straw)
+ r = m - 1;
+ else if (needle > straw)
+ l = m + 1;
+ else {
+ return m;
+ }
+ }
+ break;
+ }
+
+ case 2: {
+ stbtt_uint16 rangeCount = ttUSHORT(coverageTable + 2);
+ stbtt_uint8 *rangeArray = coverageTable + 4;
+
+ // Binary search.
+ stbtt_int32 l=0, r=rangeCount-1, m;
+ int strawStart, strawEnd, needle=glyph;
+ while (l <= r) {
+ stbtt_uint8 *rangeRecord;
+ m = (l + r) >> 1;
+ rangeRecord = rangeArray + 6 * m;
+ strawStart = ttUSHORT(rangeRecord);
+ strawEnd = ttUSHORT(rangeRecord + 2);
+ if (needle < strawStart)
+ r = m - 1;
+ else if (needle > strawEnd)
+ l = m + 1;
+ else {
+ stbtt_uint16 startCoverageIndex = ttUSHORT(rangeRecord + 4);
+ return startCoverageIndex + glyph - strawStart;
+ }
+ }
+ break;
+ }
+
+ default: return -1; // unsupported
+ }
+
+ return -1;
+}
+
+static stbtt_int32 stbtt__GetGlyphClass(stbtt_uint8 *classDefTable, int glyph)
+{
+ stbtt_uint16 classDefFormat = ttUSHORT(classDefTable);
+ switch (classDefFormat)
+ {
+ case 1: {
+ stbtt_uint16 startGlyphID = ttUSHORT(classDefTable + 2);
+ stbtt_uint16 glyphCount = ttUSHORT(classDefTable + 4);
+ stbtt_uint8 *classDef1ValueArray = classDefTable + 6;
+
+ if (glyph >= startGlyphID && glyph < startGlyphID + glyphCount)
+ return (stbtt_int32)ttUSHORT(classDef1ValueArray + 2 * (glyph - startGlyphID));
+ break;
+ }
+
+ case 2: {
+ stbtt_uint16 classRangeCount = ttUSHORT(classDefTable + 2);
+ stbtt_uint8 *classRangeRecords = classDefTable + 4;
+
+ // Binary search.
+ stbtt_int32 l=0, r=classRangeCount-1, m;
+ int strawStart, strawEnd, needle=glyph;
+ while (l <= r) {
+ stbtt_uint8 *classRangeRecord;
+ m = (l + r) >> 1;
+ classRangeRecord = classRangeRecords + 6 * m;
+ strawStart = ttUSHORT(classRangeRecord);
+ strawEnd = ttUSHORT(classRangeRecord + 2);
+ if (needle < strawStart)
+ r = m - 1;
+ else if (needle > strawEnd)
+ l = m + 1;
+ else
+ return (stbtt_int32)ttUSHORT(classRangeRecord + 4);
+ }
+ break;
+ }
+
+ default:
+ return -1; // Unsupported definition type, return an error.
+ }
+
+ // "All glyphs not assigned to a class fall into class 0". (OpenType spec)
+ return 0;
+}
+
+// Define to STBTT_assert(x) if you want to break on unimplemented formats.
+#define STBTT_GPOS_TODO_assert(x)
+
+static stbtt_int32 stbtt__GetGlyphGPOSInfoAdvance(const stbtt_fontinfo *info, int glyph1, int glyph2)
+{
+ stbtt_uint16 lookupListOffset;
+ stbtt_uint8 *lookupList;
+ stbtt_uint16 lookupCount;
+ stbtt_uint8 *data;
+ stbtt_int32 i, sti;
+
+ if (!info->gpos) return 0;
+
+ data = info->data + info->gpos;
+
+ if (ttUSHORT(data+0) != 1) return 0; // Major version 1
+ if (ttUSHORT(data+2) != 0) return 0; // Minor version 0
+
+ lookupListOffset = ttUSHORT(data+8);
+ lookupList = data + lookupListOffset;
+ lookupCount = ttUSHORT(lookupList);
+
+ for (i=0; i<lookupCount; ++i) {
+ stbtt_uint16 lookupOffset = ttUSHORT(lookupList + 2 + 2 * i);
+ stbtt_uint8 *lookupTable = lookupList + lookupOffset;
+
+ stbtt_uint16 lookupType = ttUSHORT(lookupTable);
+ stbtt_uint16 subTableCount = ttUSHORT(lookupTable + 4);
+ stbtt_uint8 *subTableOffsets = lookupTable + 6;
+ if (lookupType != 2) // Pair Adjustment Positioning Subtable
+ continue;
+
+ for (sti=0; sti<subTableCount; sti++) {
+ stbtt_uint16 subtableOffset = ttUSHORT(subTableOffsets + 2 * sti);
+ stbtt_uint8 *table = lookupTable + subtableOffset;
+ stbtt_uint16 posFormat = ttUSHORT(table);
+ stbtt_uint16 coverageOffset = ttUSHORT(table + 2);
+ stbtt_int32 coverageIndex = stbtt__GetCoverageIndex(table + coverageOffset, glyph1);
+ if (coverageIndex == -1) continue;
+
+ switch (posFormat) {
+ case 1: {
+ stbtt_int32 l, r, m;
+ int straw, needle;
+ stbtt_uint16 valueFormat1 = ttUSHORT(table + 4);
+ stbtt_uint16 valueFormat2 = ttUSHORT(table + 6);
+ if (valueFormat1 == 4 && valueFormat2 == 0) { // Support more formats?
+ stbtt_int32 valueRecordPairSizeInBytes = 2;
+ stbtt_uint16 pairSetCount = ttUSHORT(table + 8);
+ stbtt_uint16 pairPosOffset = ttUSHORT(table + 10 + 2 * coverageIndex);
+ stbtt_uint8 *pairValueTable = table + pairPosOffset;
+ stbtt_uint16 pairValueCount = ttUSHORT(pairValueTable);
+ stbtt_uint8 *pairValueArray = pairValueTable + 2;
+
+ if (coverageIndex >= pairSetCount) return 0;
+
+ needle=glyph2;
+ r=pairValueCount-1;
+ l=0;
+
+ // Binary search.
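+ // PairValueRecords are sorted by the second glyph's ID; each record here
+ // is a 2-byte glyph ID followed by a 2-byte xAdvance value record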
+ while (l <= r) { + stbtt_uint16 secondGlyph; + stbtt_uint8 *pairValue; + m = (l + r) >> 1; + pairValue = pairValueArray + (2 + valueRecordPairSizeInBytes) * m; + secondGlyph = ttUSHORT(pairValue); + straw = secondGlyph; + if (needle < straw) + r = m - 1; + else if (needle > straw) + l = m + 1; + else { + stbtt_int16 xAdvance = ttSHORT(pairValue + 2); + return xAdvance; + } + } + } else + return 0; + break; + } + + case 2: { + stbtt_uint16 valueFormat1 = ttUSHORT(table + 4); + stbtt_uint16 valueFormat2 = ttUSHORT(table + 6); + if (valueFormat1 == 4 && valueFormat2 == 0) { // Support more formats? + stbtt_uint16 classDef1Offset = ttUSHORT(table + 8); + stbtt_uint16 classDef2Offset = ttUSHORT(table + 10); + int glyph1class = stbtt__GetGlyphClass(table + classDef1Offset, glyph1); + int glyph2class = stbtt__GetGlyphClass(table + classDef2Offset, glyph2); + + stbtt_uint16 class1Count = ttUSHORT(table + 12); + stbtt_uint16 class2Count = ttUSHORT(table + 14); + stbtt_uint8 *class1Records, *class2Records; + stbtt_int16 xAdvance; + + if (glyph1class < 0 || glyph1class >= class1Count) return 0; // malformed + if (glyph2class < 0 || glyph2class >= class2Count) return 0; // malformed + + class1Records = table + 16; + class2Records = class1Records + 2 * (glyph1class * class2Count); + xAdvance = ttSHORT(class2Records + 2 * glyph2class); + return xAdvance; + } else + return 0; + break; + } + + default: + return 0; // Unsupported position format + } + } + } + + return 0; +} + +STBTT_DEF int stbtt_GetGlyphKernAdvance(const stbtt_fontinfo *info, int g1, int g2) +{ + int xAdvance = 0; + + if (info->gpos) + xAdvance += stbtt__GetGlyphGPOSInfoAdvance(info, g1, g2); + else if (info->kern) + xAdvance += stbtt__GetGlyphKernInfoAdvance(info, g1, g2); + + return xAdvance; +} + +STBTT_DEF int stbtt_GetCodepointKernAdvance(const stbtt_fontinfo *info, int ch1, int ch2) +{ + if (!info->kern && !info->gpos) // if no kerning table, don't waste time looking up both codepoint->glyphs + return 0; + return stbtt_GetGlyphKernAdvance(info, stbtt_FindGlyphIndex(info,ch1), stbtt_FindGlyphIndex(info,ch2)); +} + +STBTT_DEF void stbtt_GetCodepointHMetrics(const stbtt_fontinfo *info, int codepoint, int *advanceWidth, int *leftSideBearing) +{ + stbtt_GetGlyphHMetrics(info, stbtt_FindGlyphIndex(info,codepoint), advanceWidth, leftSideBearing); +} + +STBTT_DEF void stbtt_GetFontVMetrics(const stbtt_fontinfo *info, int *ascent, int *descent, int *lineGap) +{ + if (ascent ) *ascent = ttSHORT(info->data+info->hhea + 4); + if (descent) *descent = ttSHORT(info->data+info->hhea + 6); + if (lineGap) *lineGap = ttSHORT(info->data+info->hhea + 8); +} + +STBTT_DEF int stbtt_GetFontVMetricsOS2(const stbtt_fontinfo *info, int *typoAscent, int *typoDescent, int *typoLineGap) +{ + int tab = stbtt__find_table(info->data, info->fontstart, "OS/2"); + if (!tab) + return 0; + if (typoAscent ) *typoAscent = ttSHORT(info->data+tab + 68); + if (typoDescent) *typoDescent = ttSHORT(info->data+tab + 70); + if (typoLineGap) *typoLineGap = ttSHORT(info->data+tab + 72); + return 1; +} + +STBTT_DEF void stbtt_GetFontBoundingBox(const stbtt_fontinfo *info, int *x0, int *y0, int *x1, int *y1) +{ + *x0 = ttSHORT(info->data + info->head + 36); + *y0 = ttSHORT(info->data + info->head + 38); + *x1 = ttSHORT(info->data + info->head + 40); + *y1 = ttSHORT(info->data + info->head + 42); +} + +STBTT_DEF float stbtt_ScaleForPixelHeight(const stbtt_fontinfo *info, float height) +{ + int fheight = ttSHORT(info->data + info->hhea + 4) - ttSHORT(info->data + info->hhea + 6); + 
return (float) height / fheight; // fheight is ascent minus descent, in unscaled font units
+}
+
+STBTT_DEF float stbtt_ScaleForMappingEmToPixels(const stbtt_fontinfo *info, float pixels)
+{
+ int unitsPerEm = ttUSHORT(info->data + info->head + 18);
+ return pixels / unitsPerEm;
+}
+
+STBTT_DEF void stbtt_FreeShape(const stbtt_fontinfo *info, stbtt_vertex *v)
+{
+ STBTT_free(v, info->userdata);
+}
+
+STBTT_DEF stbtt_uint8 *stbtt_FindSVGDoc(const stbtt_fontinfo *info, int gl)
+{
+ int i;
+ stbtt_uint8 *data = info->data;
+ stbtt_uint8 *svg_doc_list = data + stbtt__get_svg((stbtt_fontinfo *) info);
+
+ int numEntries = ttUSHORT(svg_doc_list);
+ stbtt_uint8 *svg_docs = svg_doc_list + 2;
+
+ for(i=0; i<numEntries; i++) {
+ stbtt_uint8 *svg_doc = svg_docs + (12 * i);
+ if ((gl >= ttUSHORT(svg_doc)) && (gl <= ttUSHORT(svg_doc + 2)))
+ return svg_doc;
+ }
+ return 0;
+}
+
+STBTT_DEF int stbtt_GetGlyphSVG(const stbtt_fontinfo *info, int gl, const char **svg)
+{
+ stbtt_uint8 *data = info->data;
+ stbtt_uint8 *svg_doc;
+
+ if (info->svg == 0)
+ return 0;
+
+ svg_doc = stbtt_FindSVGDoc(info, gl);
+ if (svg_doc != NULL) {
+ *svg = (char *) data + info->svg + ttULONG(svg_doc + 4);
+ return ttULONG(svg_doc + 8);
+ } else {
+ return 0;
+ }
+}
+
+STBTT_DEF int stbtt_GetCodepointSVG(const stbtt_fontinfo *info, int unicode_codepoint, const char **svg)
+{
+ return stbtt_GetGlyphSVG(info, stbtt_FindGlyphIndex(info, unicode_codepoint), svg);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// antialiasing software rasterizer
+//
+
+STBTT_DEF void stbtt_GetGlyphBitmapBoxSubpixel(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y,float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1)
+{
+ int x0=0,y0=0,x1,y1; // =0 suppresses compiler warning
+ if (!stbtt_GetGlyphBox(font, glyph, &x0,&y0,&x1,&y1)) {
+ // e.g. space character
+ if (ix0) *ix0 = 0;
+ if (iy0) *iy0 = 0;
+ if (ix1) *ix1 = 0;
+ if (iy1) *iy1 = 0;
+ } else {
+ // move to integral bboxes (treating pixels as little squares, what pixels get touched)?
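+ // note the y flip: glyph space is y-up while the bitmap is y-down, so
+ // the box top y1 produces iy0 and the box bottom y0 produces iy1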
+ if (ix0) *ix0 = STBTT_ifloor( x0 * scale_x + shift_x); + if (iy0) *iy0 = STBTT_ifloor(-y1 * scale_y + shift_y); + if (ix1) *ix1 = STBTT_iceil ( x1 * scale_x + shift_x); + if (iy1) *iy1 = STBTT_iceil (-y0 * scale_y + shift_y); + } +} + +STBTT_DEF void stbtt_GetGlyphBitmapBox(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1) +{ + stbtt_GetGlyphBitmapBoxSubpixel(font, glyph, scale_x, scale_y,0.0f,0.0f, ix0, iy0, ix1, iy1); +} + +STBTT_DEF void stbtt_GetCodepointBitmapBoxSubpixel(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1) +{ + stbtt_GetGlyphBitmapBoxSubpixel(font, stbtt_FindGlyphIndex(font,codepoint), scale_x, scale_y,shift_x,shift_y, ix0,iy0,ix1,iy1); +} + +STBTT_DEF void stbtt_GetCodepointBitmapBox(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1) +{ + stbtt_GetCodepointBitmapBoxSubpixel(font, codepoint, scale_x, scale_y,0.0f,0.0f, ix0,iy0,ix1,iy1); +} + +////////////////////////////////////////////////////////////////////////////// +// +// Rasterizer + +typedef struct stbtt__hheap_chunk +{ + struct stbtt__hheap_chunk *next; +} stbtt__hheap_chunk; + +typedef struct stbtt__hheap +{ + struct stbtt__hheap_chunk *head; + void *first_free; + int num_remaining_in_head_chunk; +} stbtt__hheap; + +static void *stbtt__hheap_alloc(stbtt__hheap *hh, size_t size, void *userdata) +{ + if (hh->first_free) { + void *p = hh->first_free; + hh->first_free = * (void **) p; + return p; + } else { + if (hh->num_remaining_in_head_chunk == 0) { + int count = (size < 32 ? 2000 : size < 128 ? 800 : 100); + stbtt__hheap_chunk *c = (stbtt__hheap_chunk *) STBTT_malloc(sizeof(stbtt__hheap_chunk) + size * count, userdata); + if (c == NULL) + return NULL; + c->next = hh->head; + hh->head = c; + hh->num_remaining_in_head_chunk = count; + } + --hh->num_remaining_in_head_chunk; + return (char *) (hh->head) + sizeof(stbtt__hheap_chunk) + size * hh->num_remaining_in_head_chunk; + } +} + +static void stbtt__hheap_free(stbtt__hheap *hh, void *p) +{ + *(void **) p = hh->first_free; + hh->first_free = p; +} + +static void stbtt__hheap_cleanup(stbtt__hheap *hh, void *userdata) +{ + stbtt__hheap_chunk *c = hh->head; + while (c) { + stbtt__hheap_chunk *n = c->next; + STBTT_free(c, userdata); + c = n; + } +} + +typedef struct stbtt__edge { + float x0,y0, x1,y1; + int invert; +} stbtt__edge; + + +typedef struct stbtt__active_edge +{ + struct stbtt__active_edge *next; + #if STBTT_RASTERIZER_VERSION==1 + int x,dx; + float ey; + int direction; + #elif STBTT_RASTERIZER_VERSION==2 + float fx,fdx,fdy; + float direction; + float sy; + float ey; + #else + #error "Unrecognized value of STBTT_RASTERIZER_VERSION" + #endif +} stbtt__active_edge; + +#if STBTT_RASTERIZER_VERSION == 1 +#define STBTT_FIXSHIFT 10 +#define STBTT_FIX (1 << STBTT_FIXSHIFT) +#define STBTT_FIXMASK (STBTT_FIX-1) + +static stbtt__active_edge *stbtt__new_active(stbtt__hheap *hh, stbtt__edge *e, int off_x, float start_point, void *userdata) +{ + stbtt__active_edge *z = (stbtt__active_edge *) stbtt__hheap_alloc(hh, sizeof(*z), userdata); + float dxdy = (e->x1 - e->x0) / (e->y1 - e->y0); + STBTT_assert(z != NULL); + if (!z) return z; + + // round dx down to avoid overshooting + if (dxdy < 0) + z->dx = -STBTT_ifloor(STBTT_FIX * -dxdy); + else + z->dx = STBTT_ifloor(STBTT_FIX * dxdy); + + z->x = STBTT_ifloor(STBTT_FIX * e->x0 + z->dx * (start_point - e->y0)); // 
use z->dx so when we offset later it's by the same amount + z->x -= off_x * STBTT_FIX; + + z->ey = e->y1; + z->next = 0; + z->direction = e->invert ? 1 : -1; + return z; +} +#elif STBTT_RASTERIZER_VERSION == 2 +static stbtt__active_edge *stbtt__new_active(stbtt__hheap *hh, stbtt__edge *e, int off_x, float start_point, void *userdata) +{ + stbtt__active_edge *z = (stbtt__active_edge *) stbtt__hheap_alloc(hh, sizeof(*z), userdata); + float dxdy = (e->x1 - e->x0) / (e->y1 - e->y0); + STBTT_assert(z != NULL); + //STBTT_assert(e->y0 <= start_point); + if (!z) return z; + z->fdx = dxdy; + z->fdy = dxdy != 0.0f ? (1.0f/dxdy) : 0.0f; + z->fx = e->x0 + dxdy * (start_point - e->y0); + z->fx -= off_x; + z->direction = e->invert ? 1.0f : -1.0f; + z->sy = e->y0; + z->ey = e->y1; + z->next = 0; + return z; +} +#else +#error "Unrecognized value of STBTT_RASTERIZER_VERSION" +#endif + +#if STBTT_RASTERIZER_VERSION == 1 +// note: this routine clips fills that extend off the edges... ideally this +// wouldn't happen, but it could happen if the truetype glyph bounding boxes +// are wrong, or if the user supplies a too-small bitmap +static void stbtt__fill_active_edges(unsigned char *scanline, int len, stbtt__active_edge *e, int max_weight) +{ + // non-zero winding fill + int x0=0, w=0; + + while (e) { + if (w == 0) { + // if we're currently at zero, we need to record the edge start point + x0 = e->x; w += e->direction; + } else { + int x1 = e->x; w += e->direction; + // if we went to zero, we need to draw + if (w == 0) { + int i = x0 >> STBTT_FIXSHIFT; + int j = x1 >> STBTT_FIXSHIFT; + + if (i < len && j >= 0) { + if (i == j) { + // x0,x1 are the same pixel, so compute combined coverage + scanline[i] = scanline[i] + (stbtt_uint8) ((x1 - x0) * max_weight >> STBTT_FIXSHIFT); + } else { + if (i >= 0) // add antialiasing for x0 + scanline[i] = scanline[i] + (stbtt_uint8) (((STBTT_FIX - (x0 & STBTT_FIXMASK)) * max_weight) >> STBTT_FIXSHIFT); + else + i = -1; // clip + + if (j < len) // add antialiasing for x1 + scanline[j] = scanline[j] + (stbtt_uint8) (((x1 & STBTT_FIXMASK) * max_weight) >> STBTT_FIXSHIFT); + else + j = len; // clip + + for (++i; i < j; ++i) // fill pixels between x0 and x1 + scanline[i] = scanline[i] + (stbtt_uint8) max_weight; + } + } + } + } + + e = e->next; + } +} + +static void stbtt__rasterize_sorted_edges(stbtt__bitmap *result, stbtt__edge *e, int n, int vsubsample, int off_x, int off_y, void *userdata) +{ + stbtt__hheap hh = { 0, 0, 0 }; + stbtt__active_edge *active = NULL; + int y,j=0; + int max_weight = (255 / vsubsample); // weight per vertical scanline + int s; // vertical subsample index + unsigned char scanline_data[512], *scanline; + + if (result->w > 512) + scanline = (unsigned char *) STBTT_malloc(result->w, userdata); + else + scanline = scanline_data; + + y = off_y * vsubsample; + e[n].y0 = (off_y + result->h) * (float) vsubsample + 1; + + while (j < result->h) { + STBTT_memset(scanline, 0, result->w); + for (s=0; s < vsubsample; ++s) { + // find center of pixel for this scanline + float scan_y = y + 0.5f; + stbtt__active_edge **step = &active; + + // update all active edges; + // remove all active edges that terminate before the center of this scanline + while (*step) { + stbtt__active_edge * z = *step; + if (z->ey <= scan_y) { + *step = z->next; // delete from list + STBTT_assert(z->direction); + z->direction = 0; + stbtt__hheap_free(&hh, z); + } else { + z->x += z->dx; // advance to position for current scanline + step = &((*step)->next); // advance through list + } + } + + 
// resort the list if needed + for(;;) { + int changed=0; + step = &active; + while (*step && (*step)->next) { + if ((*step)->x > (*step)->next->x) { + stbtt__active_edge *t = *step; + stbtt__active_edge *q = t->next; + + t->next = q->next; + q->next = t; + *step = q; + changed = 1; + } + step = &(*step)->next; + } + if (!changed) break; + } + + // insert all edges that start before the center of this scanline -- omit ones that also end on this scanline + while (e->y0 <= scan_y) { + if (e->y1 > scan_y) { + stbtt__active_edge *z = stbtt__new_active(&hh, e, off_x, scan_y, userdata); + if (z != NULL) { + // find insertion point + if (active == NULL) + active = z; + else if (z->x < active->x) { + // insert at front + z->next = active; + active = z; + } else { + // find thing to insert AFTER + stbtt__active_edge *p = active; + while (p->next && p->next->x < z->x) + p = p->next; + // at this point, p->next->x is NOT < z->x + z->next = p->next; + p->next = z; + } + } + } + ++e; + } + + // now process all active edges in XOR fashion + if (active) + stbtt__fill_active_edges(scanline, result->w, active, max_weight); + + ++y; + } + STBTT_memcpy(result->pixels + j * result->stride, scanline, result->w); + ++j; + } + + stbtt__hheap_cleanup(&hh, userdata); + + if (scanline != scanline_data) + STBTT_free(scanline, userdata); +} + +#elif STBTT_RASTERIZER_VERSION == 2 + +// the edge passed in here does not cross the vertical line at x or the vertical line at x+1 +// (i.e. it has already been clipped to those) +static void stbtt__handle_clipped_edge(float *scanline, int x, stbtt__active_edge *e, float x0, float y0, float x1, float y1) +{ + if (y0 == y1) return; + STBTT_assert(y0 < y1); + STBTT_assert(e->sy <= e->ey); + if (y0 > e->ey) return; + if (y1 < e->sy) return; + if (y0 < e->sy) { + x0 += (x1-x0) * (e->sy - y0) / (y1-y0); + y0 = e->sy; + } + if (y1 > e->ey) { + x1 += (x1-x0) * (e->ey - y1) / (y1-y0); + y1 = e->ey; + } + + if (x0 == x) + STBTT_assert(x1 <= x+1); + else if (x0 == x+1) + STBTT_assert(x1 >= x); + else if (x0 <= x) + STBTT_assert(x1 <= x); + else if (x0 >= x+1) + STBTT_assert(x1 >= x+1); + else + STBTT_assert(x1 >= x && x1 <= x+1); + + if (x0 <= x && x1 <= x) + scanline[x] += e->direction * (y1-y0); + else if (x0 >= x+1 && x1 >= x+1) + ; + else { + STBTT_assert(x0 >= x && x0 <= x+1 && x1 >= x && x1 <= x+1); + scanline[x] += e->direction * (y1-y0) * (1-((x0-x)+(x1-x))/2); // coverage = 1 - average x position + } +} + +static void stbtt__fill_active_edges_new(float *scanline, float *scanline_fill, int len, stbtt__active_edge *e, float y_top) +{ + float y_bottom = y_top+1; + + while (e) { + // brute force every pixel + + // compute intersection points with top & bottom + STBTT_assert(e->ey >= y_top); + + if (e->fdx == 0) { + float x0 = e->fx; + if (x0 < len) { + if (x0 >= 0) { + stbtt__handle_clipped_edge(scanline,(int) x0,e, x0,y_top, x0,y_bottom); + stbtt__handle_clipped_edge(scanline_fill-1,(int) x0+1,e, x0,y_top, x0,y_bottom); + } else { + stbtt__handle_clipped_edge(scanline_fill-1,0,e, x0,y_top, x0,y_bottom); + } + } + } else { + float x0 = e->fx; + float dx = e->fdx; + float xb = x0 + dx; + float x_top, x_bottom; + float sy0,sy1; + float dy = e->fdy; + STBTT_assert(e->sy <= y_bottom && e->ey >= y_top); + + // compute endpoints of line segment clipped to this scanline (if the + // line segment starts on this scanline. x0 is the intersection of the + // line with y_top, but that may be off the line segment. 
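+ // clip the endpoints to [e->sy, e->ey] so the area math below only ever
+ // integrates the part of the edge that actually lies in this scanline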
+ if (e->sy > y_top) {
+ x_top = x0 + dx * (e->sy - y_top);
+ sy0 = e->sy;
+ } else {
+ x_top = x0;
+ sy0 = y_top;
+ }
+ if (e->ey < y_bottom) {
+ x_bottom = x0 + dx * (e->ey - y_top);
+ sy1 = e->ey;
+ } else {
+ x_bottom = xb;
+ sy1 = y_bottom;
+ }
+
+ if (x_top >= 0 && x_bottom >= 0 && x_top < len && x_bottom < len) {
+ // from here on, we don't have to range check x values
+
+ if ((int) x_top == (int) x_bottom) {
+ float height;
+ // simple case, only spans one pixel
+ int x = (int) x_top;
+ height = sy1 - sy0;
+ STBTT_assert(x >= 0 && x < len);
+ scanline[x] += e->direction * (1-((x_top - x) + (x_bottom-x))/2) * height;
+ scanline_fill[x] += e->direction * height; // everything right of this pixel is filled
+ } else {
+ int x,x1,x2;
+ float y_crossing, y_final, step, sign, area;
+ // covers 2+ pixels
+ if (x_top > x_bottom) {
+ // flip scanline vertically; signed area is the same
+ float t;
+ sy0 = y_bottom - (sy0 - y_top);
+ sy1 = y_bottom - (sy1 - y_top);
+ t = sy0, sy0 = sy1, sy1 = t;
+ t = x_bottom, x_bottom = x_top, x_top = t;
+ dx = -dx;
+ dy = -dy;
+ t = x0, x0 = xb, xb = t;
+ }
+ STBTT_assert(dy >= 0);
+ STBTT_assert(dx >= 0);
+
+ x1 = (int) x_top;
+ x2 = (int) x_bottom;
+ // compute intersection with y axis at x1+1
+ y_crossing = (x1+1 - x0) * dy + y_top;
+ // if x2 is right at the right edge of x1, y_crossing can blow up, github #1057
+ if (y_crossing > y_bottom)
+ y_crossing = y_bottom;
+
+ sign = e->direction;
+ // area of the rectangle covered from y0..y_crossing
+ area = sign * (y_crossing-sy0);
+ // area of the triangle (x_top,y0), (x+1,y0), (x+1,y_crossing)
+ scanline[x1] += area * (x1+1 - x_top)/2;
+
+ // check if final y_crossing is blown up; no test case for this
+ y_final = y_crossing + dy * (x2 - (x1+1)); // advance y by number of steps taken below
+ if (y_final > y_bottom) {
+ y_final = y_bottom;
+ dy = (y_final - y_crossing ) / (x2 - (x1+1)); // if denom=0, y_final = y_crossing, so y_final <= y_bottom
+ }
+
+ step = sign * dy * 1; // dy is dy/dx, change in y for every 1 change in x, which is also how much pixel area changes for each step in x
+ for (x = x1+1; x < x2; ++x) {
+ scanline[x] += area + step/2; // area of parallelogram is step/2
+ area += step;
+ }
+ STBTT_assert(STBTT_fabs(area) <= 1.01f); // accumulated error from area += step unless we round step down
+
+ // area of the triangle (x2,y_crossing), (x_bottom,y1), (x2,y1)
+ scanline[x2] += area + sign * (x_bottom - x2)/2 * (sy1-y_crossing);
+
+ scanline_fill[x2] += sign * (sy1-sy0);
+ }
+ } else {
+ // if edge goes outside of box we're drawing, we require
+ // clipping logic. since this does not match the intended use
+ // of this library, we use a different, very slow brute
+ // force implementation
+ int x;
+ for (x=0; x < len; ++x) {
+ // cases:
+ //
+ // there can be up to two intersections with the pixel. any intersection
+ // with left or right edges can be handled by splitting into two (or three)
+ // regions. intersections with top & bottom do not necessitate case-wise logic.
+ //
+ // the old way of doing this found the intersections with the left & right edges,
+ // then used some simple logic to produce up to three segments in sorted order
+ // from top-to-bottom. however, this had a problem: if an x edge was epsilon
+ // across the x border, then the corresponding y position might not be distinct
+ // from the other y segment, and it might be ignored as an empty segment. to avoid
+ // that, we need to explicitly produce segments based on x positions.
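+ // y1/y2 computed below are where the edge crosses the pixel's left (x)
+ // and right (x+1) borders; each case emits its sub-segments top-to-bottom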
+ + // rename variables to clearly-defined pairs + float y0 = y_top; + float x1 = (float) (x); + float x2 = (float) (x+1); + float x3 = xb; + float y3 = y_bottom; + + // x = e->x + e->dx * (y-y_top) + // (y-y_top) = (x - e->x) / e->dx + // y = (x - e->x) / e->dx + y_top + float y1 = (x - x0) / dx + y_top; + float y2 = (x+1 - x0) / dx + y_top; + + if (x0 < x1 && x3 > x2) { // three segments descending down-right + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x1,y1); + stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x2,y2); + stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3); + } else if (x3 < x1 && x0 > x2) { // three segments descending down-left + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x2,y2); + stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x1,y1); + stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x3,y3); + } else if (x0 < x1 && x3 > x1) { // two segments across x, down-right + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x1,y1); + stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x3,y3); + } else if (x3 < x1 && x0 > x1) { // two segments across x, down-left + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x1,y1); + stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x3,y3); + } else if (x0 < x2 && x3 > x2) { // two segments across x+1, down-right + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x2,y2); + stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3); + } else if (x3 < x2 && x0 > x2) { // two segments across x+1, down-left + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x2,y2); + stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3); + } else { // one segment + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x3,y3); + } + } + } + } + e = e->next; + } +} + +// directly AA rasterize edges w/o supersampling +static void stbtt__rasterize_sorted_edges(stbtt__bitmap *result, stbtt__edge *e, int n, int vsubsample, int off_x, int off_y, void *userdata) +{ + stbtt__hheap hh = { 0, 0, 0 }; + stbtt__active_edge *active = NULL; + int y,j=0, i; + float scanline_data[129], *scanline, *scanline2; + + STBTT__NOTUSED(vsubsample); + + if (result->w > 64) + scanline = (float *) STBTT_malloc((result->w*2+1) * sizeof(float), userdata); + else + scanline = scanline_data; + + scanline2 = scanline + result->w; + + y = off_y; + e[n].y0 = (float) (off_y + result->h) + 1; + + while (j < result->h) { + // find center of pixel for this scanline + float scan_y_top = y + 0.0f; + float scan_y_bottom = y + 1.0f; + stbtt__active_edge **step = &active; + + STBTT_memset(scanline , 0, result->w*sizeof(scanline[0])); + STBTT_memset(scanline2, 0, (result->w+1)*sizeof(scanline[0])); + + // update all active edges; + // remove all active edges that terminate before the top of this scanline + while (*step) { + stbtt__active_edge * z = *step; + if (z->ey <= scan_y_top) { + *step = z->next; // delete from list + STBTT_assert(z->direction); + z->direction = 0; + stbtt__hheap_free(&hh, z); + } else { + step = &((*step)->next); // advance through list + } + } + + // insert all edges that start before the bottom of this scanline + while (e->y0 <= scan_y_bottom) { + if (e->y0 != e->y1) { + stbtt__active_edge *z = stbtt__new_active(&hh, e, off_x, scan_y_top, userdata); + if (z != NULL) { + if (j == 0 && off_y != 0) { + if (z->ey < scan_y_top) { + // this can happen due to subpixel positioning and some kind of fp rounding error i think + z->ey = scan_y_top; + } + } + STBTT_assert(z->ey >= scan_y_top); // if we get really unlucky a tiny bit of an edge can be out of bounds + // insert at front + z->next = 
active;
+ active = z;
+ }
+ }
+ ++e;
+ }
+
+ // now process all active edges
+ if (active)
+ stbtt__fill_active_edges_new(scanline, scanline2+1, result->w, active, scan_y_top);
+
+ {
+ float sum = 0;
+ for (i=0; i < result->w; ++i) {
+ float k;
+ int m;
+ sum += scanline2[i];
+ k = scanline[i] + sum;
+ k = (float) STBTT_fabs(k)*255 + 0.5f;
+ m = (int) k;
+ if (m > 255) m = 255;
+ result->pixels[j*result->stride + i] = (unsigned char) m;
+ }
+ }
+ // advance all the edges
+ step = &active;
+ while (*step) {
+ stbtt__active_edge *z = *step;
+ z->fx += z->fdx; // advance to position for current scanline
+ step = &((*step)->next); // advance through list
+ }
+
+ ++y;
+ ++j;
+ }
+
+ stbtt__hheap_cleanup(&hh, userdata);
+
+ if (scanline != scanline_data)
+ STBTT_free(scanline, userdata);
+}
+#else
+#error "Unrecognized value of STBTT_RASTERIZER_VERSION"
+#endif
+
+#define STBTT__COMPARE(a,b) ((a)->y0 < (b)->y0)
+
+static void stbtt__sort_edges_ins_sort(stbtt__edge *p, int n)
+{
+ int i,j;
+ for (i=1; i < n; ++i) {
+ stbtt__edge t = p[i], *a = &t;
+ j = i;
+ while (j > 0) {
+ stbtt__edge *b = &p[j-1];
+ int c = STBTT__COMPARE(a,b);
+ if (!c) break;
+ p[j] = p[j-1];
+ --j;
+ }
+ if (i != j)
+ p[j] = t;
+ }
+}
+
+static void stbtt__sort_edges_quicksort(stbtt__edge *p, int n)
+{
+ /* threshold for transitioning to insertion sort */
+ while (n > 12) {
+ stbtt__edge t;
+ int c01,c12,c,m,i,j;
+
+ /* compute median of three */
+ m = n >> 1;
+ c01 = STBTT__COMPARE(&p[0],&p[m]);
+ c12 = STBTT__COMPARE(&p[m],&p[n-1]);
+ /* if 0 >= mid >= end, or 0 < mid < end, then use mid */
+ if (c01 != c12) {
+ /* otherwise, we'll need to swap something else to middle */
+ int z;
+ c = STBTT__COMPARE(&p[0],&p[n-1]);
+ /* 0>mid && mid<n: 0>n => n; 0<n => 0 */
+ /* 0<mid && mid>n: 0>n => 0; 0<n => n */
+ z = (c == c12) ? 0 : n-1;
+ t = p[z];
+ p[z] = p[m];
+ p[m] = t;
+ }
+ /* now p[m] is the median-of-three */
+ /* swap it to the beginning so it won't move around */
+ t = p[0];
+ p[0] = p[m];
+ p[m] = t;
+
+ /* partition loop */
+ i=1;
+ j=n-1;
+ for(;;) {
+ /* handling of equality is crucial here */
+ /* for sentinels & efficiency with duplicates */
+ for (;;++i) {
+ if (!STBTT__COMPARE(&p[i], &p[0])) break;
+ }
+ for (;;--j) {
+ if (!STBTT__COMPARE(&p[0], &p[j])) break;
+ }
+ /* make sure we haven't crossed */
+ if (i >= j) break;
+ t = p[i];
+ p[i] = p[j];
+ p[j] = t;
+
+ ++i;
+ --j;
+ }
+ /* recurse on smaller side, iterate on larger */
+ if (j < (n-i)) {
+ stbtt__sort_edges_quicksort(p,j);
+ p = p+i;
+ n = n-i;
+ } else {
+ stbtt__sort_edges_quicksort(p+i, n-i);
+ n = j;
+ }
+ }
+}
+
+static void stbtt__sort_edges(stbtt__edge *p, int n)
+{
+ stbtt__sort_edges_quicksort(p, n);
+ stbtt__sort_edges_ins_sort(p, n);
+}
+
+typedef struct
+{
+ float x,y;
+} stbtt__point;
+
+static void stbtt__rasterize(stbtt__bitmap *result, stbtt__point *pts, int *wcount, int windings, float scale_x, float scale_y, float shift_x, float shift_y, int off_x, int off_y, int invert, void *userdata)
+{
+ float y_scale_inv = invert ? -scale_y : scale_y;
+ stbtt__edge *e;
+ int n,i,j,k,m;
+#if STBTT_RASTERIZER_VERSION == 1
+ int vsubsample = result->h < 8 ?
15 : 5; +#elif STBTT_RASTERIZER_VERSION == 2 + int vsubsample = 1; +#else + #error "Unrecognized value of STBTT_RASTERIZER_VERSION" +#endif + // vsubsample should divide 255 evenly; otherwise we won't reach full opacity + + // now we have to blow out the windings into explicit edge lists + n = 0; + for (i=0; i < windings; ++i) + n += wcount[i]; + + e = (stbtt__edge *) STBTT_malloc(sizeof(*e) * (n+1), userdata); // add an extra one as a sentinel + if (e == 0) return; + n = 0; + + m=0; + for (i=0; i < windings; ++i) { + stbtt__point *p = pts + m; + m += wcount[i]; + j = wcount[i]-1; + for (k=0; k < wcount[i]; j=k++) { + int a=k,b=j; + // skip the edge if horizontal + if (p[j].y == p[k].y) + continue; + // add edge from j to k to the list + e[n].invert = 0; + if (invert ? p[j].y > p[k].y : p[j].y < p[k].y) { + e[n].invert = 1; + a=j,b=k; + } + e[n].x0 = p[a].x * scale_x + shift_x; + e[n].y0 = (p[a].y * y_scale_inv + shift_y) * vsubsample; + e[n].x1 = p[b].x * scale_x + shift_x; + e[n].y1 = (p[b].y * y_scale_inv + shift_y) * vsubsample; + ++n; + } + } + + // now sort the edges by their highest point (should snap to integer, and then by x) + //STBTT_sort(e, n, sizeof(e[0]), stbtt__edge_compare); + stbtt__sort_edges(e, n); + + // now, traverse the scanlines and find the intersections on each scanline, use xor winding rule + stbtt__rasterize_sorted_edges(result, e, n, vsubsample, off_x, off_y, userdata); + + STBTT_free(e, userdata); +} + +static void stbtt__add_point(stbtt__point *points, int n, float x, float y) +{ + if (!points) return; // during first pass, it's unallocated + points[n].x = x; + points[n].y = y; +} + +// tessellate until threshold p is happy... @TODO warped to compensate for non-linear stretching +static int stbtt__tesselate_curve(stbtt__point *points, int *num_points, float x0, float y0, float x1, float y1, float x2, float y2, float objspace_flatness_squared, int n) +{ + // midpoint + float mx = (x0 + 2*x1 + x2)/4; + float my = (y0 + 2*y1 + y2)/4; + // versus directly drawn line + float dx = (x0+x2)/2 - mx; + float dy = (y0+y2)/2 - my; + if (n > 16) // 65536 segments on one curve better be enough! + return 1; + if (dx*dx+dy*dy > objspace_flatness_squared) { // half-pixel error allowed... need to be smaller if AA + stbtt__tesselate_curve(points, num_points, x0,y0, (x0+x1)/2.0f,(y0+y1)/2.0f, mx,my, objspace_flatness_squared,n+1); + stbtt__tesselate_curve(points, num_points, mx,my, (x1+x2)/2.0f,(y1+y2)/2.0f, x2,y2, objspace_flatness_squared,n+1); + } else { + stbtt__add_point(points, *num_points,x2,y2); + *num_points = *num_points+1; + } + return 1; +} + +static void stbtt__tesselate_cubic(stbtt__point *points, int *num_points, float x0, float y0, float x1, float y1, float x2, float y2, float x3, float y3, float objspace_flatness_squared, int n) +{ + // @TODO this "flatness" calculation is just made-up nonsense that seems to work well enough + float dx0 = x1-x0; + float dy0 = y1-y0; + float dx1 = x2-x1; + float dy1 = y2-y1; + float dx2 = x3-x2; + float dy2 = y3-y2; + float dx = x3-x0; + float dy = y3-y0; + float longlen = (float) (STBTT_sqrt(dx0*dx0+dy0*dy0)+STBTT_sqrt(dx1*dx1+dy1*dy1)+STBTT_sqrt(dx2*dx2+dy2*dy2)); + float shortlen = (float) STBTT_sqrt(dx*dx+dy*dy); + float flatness_squared = longlen*longlen-shortlen*shortlen; + + if (n > 16) // 65536 segments on one curve better be enough! 
+ return; + + if (flatness_squared > objspace_flatness_squared) { + float x01 = (x0+x1)/2; + float y01 = (y0+y1)/2; + float x12 = (x1+x2)/2; + float y12 = (y1+y2)/2; + float x23 = (x2+x3)/2; + float y23 = (y2+y3)/2; + + float xa = (x01+x12)/2; + float ya = (y01+y12)/2; + float xb = (x12+x23)/2; + float yb = (y12+y23)/2; + + float mx = (xa+xb)/2; + float my = (ya+yb)/2; + + stbtt__tesselate_cubic(points, num_points, x0,y0, x01,y01, xa,ya, mx,my, objspace_flatness_squared,n+1); + stbtt__tesselate_cubic(points, num_points, mx,my, xb,yb, x23,y23, x3,y3, objspace_flatness_squared,n+1); + } else { + stbtt__add_point(points, *num_points,x3,y3); + *num_points = *num_points+1; + } +} + +// returns number of contours +static stbtt__point *stbtt_FlattenCurves(stbtt_vertex *vertices, int num_verts, float objspace_flatness, int **contour_lengths, int *num_contours, void *userdata) +{ + stbtt__point *points=0; + int num_points=0; + + float objspace_flatness_squared = objspace_flatness * objspace_flatness; + int i,n=0,start=0, pass; + + // count how many "moves" there are to get the contour count + for (i=0; i < num_verts; ++i) + if (vertices[i].type == STBTT_vmove) + ++n; + + *num_contours = n; + if (n == 0) return 0; + + *contour_lengths = (int *) STBTT_malloc(sizeof(**contour_lengths) * n, userdata); + + if (*contour_lengths == 0) { + *num_contours = 0; + return 0; + } + + // make two passes through the points so we don't need to realloc + for (pass=0; pass < 2; ++pass) { + float x=0,y=0; + if (pass == 1) { + points = (stbtt__point *) STBTT_malloc(num_points * sizeof(points[0]), userdata); + if (points == NULL) goto error; + } + num_points = 0; + n= -1; + for (i=0; i < num_verts; ++i) { + switch (vertices[i].type) { + case STBTT_vmove: + // start the next contour + if (n >= 0) + (*contour_lengths)[n] = num_points - start; + ++n; + start = num_points; + + x = vertices[i].x, y = vertices[i].y; + stbtt__add_point(points, num_points++, x,y); + break; + case STBTT_vline: + x = vertices[i].x, y = vertices[i].y; + stbtt__add_point(points, num_points++, x, y); + break; + case STBTT_vcurve: + stbtt__tesselate_curve(points, &num_points, x,y, + vertices[i].cx, vertices[i].cy, + vertices[i].x, vertices[i].y, + objspace_flatness_squared, 0); + x = vertices[i].x, y = vertices[i].y; + break; + case STBTT_vcubic: + stbtt__tesselate_cubic(points, &num_points, x,y, + vertices[i].cx, vertices[i].cy, + vertices[i].cx1, vertices[i].cy1, + vertices[i].x, vertices[i].y, + objspace_flatness_squared, 0); + x = vertices[i].x, y = vertices[i].y; + break; + } + } + (*contour_lengths)[n] = num_points - start; + } + + return points; +error: + STBTT_free(points, userdata); + STBTT_free(*contour_lengths, userdata); + *contour_lengths = 0; + *num_contours = 0; + return NULL; +} + +STBTT_DEF void stbtt_Rasterize(stbtt__bitmap *result, float flatness_in_pixels, stbtt_vertex *vertices, int num_verts, float scale_x, float scale_y, float shift_x, float shift_y, int x_off, int y_off, int invert, void *userdata) +{ + float scale = scale_x > scale_y ? 
scale_y : scale_x; + int winding_count = 0; + int *winding_lengths = NULL; + stbtt__point *windings = stbtt_FlattenCurves(vertices, num_verts, flatness_in_pixels / scale, &winding_lengths, &winding_count, userdata); + if (windings) { + stbtt__rasterize(result, windings, winding_lengths, winding_count, scale_x, scale_y, shift_x, shift_y, x_off, y_off, invert, userdata); + STBTT_free(winding_lengths, userdata); + STBTT_free(windings, userdata); + } +} + +STBTT_DEF void stbtt_FreeBitmap(unsigned char *bitmap, void *userdata) +{ + STBTT_free(bitmap, userdata); +} + +STBTT_DEF unsigned char *stbtt_GetGlyphBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int glyph, int *width, int *height, int *xoff, int *yoff) +{ + int ix0,iy0,ix1,iy1; + stbtt__bitmap gbm; + stbtt_vertex *vertices; + int num_verts = stbtt_GetGlyphShape(info, glyph, &vertices); + + if (scale_x == 0) scale_x = scale_y; + if (scale_y == 0) { + if (scale_x == 0) { + STBTT_free(vertices, info->userdata); + return NULL; + } + scale_y = scale_x; + } + + stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale_x, scale_y, shift_x, shift_y, &ix0,&iy0,&ix1,&iy1); + + // now we get the size + gbm.w = (ix1 - ix0); + gbm.h = (iy1 - iy0); + gbm.pixels = NULL; // in case we error + + if (width ) *width = gbm.w; + if (height) *height = gbm.h; + if (xoff ) *xoff = ix0; + if (yoff ) *yoff = iy0; + + if (gbm.w && gbm.h) { + gbm.pixels = (unsigned char *) STBTT_malloc(gbm.w * gbm.h, info->userdata); + if (gbm.pixels) { + gbm.stride = gbm.w; + + stbtt_Rasterize(&gbm, 0.35f, vertices, num_verts, scale_x, scale_y, shift_x, shift_y, ix0, iy0, 1, info->userdata); + } + } + STBTT_free(vertices, info->userdata); + return gbm.pixels; +} + +STBTT_DEF unsigned char *stbtt_GetGlyphBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int glyph, int *width, int *height, int *xoff, int *yoff) +{ + return stbtt_GetGlyphBitmapSubpixel(info, scale_x, scale_y, 0.0f, 0.0f, glyph, width, height, xoff, yoff); +} + +STBTT_DEF void stbtt_MakeGlyphBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int glyph) +{ + int ix0,iy0; + stbtt_vertex *vertices; + int num_verts = stbtt_GetGlyphShape(info, glyph, &vertices); + stbtt__bitmap gbm; + + stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale_x, scale_y, shift_x, shift_y, &ix0,&iy0,0,0); + gbm.pixels = output; + gbm.w = out_w; + gbm.h = out_h; + gbm.stride = out_stride; + + if (gbm.w && gbm.h) + stbtt_Rasterize(&gbm, 0.35f, vertices, num_verts, scale_x, scale_y, shift_x, shift_y, ix0,iy0, 1, info->userdata); + + STBTT_free(vertices, info->userdata); +} + +STBTT_DEF void stbtt_MakeGlyphBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int glyph) +{ + stbtt_MakeGlyphBitmapSubpixel(info, output, out_w, out_h, out_stride, scale_x, scale_y, 0.0f,0.0f, glyph); +} + +STBTT_DEF unsigned char *stbtt_GetCodepointBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint, int *width, int *height, int *xoff, int *yoff) +{ + return stbtt_GetGlyphBitmapSubpixel(info, scale_x, scale_y,shift_x,shift_y, stbtt_FindGlyphIndex(info,codepoint), width,height,xoff,yoff); +} + +STBTT_DEF void stbtt_MakeCodepointBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float 
scale_y, float shift_x, float shift_y, int oversample_x, int oversample_y, float *sub_x, float *sub_y, int codepoint) +{ + stbtt_MakeGlyphBitmapSubpixelPrefilter(info, output, out_w, out_h, out_stride, scale_x, scale_y, shift_x, shift_y, oversample_x, oversample_y, sub_x, sub_y, stbtt_FindGlyphIndex(info,codepoint)); +} + +STBTT_DEF void stbtt_MakeCodepointBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint) +{ + stbtt_MakeGlyphBitmapSubpixel(info, output, out_w, out_h, out_stride, scale_x, scale_y, shift_x, shift_y, stbtt_FindGlyphIndex(info,codepoint)); +} + +STBTT_DEF unsigned char *stbtt_GetCodepointBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int codepoint, int *width, int *height, int *xoff, int *yoff) +{ + return stbtt_GetCodepointBitmapSubpixel(info, scale_x, scale_y, 0.0f,0.0f, codepoint, width,height,xoff,yoff); +} + +STBTT_DEF void stbtt_MakeCodepointBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int codepoint) +{ + stbtt_MakeCodepointBitmapSubpixel(info, output, out_w, out_h, out_stride, scale_x, scale_y, 0.0f,0.0f, codepoint); +} + +////////////////////////////////////////////////////////////////////////////// +// +// bitmap baking +// +// This is SUPER-CRAPPY packing to keep source code small + +static int stbtt_BakeFontBitmap_internal(unsigned char *data, int offset, // font location (use offset=0 for plain .ttf) + float pixel_height, // height of font in pixels + unsigned char *pixels, int pw, int ph, // bitmap to be filled in + int first_char, int num_chars, // characters to bake + stbtt_bakedchar *chardata) +{ + float scale; + int x,y,bottom_y, i; + stbtt_fontinfo f; + f.userdata = NULL; + if (!stbtt_InitFont(&f, data, offset)) + return -1; + STBTT_memset(pixels, 0, pw*ph); // background of 0 around pixels + x=y=1; + bottom_y = 1; + + scale = stbtt_ScaleForPixelHeight(&f, pixel_height); + + for (i=0; i < num_chars; ++i) { + int advance, lsb, x0,y0,x1,y1,gw,gh; + int g = stbtt_FindGlyphIndex(&f, first_char + i); + stbtt_GetGlyphHMetrics(&f, g, &advance, &lsb); + stbtt_GetGlyphBitmapBox(&f, g, scale,scale, &x0,&y0,&x1,&y1); + gw = x1-x0; + gh = y1-y0; + if (x + gw + 1 >= pw) + y = bottom_y, x = 1; // advance to next row + if (y + gh + 1 >= ph) // check if it fits vertically AFTER potentially moving to next row + return -i; + STBTT_assert(x+gw < pw); + STBTT_assert(y+gh < ph); + stbtt_MakeGlyphBitmap(&f, pixels+x+y*pw, gw,gh,pw, scale,scale, g); + chardata[i].x0 = (stbtt_int16) x; + chardata[i].y0 = (stbtt_int16) y; + chardata[i].x1 = (stbtt_int16) (x + gw); + chardata[i].y1 = (stbtt_int16) (y + gh); + chardata[i].xadvance = scale * advance; + chardata[i].xoff = (float) x0; + chardata[i].yoff = (float) y0; + x = x + gw + 1; + if (y+gh+1 > bottom_y) + bottom_y = y+gh+1; + } + return bottom_y; +} + +STBTT_DEF void stbtt_GetBakedQuad(const stbtt_bakedchar *chardata, int pw, int ph, int char_index, float *xpos, float *ypos, stbtt_aligned_quad *q, int opengl_fillrule) +{ + float d3d_bias = opengl_fillrule ? 
0 : -0.5f; + float ipw = 1.0f / pw, iph = 1.0f / ph; + const stbtt_bakedchar *b = chardata + char_index; + int round_x = STBTT_ifloor((*xpos + b->xoff) + 0.5f); + int round_y = STBTT_ifloor((*ypos + b->yoff) + 0.5f); + + q->x0 = round_x + d3d_bias; + q->y0 = round_y + d3d_bias; + q->x1 = round_x + b->x1 - b->x0 + d3d_bias; + q->y1 = round_y + b->y1 - b->y0 + d3d_bias; + + q->s0 = b->x0 * ipw; + q->t0 = b->y0 * iph; + q->s1 = b->x1 * ipw; + q->t1 = b->y1 * iph; + + *xpos += b->xadvance; +} + +////////////////////////////////////////////////////////////////////////////// +// +// rectangle packing replacement routines if you don't have stb_rect_pack.h +// + +#ifndef STB_RECT_PACK_VERSION + +typedef int stbrp_coord; + +//////////////////////////////////////////////////////////////////////////////////// +// // +// // +// COMPILER WARNING ?!?!? // +// // +// // +// if you get a compile warning due to these symbols being defined more than // +// once, move #include "stb_rect_pack.h" before #include "stb_truetype.h" // +// // +//////////////////////////////////////////////////////////////////////////////////// + +typedef struct +{ + int width,height; + int x,y,bottom_y; +} stbrp_context; + +typedef struct +{ + unsigned char x; +} stbrp_node; + +struct stbrp_rect +{ + stbrp_coord x,y; + int id,w,h,was_packed; +}; + +static void stbrp_init_target(stbrp_context *con, int pw, int ph, stbrp_node *nodes, int num_nodes) +{ + con->width = pw; + con->height = ph; + con->x = 0; + con->y = 0; + con->bottom_y = 0; + STBTT__NOTUSED(nodes); + STBTT__NOTUSED(num_nodes); +} + +static void stbrp_pack_rects(stbrp_context *con, stbrp_rect *rects, int num_rects) +{ + int i; + for (i=0; i < num_rects; ++i) { + if (con->x + rects[i].w > con->width) { + con->x = 0; + con->y = con->bottom_y; + } + if (con->y + rects[i].h > con->height) + break; + rects[i].x = con->x; + rects[i].y = con->y; + rects[i].was_packed = 1; + con->x += rects[i].w; + if (con->y + rects[i].h > con->bottom_y) + con->bottom_y = con->y + rects[i].h; + } + for ( ; i < num_rects; ++i) + rects[i].was_packed = 0; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// bitmap baking +// +// This is SUPER-AWESOME (tm Ryan Gordon) packing using stb_rect_pack.h. If +// stb_rect_pack.h isn't available, it uses the BakeFontBitmap strategy. + +STBTT_DEF int stbtt_PackBegin(stbtt_pack_context *spc, unsigned char *pixels, int pw, int ph, int stride_in_bytes, int padding, void *alloc_context) +{ + stbrp_context *context = (stbrp_context *) STBTT_malloc(sizeof(*context) ,alloc_context); + int num_nodes = pw - padding; + stbrp_node *nodes = (stbrp_node *) STBTT_malloc(sizeof(*nodes ) * num_nodes,alloc_context); + + if (context == NULL || nodes == NULL) { + if (context != NULL) STBTT_free(context, alloc_context); + if (nodes != NULL) STBTT_free(nodes , alloc_context); + return 0; + } + + spc->user_allocator_context = alloc_context; + spc->width = pw; + spc->height = ph; + spc->pixels = pixels; + spc->pack_info = context; + spc->nodes = nodes; + spc->padding = padding; + spc->stride_in_bytes = stride_in_bytes != 0 ? 
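To make the fallback's shelf strategy concrete, a worked trace with made-up sizes (target 64 px wide, rects packed strictly in submission order):

// rects 40x10, 40x8, 40x12 into a 64-wide target:
//   rect 0: 0+40 <= 64                    -> placed at (0,0);  x -> 40, bottom_y -> 10
//   rect 1: 40+40 > 64, new shelf at y=10 -> placed at (0,10); bottom_y -> 18
//   rect 2: 40+40 > 64 again              -> placed at (0,18); bottom_y -> 30
// Rects are never reordered or fitted into leftover gaps, so utilization is
// much worse than stb_rect_pack's skyline packer, but the packing contract
// (was_packed, x, y) is honored identically.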
stride_in_bytes : pw; + spc->h_oversample = 1; + spc->v_oversample = 1; + spc->skip_missing = 0; + + stbrp_init_target(context, pw-padding, ph-padding, nodes, num_nodes); + + if (pixels) + STBTT_memset(pixels, 0, pw*ph); // background of 0 around pixels + + return 1; +} + +STBTT_DEF void stbtt_PackEnd (stbtt_pack_context *spc) +{ + STBTT_free(spc->nodes , spc->user_allocator_context); + STBTT_free(spc->pack_info, spc->user_allocator_context); +} + +STBTT_DEF void stbtt_PackSetOversampling(stbtt_pack_context *spc, unsigned int h_oversample, unsigned int v_oversample) +{ + STBTT_assert(h_oversample <= STBTT_MAX_OVERSAMPLE); + STBTT_assert(v_oversample <= STBTT_MAX_OVERSAMPLE); + if (h_oversample <= STBTT_MAX_OVERSAMPLE) + spc->h_oversample = h_oversample; + if (v_oversample <= STBTT_MAX_OVERSAMPLE) + spc->v_oversample = v_oversample; +} + +STBTT_DEF void stbtt_PackSetSkipMissingCodepoints(stbtt_pack_context *spc, int skip) +{ + spc->skip_missing = skip; +} + +#define STBTT__OVER_MASK (STBTT_MAX_OVERSAMPLE-1) + +static void stbtt__h_prefilter(unsigned char *pixels, int w, int h, int stride_in_bytes, unsigned int kernel_width) +{ + unsigned char buffer[STBTT_MAX_OVERSAMPLE]; + int safe_w = w - kernel_width; + int j; + STBTT_memset(buffer, 0, STBTT_MAX_OVERSAMPLE); // suppress bogus warning from VS2013 -analyze + for (j=0; j < h; ++j) { + int i; + unsigned int total; + STBTT_memset(buffer, 0, kernel_width); + + total = 0; + + // make kernel_width a constant in common cases so compiler can optimize out the divide + switch (kernel_width) { + case 2: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / 2); + } + break; + case 3: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / 3); + } + break; + case 4: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / 4); + } + break; + case 5: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / 5); + } + break; + default: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / kernel_width); + } + break; + } + + for (; i < w; ++i) { + STBTT_assert(pixels[i] == 0); + total -= buffer[i & STBTT__OVER_MASK]; + pixels[i] = (unsigned char) (total / kernel_width); + } + + pixels += stride_in_bytes; + } +} + +static void stbtt__v_prefilter(unsigned char *pixels, int w, int h, int stride_in_bytes, unsigned int kernel_width) +{ + unsigned char buffer[STBTT_MAX_OVERSAMPLE]; + int safe_h = h - kernel_width; + int j; + STBTT_memset(buffer, 0, STBTT_MAX_OVERSAMPLE); // suppress bogus warning from VS2013 -analyze + for (j=0; j < w; ++j) { + int i; + unsigned int total; + STBTT_memset(buffer, 0, kernel_width); + + total = 0; + + // make kernel_width a constant in common cases so compiler can optimize out the divide + switch (kernel_width) { + case 2: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + 
pixels[i*stride_in_bytes] = (unsigned char) (total / 2); + } + break; + case 3: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + pixels[i*stride_in_bytes] = (unsigned char) (total / 3); + } + break; + case 4: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + pixels[i*stride_in_bytes] = (unsigned char) (total / 4); + } + break; + case 5: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + pixels[i*stride_in_bytes] = (unsigned char) (total / 5); + } + break; + default: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + pixels[i*stride_in_bytes] = (unsigned char) (total / kernel_width); + } + break; + } + + for (; i < h; ++i) { + STBTT_assert(pixels[i*stride_in_bytes] == 0); + total -= buffer[i & STBTT__OVER_MASK]; + pixels[i*stride_in_bytes] = (unsigned char) (total / kernel_width); + } + + pixels += 1; + } +} + +static float stbtt__oversample_shift(int oversample) +{ + if (!oversample) + return 0.0f; + + // The prefilter is a box filter of width "oversample", + // which shifts phase by (oversample - 1)/2 pixels in + // oversampled space. We want to shift in the opposite + // direction to counter this. + return (float)-(oversample - 1) / (2.0f * (float)oversample); +} + +// rects array must be big enough to accommodate all characters in the given ranges +STBTT_DEF int stbtt_PackFontRangesGatherRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects) +{ + int i,j,k; + int missing_glyph_added = 0; + + k=0; + for (i=0; i < num_ranges; ++i) { + float fh = ranges[i].font_size; + float scale = fh > 0 ? stbtt_ScaleForPixelHeight(info, fh) : stbtt_ScaleForMappingEmToPixels(info, -fh); + ranges[i].h_oversample = (unsigned char) spc->h_oversample; + ranges[i].v_oversample = (unsigned char) spc->v_oversample; + for (j=0; j < ranges[i].num_chars; ++j) { + int x0,y0,x1,y1; + int codepoint = ranges[i].array_of_unicode_codepoints == NULL ? 
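The phase compensation computed by stbtt__oversample_shift above falls out of the centroid of a causal box filter. With n = oversample, the filtered sample

\[
y_i \;=\; \frac{1}{n}\sum_{k=0}^{n-1} p_{\,i-k}
\]

has its centroid at i - (n-1)/2 in oversampled pixels; one destination pixel spans n oversampled pixels, so in destination units the shift to undo is

\[
\mathrm{sub} \;=\; -\,\frac{n-1}{2n},
\]

which is exactly the expression the function returns (n = 0 is guarded to 0, and n = 1 correctly gives no shift).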
ranges[i].first_unicode_codepoint_in_range + j : ranges[i].array_of_unicode_codepoints[j]; + int glyph = stbtt_FindGlyphIndex(info, codepoint); + if (glyph == 0 && (spc->skip_missing || missing_glyph_added)) { + rects[k].w = rects[k].h = 0; + } else { + stbtt_GetGlyphBitmapBoxSubpixel(info,glyph, + scale * spc->h_oversample, + scale * spc->v_oversample, + 0,0, + &x0,&y0,&x1,&y1); + rects[k].w = (stbrp_coord) (x1-x0 + spc->padding + spc->h_oversample-1); + rects[k].h = (stbrp_coord) (y1-y0 + spc->padding + spc->v_oversample-1); + if (glyph == 0) + missing_glyph_added = 1; + } + ++k; + } + } + + return k; +} + +STBTT_DEF void stbtt_MakeGlyphBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int prefilter_x, int prefilter_y, float *sub_x, float *sub_y, int glyph) +{ + stbtt_MakeGlyphBitmapSubpixel(info, + output, + out_w - (prefilter_x - 1), + out_h - (prefilter_y - 1), + out_stride, + scale_x, + scale_y, + shift_x, + shift_y, + glyph); + + if (prefilter_x > 1) + stbtt__h_prefilter(output, out_w, out_h, out_stride, prefilter_x); + + if (prefilter_y > 1) + stbtt__v_prefilter(output, out_w, out_h, out_stride, prefilter_y); + + *sub_x = stbtt__oversample_shift(prefilter_x); + *sub_y = stbtt__oversample_shift(prefilter_y); +} + +// rects array must be big enough to accommodate all characters in the given ranges +STBTT_DEF int stbtt_PackFontRangesRenderIntoRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects) +{ + int i,j,k, missing_glyph = -1, return_value = 1; + + // save current values + int old_h_over = spc->h_oversample; + int old_v_over = spc->v_oversample; + + k = 0; + for (i=0; i < num_ranges; ++i) { + float fh = ranges[i].font_size; + float scale = fh > 0 ? stbtt_ScaleForPixelHeight(info, fh) : stbtt_ScaleForMappingEmToPixels(info, -fh); + float recip_h,recip_v,sub_x,sub_y; + spc->h_oversample = ranges[i].h_oversample; + spc->v_oversample = ranges[i].v_oversample; + recip_h = 1.0f / spc->h_oversample; + recip_v = 1.0f / spc->v_oversample; + sub_x = stbtt__oversample_shift(spc->h_oversample); + sub_y = stbtt__oversample_shift(spc->v_oversample); + for (j=0; j < ranges[i].num_chars; ++j) { + stbrp_rect *r = &rects[k]; + if (r->was_packed && r->w != 0 && r->h != 0) { + stbtt_packedchar *bc = &ranges[i].chardata_for_range[j]; + int advance, lsb, x0,y0,x1,y1; + int codepoint = ranges[i].array_of_unicode_codepoints == NULL ? 
ranges[i].first_unicode_codepoint_in_range + j : ranges[i].array_of_unicode_codepoints[j]; + int glyph = stbtt_FindGlyphIndex(info, codepoint); + stbrp_coord pad = (stbrp_coord) spc->padding; + + // pad on left and top + r->x += pad; + r->y += pad; + r->w -= pad; + r->h -= pad; + stbtt_GetGlyphHMetrics(info, glyph, &advance, &lsb); + stbtt_GetGlyphBitmapBox(info, glyph, + scale * spc->h_oversample, + scale * spc->v_oversample, + &x0,&y0,&x1,&y1); + stbtt_MakeGlyphBitmapSubpixel(info, + spc->pixels + r->x + r->y*spc->stride_in_bytes, + r->w - spc->h_oversample+1, + r->h - spc->v_oversample+1, + spc->stride_in_bytes, + scale * spc->h_oversample, + scale * spc->v_oversample, + 0,0, + glyph); + + if (spc->h_oversample > 1) + stbtt__h_prefilter(spc->pixels + r->x + r->y*spc->stride_in_bytes, + r->w, r->h, spc->stride_in_bytes, + spc->h_oversample); + + if (spc->v_oversample > 1) + stbtt__v_prefilter(spc->pixels + r->x + r->y*spc->stride_in_bytes, + r->w, r->h, spc->stride_in_bytes, + spc->v_oversample); + + bc->x0 = (stbtt_int16) r->x; + bc->y0 = (stbtt_int16) r->y; + bc->x1 = (stbtt_int16) (r->x + r->w); + bc->y1 = (stbtt_int16) (r->y + r->h); + bc->xadvance = scale * advance; + bc->xoff = (float) x0 * recip_h + sub_x; + bc->yoff = (float) y0 * recip_v + sub_y; + bc->xoff2 = (x0 + r->w) * recip_h + sub_x; + bc->yoff2 = (y0 + r->h) * recip_v + sub_y; + + if (glyph == 0) + missing_glyph = j; + } else if (spc->skip_missing) { + return_value = 0; + } else if (r->was_packed && r->w == 0 && r->h == 0 && missing_glyph >= 0) { + ranges[i].chardata_for_range[j] = ranges[i].chardata_for_range[missing_glyph]; + } else { + return_value = 0; // if any fail, report failure + } + + ++k; + } + } + + // restore original values + spc->h_oversample = old_h_over; + spc->v_oversample = old_v_over; + + return return_value; +} + +STBTT_DEF void stbtt_PackFontRangesPackRects(stbtt_pack_context *spc, stbrp_rect *rects, int num_rects) +{ + stbrp_pack_rects((stbrp_context *) spc->pack_info, rects, num_rects); +} + +STBTT_DEF int stbtt_PackFontRanges(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, stbtt_pack_range *ranges, int num_ranges) +{ + stbtt_fontinfo info; + int i,j,n, return_value = 1; + //stbrp_context *context = (stbrp_context *) spc->pack_info; + stbrp_rect *rects; + + // flag all characters as NOT packed + for (i=0; i < num_ranges; ++i) + for (j=0; j < ranges[i].num_chars; ++j) + ranges[i].chardata_for_range[j].x0 = + ranges[i].chardata_for_range[j].y0 = + ranges[i].chardata_for_range[j].x1 = + ranges[i].chardata_for_range[j].y1 = 0; + + n = 0; + for (i=0; i < num_ranges; ++i) + n += ranges[i].num_chars; + + rects = (stbrp_rect *) STBTT_malloc(sizeof(*rects) * n, spc->user_allocator_context); + if (rects == NULL) + return 0; + + info.userdata = spc->user_allocator_context; + stbtt_InitFont(&info, fontdata, stbtt_GetFontOffsetForIndex(fontdata,font_index)); + + n = stbtt_PackFontRangesGatherRects(spc, &info, ranges, num_ranges, rects); + + stbtt_PackFontRangesPackRects(spc, rects, n); + + return_value = stbtt_PackFontRangesRenderIntoRects(spc, &info, ranges, num_ranges, rects); + + STBTT_free(rects, spc->user_allocator_context); + return return_value; +} + +STBTT_DEF int stbtt_PackFontRange(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, float font_size, + int first_unicode_codepoint_in_range, int num_chars_in_range, stbtt_packedchar *chardata_for_range) +{ + stbtt_pack_range range; + range.first_unicode_codepoint_in_range = first_unicode_codepoint_in_range; + 
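stbtt_PackFontRanges above is itself a template for the split-phase API, which callers can use directly when several fonts must share one atlas: gather rects for every font first, pack them all in a single call, then render each font. A sketch, assuming spc and info are an initialized stbtt_pack_context and stbtt_fontinfo, and using plain malloc/free for brevity:

int i, n = 0, ok;
stbrp_rect *rects;
for (i = 0; i < num_ranges; ++i)
   n += ranges[i].num_chars;
rects = (stbrp_rect *) malloc(sizeof(*rects) * n);

n  = stbtt_PackFontRangesGatherRects(&spc, &info, ranges, num_ranges, rects);
stbtt_PackFontRangesPackRects(&spc, rects, n);   /* one call can cover many fonts' rects */
ok = stbtt_PackFontRangesRenderIntoRects(&spc, &info, ranges, num_ranges, rects);
free(rects);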
range.array_of_unicode_codepoints = NULL; + range.num_chars = num_chars_in_range; + range.chardata_for_range = chardata_for_range; + range.font_size = font_size; + return stbtt_PackFontRanges(spc, fontdata, font_index, &range, 1); +} + +STBTT_DEF void stbtt_GetScaledFontVMetrics(const unsigned char *fontdata, int index, float size, float *ascent, float *descent, float *lineGap) +{ + int i_ascent, i_descent, i_lineGap; + float scale; + stbtt_fontinfo info; + stbtt_InitFont(&info, fontdata, stbtt_GetFontOffsetForIndex(fontdata, index)); + scale = size > 0 ? stbtt_ScaleForPixelHeight(&info, size) : stbtt_ScaleForMappingEmToPixels(&info, -size); + stbtt_GetFontVMetrics(&info, &i_ascent, &i_descent, &i_lineGap); + *ascent = (float) i_ascent * scale; + *descent = (float) i_descent * scale; + *lineGap = (float) i_lineGap * scale; +} + +STBTT_DEF void stbtt_GetPackedQuad(const stbtt_packedchar *chardata, int pw, int ph, int char_index, float *xpos, float *ypos, stbtt_aligned_quad *q, int align_to_integer) +{ + float ipw = 1.0f / pw, iph = 1.0f / ph; + const stbtt_packedchar *b = chardata + char_index; + + if (align_to_integer) { + float x = (float) STBTT_ifloor((*xpos + b->xoff) + 0.5f); + float y = (float) STBTT_ifloor((*ypos + b->yoff) + 0.5f); + q->x0 = x; + q->y0 = y; + q->x1 = x + b->xoff2 - b->xoff; + q->y1 = y + b->yoff2 - b->yoff; + } else { + q->x0 = *xpos + b->xoff; + q->y0 = *ypos + b->yoff; + q->x1 = *xpos + b->xoff2; + q->y1 = *ypos + b->yoff2; + } + + q->s0 = b->x0 * ipw; + q->t0 = b->y0 * iph; + q->s1 = b->x1 * ipw; + q->t1 = b->y1 * iph; + + *xpos += b->xadvance; +} + +////////////////////////////////////////////////////////////////////////////// +// +// sdf computation +// + +#define STBTT_min(a,b) ((a) < (b) ? (a) : (b)) +#define STBTT_max(a,b) ((a) < (b) ? 
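And the packed (stb_rect_pack-based) path end to end, with 2x2 oversampling for better subpixel quality; the sizes and the ttf buffer are illustrative:

unsigned char atlas[1024*1024];
stbtt_packedchar pchars[96];
stbtt_pack_context spc;

stbtt_PackBegin(&spc, atlas, 1024, 1024, 0 /* stride 0 -> width */, 1 /* padding */, NULL);
stbtt_PackSetOversampling(&spc, 2, 2);     /* 4x the pixels, noticeably better AA */
stbtt_PackFontRange(&spc, ttf, 0, 24.0f, 32, 96, pchars);
stbtt_PackEnd(&spc);

float x = 10.0f, y = 40.0f;
stbtt_aligned_quad q;
stbtt_GetPackedQuad(pchars, 1024, 1024, 'A' - 32, &x, &y, &q, 0 /* no integer snap */);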
(b) : (a)) + +static int stbtt__ray_intersect_bezier(float orig[2], float ray[2], float q0[2], float q1[2], float q2[2], float hits[2][2]) +{ + float q0perp = q0[1]*ray[0] - q0[0]*ray[1]; + float q1perp = q1[1]*ray[0] - q1[0]*ray[1]; + float q2perp = q2[1]*ray[0] - q2[0]*ray[1]; + float roperp = orig[1]*ray[0] - orig[0]*ray[1]; + + float a = q0perp - 2*q1perp + q2perp; + float b = q1perp - q0perp; + float c = q0perp - roperp; + + float s0 = 0., s1 = 0.; + int num_s = 0; + + if (a != 0.0) { + float discr = b*b - a*c; + if (discr > 0.0) { + float rcpna = -1 / a; + float d = (float) STBTT_sqrt(discr); + s0 = (b+d) * rcpna; + s1 = (b-d) * rcpna; + if (s0 >= 0.0 && s0 <= 1.0) + num_s = 1; + if (d > 0.0 && s1 >= 0.0 && s1 <= 1.0) { + if (num_s == 0) s0 = s1; + ++num_s; + } + } + } else { + // 2*b*s + c = 0 + // s = -c / (2*b) + s0 = c / (-2 * b); + if (s0 >= 0.0 && s0 <= 1.0) + num_s = 1; + } + + if (num_s == 0) + return 0; + else { + float rcp_len2 = 1 / (ray[0]*ray[0] + ray[1]*ray[1]); + float rayn_x = ray[0] * rcp_len2, rayn_y = ray[1] * rcp_len2; + + float q0d = q0[0]*rayn_x + q0[1]*rayn_y; + float q1d = q1[0]*rayn_x + q1[1]*rayn_y; + float q2d = q2[0]*rayn_x + q2[1]*rayn_y; + float rod = orig[0]*rayn_x + orig[1]*rayn_y; + + float q10d = q1d - q0d; + float q20d = q2d - q0d; + float q0rd = q0d - rod; + + hits[0][0] = q0rd + s0*(2.0f - 2.0f*s0)*q10d + s0*s0*q20d; + hits[0][1] = a*s0+b; + + if (num_s > 1) { + hits[1][0] = q0rd + s1*(2.0f - 2.0f*s1)*q10d + s1*s1*q20d; + hits[1][1] = a*s1+b; + return 2; + } else { + return 1; + } + } +} + +static int equal(float *a, float *b) +{ + return (a[0] == b[0] && a[1] == b[1]); +} + +static int stbtt__compute_crossings_x(float x, float y, int nverts, stbtt_vertex *verts) +{ + int i; + float orig[2], ray[2] = { 1, 0 }; + float y_frac; + int winding = 0; + + // make sure y never passes through a vertex of the shape + y_frac = (float) STBTT_fmod(y, 1.0f); + if (y_frac < 0.01f) + y += 0.01f; + else if (y_frac > 0.99f) + y -= 0.01f; + + orig[0] = x; + orig[1] = y; + + // test a ray from (-infinity,y) to (x,y) + for (i=0; i < nverts; ++i) { + if (verts[i].type == STBTT_vline) { + int x0 = (int) verts[i-1].x, y0 = (int) verts[i-1].y; + int x1 = (int) verts[i ].x, y1 = (int) verts[i ].y; + if (y > STBTT_min(y0,y1) && y < STBTT_max(y0,y1) && x > STBTT_min(x0,x1)) { + float x_inter = (y - y0) / (y1 - y0) * (x1-x0) + x0; + if (x_inter < x) + winding += (y0 < y1) ? 1 : -1; + } + } + if (verts[i].type == STBTT_vcurve) { + int x0 = (int) verts[i-1].x , y0 = (int) verts[i-1].y ; + int x1 = (int) verts[i ].cx, y1 = (int) verts[i ].cy; + int x2 = (int) verts[i ].x , y2 = (int) verts[i ].y ; + int ax = STBTT_min(x0,STBTT_min(x1,x2)), ay = STBTT_min(y0,STBTT_min(y1,y2)); + int by = STBTT_max(y0,STBTT_max(y1,y2)); + if (y > ay && y < by && x > ax) { + float q0[2],q1[2],q2[2]; + float hits[2][2]; + q0[0] = (float)x0; + q0[1] = (float)y0; + q1[0] = (float)x1; + q1[1] = (float)y1; + q2[0] = (float)x2; + q2[1] = (float)y2; + if (equal(q0,q1) || equal(q1,q2)) { + x0 = (int)verts[i-1].x; + y0 = (int)verts[i-1].y; + x1 = (int)verts[i ].x; + y1 = (int)verts[i ].y; + if (y > STBTT_min(y0,y1) && y < STBTT_max(y0,y1) && x > STBTT_min(x0,x1)) { + float x_inter = (y - y0) / (y1 - y0) * (x1-x0) + x0; + if (x_inter < x) + winding += (y0 < y1) ? 1 : -1; + } + } else { + int num_hits = stbtt__ray_intersect_bezier(orig, ray, q0, q1, q2, hits); + if (num_hits >= 1) + if (hits[0][0] < 0) + winding += (hits[0][1] < 0 ? 
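The quadratic solved by stbtt__ray_intersect_bezier above comes from projecting the curve onto a perpendicular of the ray. Writing q^perp = q_y r_x - q_x r_y for a point q (this is what the *perp variables hold), the Bernstein form expands as

\[
B^{\perp}(s) \;=\; (1-s)^2 q_0^{\perp} + 2s(1-s)\,q_1^{\perp} + s^2 q_2^{\perp}
\;=\; a s^2 + 2b s + q_0^{\perp},
\qquad a = q_0^{\perp} - 2q_1^{\perp} + q_2^{\perp},\quad b = q_1^{\perp} - q_0^{\perp}.
\]

Setting this equal to the ray origin's projection o^perp gives a s^2 + 2b s + c = 0 with c = q_0^perp - o^perp, whose roots s = -(b ± sqrt(b^2 - ac))/a are the code's s0 and s1. The stored hits[.][1] = a s + b is half the derivative at the root, so its sign tells the caller which way the curve crosses the ray.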
-1 : 1); + if (num_hits >= 2) + if (hits[1][0] < 0) + winding += (hits[1][1] < 0 ? -1 : 1); + } + } + } + } + return winding; +} + +static float stbtt__cuberoot( float x ) +{ + if (x<0) + return -(float) STBTT_pow(-x,1.0f/3.0f); + else + return (float) STBTT_pow( x,1.0f/3.0f); +} + +// x^3 + a*x^2 + b*x + c = 0 +static int stbtt__solve_cubic(float a, float b, float c, float* r) +{ + float s = -a / 3; + float p = b - a*a / 3; + float q = a * (2*a*a - 9*b) / 27 + c; + float p3 = p*p*p; + float d = q*q + 4*p3 / 27; + if (d >= 0) { + float z = (float) STBTT_sqrt(d); + float u = (-q + z) / 2; + float v = (-q - z) / 2; + u = stbtt__cuberoot(u); + v = stbtt__cuberoot(v); + r[0] = s + u + v; + return 1; + } else { + float u = (float) STBTT_sqrt(-p/3); + float v = (float) STBTT_acos(-STBTT_sqrt(-27/p3) * q / 2) / 3; // p3 must be negative, since d is negative + float m = (float) STBTT_cos(v); + float n = (float) STBTT_cos(v-3.141592/2)*1.732050808f; + r[0] = s + u * 2 * m; + r[1] = s - u * (m + n); + r[2] = s - u * (m - n); + + //STBTT_assert( STBTT_fabs(((r[0]+a)*r[0]+b)*r[0]+c) < 0.05f); // these asserts may not be safe at all scales, though they're in bezier t parameter units so maybe? + //STBTT_assert( STBTT_fabs(((r[1]+a)*r[1]+b)*r[1]+c) < 0.05f); + //STBTT_assert( STBTT_fabs(((r[2]+a)*r[2]+b)*r[2]+c) < 0.05f); + return 3; + } +} + +STBTT_DEF unsigned char * stbtt_GetGlyphSDF(const stbtt_fontinfo *info, float scale, int glyph, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff) +{ + float scale_x = scale, scale_y = scale; + int ix0,iy0,ix1,iy1; + int w,h; + unsigned char *data; + + if (scale == 0) return NULL; + + stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale, scale, 0.0f,0.0f, &ix0,&iy0,&ix1,&iy1); + + // if empty, return NULL + if (ix0 == ix1 || iy0 == iy1) + return NULL; + + ix0 -= padding; + iy0 -= padding; + ix1 += padding; + iy1 += padding; + + w = (ix1 - ix0); + h = (iy1 - iy0); + + if (width ) *width = w; + if (height) *height = h; + if (xoff ) *xoff = ix0; + if (yoff ) *yoff = iy0; + + // invert for y-downwards bitmaps + scale_y = -scale_y; + + { + int x,y,i,j; + float *precompute; + stbtt_vertex *verts; + int num_verts = stbtt_GetGlyphShape(info, glyph, &verts); + data = (unsigned char *) STBTT_malloc(w * h, info->userdata); + precompute = (float *) STBTT_malloc(num_verts * sizeof(float), info->userdata); + + for (i=0,j=num_verts-1; i < num_verts; j=i++) { + if (verts[i].type == STBTT_vline) { + float x0 = verts[i].x*scale_x, y0 = verts[i].y*scale_y; + float x1 = verts[j].x*scale_x, y1 = verts[j].y*scale_y; + float dist = (float) STBTT_sqrt((x1-x0)*(x1-x0) + (y1-y0)*(y1-y0)); + precompute[i] = (dist == 0) ? 
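stbtt__solve_cubic above is the classical reduction to a depressed cubic: substituting x = t - a/3 into x^3 + a x^2 + b x + c = 0 yields

\[
t^3 + p\,t + q = 0,\qquad p = b - \frac{a^2}{3},\qquad q = \frac{2a^3 - 9ab}{27} + c,
\]

and the code's d = q^2 + 4p^3/27 is four times the textbook discriminant (q/2)^2 + (p/3)^3. When d >= 0 there is a single real root, recovered via Cardano's two cube roots u and v; when d < 0 all three roots are real (and p^3 is necessarily negative, as the comment notes), so the trigonometric arccos form is used instead.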
0.0f : 1.0f / dist; + } else if (verts[i].type == STBTT_vcurve) { + float x2 = verts[j].x *scale_x, y2 = verts[j].y *scale_y; + float x1 = verts[i].cx*scale_x, y1 = verts[i].cy*scale_y; + float x0 = verts[i].x *scale_x, y0 = verts[i].y *scale_y; + float bx = x0 - 2*x1 + x2, by = y0 - 2*y1 + y2; + float len2 = bx*bx + by*by; + if (len2 != 0.0f) + precompute[i] = 1.0f / (bx*bx + by*by); + else + precompute[i] = 0.0f; + } else + precompute[i] = 0.0f; + } + + for (y=iy0; y < iy1; ++y) { + for (x=ix0; x < ix1; ++x) { + float val; + float min_dist = 999999.0f; + float sx = (float) x + 0.5f; + float sy = (float) y + 0.5f; + float x_gspace = (sx / scale_x); + float y_gspace = (sy / scale_y); + + int winding = stbtt__compute_crossings_x(x_gspace, y_gspace, num_verts, verts); // @OPTIMIZE: this could just be a rasterization, but needs to be line vs. non-tesselated curves so a new path + + for (i=0; i < num_verts; ++i) { + float x0 = verts[i].x*scale_x, y0 = verts[i].y*scale_y; + + if (verts[i].type == STBTT_vline && precompute[i] != 0.0f) { + float x1 = verts[i-1].x*scale_x, y1 = verts[i-1].y*scale_y; + + float dist,dist2 = (x0-sx)*(x0-sx) + (y0-sy)*(y0-sy); + if (dist2 < min_dist*min_dist) + min_dist = (float) STBTT_sqrt(dist2); + + // coarse culling against bbox + //if (sx > STBTT_min(x0,x1)-min_dist && sx < STBTT_max(x0,x1)+min_dist && + // sy > STBTT_min(y0,y1)-min_dist && sy < STBTT_max(y0,y1)+min_dist) + dist = (float) STBTT_fabs((x1-x0)*(y0-sy) - (y1-y0)*(x0-sx)) * precompute[i]; + STBTT_assert(i != 0); + if (dist < min_dist) { + // check position along line + // x' = x0 + t*(x1-x0), y' = y0 + t*(y1-y0) + // minimize (x'-sx)*(x'-sx)+(y'-sy)*(y'-sy) + float dx = x1-x0, dy = y1-y0; + float px = x0-sx, py = y0-sy; + // minimize (px+t*dx)^2 + (py+t*dy)^2 = px*px + 2*px*dx*t + t^2*dx*dx + py*py + 2*py*dy*t + t^2*dy*dy + // derivative: 2*px*dx + 2*py*dy + (2*dx*dx+2*dy*dy)*t, set to 0 and solve + float t = -(px*dx + py*dy) / (dx*dx + dy*dy); + if (t >= 0.0f && t <= 1.0f) + min_dist = dist; + } + } else if (verts[i].type == STBTT_vcurve) { + float x2 = verts[i-1].x *scale_x, y2 = verts[i-1].y *scale_y; + float x1 = verts[i ].cx*scale_x, y1 = verts[i ].cy*scale_y; + float box_x0 = STBTT_min(STBTT_min(x0,x1),x2); + float box_y0 = STBTT_min(STBTT_min(y0,y1),y2); + float box_x1 = STBTT_max(STBTT_max(x0,x1),x2); + float box_y1 = STBTT_max(STBTT_max(y0,y1),y2); + // coarse culling against bbox to avoid computing cubic unnecessarily + if (sx > box_x0-min_dist && sx < box_x1+min_dist && sy > box_y0-min_dist && sy < box_y1+min_dist) { + int num=0; + float ax = x1-x0, ay = y1-y0; + float bx = x0 - 2*x1 + x2, by = y0 - 2*y1 + y2; + float mx = x0 - sx, my = y0 - sy; + float res[3] = {0.f,0.f,0.f}; + float px,py,t,it,dist2; + float a_inv = precompute[i]; + if (a_inv == 0.0) { // if a_inv is 0, it's 2nd degree so use quadratic formula + float a = 3*(ax*bx + ay*by); + float b = 2*(ax*ax + ay*ay) + (mx*bx+my*by); + float c = mx*ax+my*ay; + if (a == 0.0) { // if a is 0, it's linear + if (b != 0.0) { + res[num++] = -c/b; + } + } else { + float discriminant = b*b - 4*a*c; + if (discriminant < 0) + num = 0; + else { + float root = (float) STBTT_sqrt(discriminant); + res[0] = (-b - root)/(2*a); + res[1] = (-b + root)/(2*a); + num = 2; // don't bother distinguishing 1-solution case, as code below will still work + } + } + } else { + float b = 3*(ax*bx + ay*by) * a_inv; // could precompute this as it doesn't depend on sample point + float c = (2*(ax*ax + ay*ay) + (mx*bx+my*by)) * a_inv; + float d = (mx*ax+my*ay) * a_inv; 
+ num = stbtt__solve_cubic(b, c, d, res); + } + dist2 = (x0-sx)*(x0-sx) + (y0-sy)*(y0-sy); + if (dist2 < min_dist*min_dist) + min_dist = (float) STBTT_sqrt(dist2); + + if (num >= 1 && res[0] >= 0.0f && res[0] <= 1.0f) { + t = res[0], it = 1.0f - t; + px = it*it*x0 + 2*t*it*x1 + t*t*x2; + py = it*it*y0 + 2*t*it*y1 + t*t*y2; + dist2 = (px-sx)*(px-sx) + (py-sy)*(py-sy); + if (dist2 < min_dist * min_dist) + min_dist = (float) STBTT_sqrt(dist2); + } + if (num >= 2 && res[1] >= 0.0f && res[1] <= 1.0f) { + t = res[1], it = 1.0f - t; + px = it*it*x0 + 2*t*it*x1 + t*t*x2; + py = it*it*y0 + 2*t*it*y1 + t*t*y2; + dist2 = (px-sx)*(px-sx) + (py-sy)*(py-sy); + if (dist2 < min_dist * min_dist) + min_dist = (float) STBTT_sqrt(dist2); + } + if (num >= 3 && res[2] >= 0.0f && res[2] <= 1.0f) { + t = res[2], it = 1.0f - t; + px = it*it*x0 + 2*t*it*x1 + t*t*x2; + py = it*it*y0 + 2*t*it*y1 + t*t*y2; + dist2 = (px-sx)*(px-sx) + (py-sy)*(py-sy); + if (dist2 < min_dist * min_dist) + min_dist = (float) STBTT_sqrt(dist2); + } + } + } + } + if (winding == 0) + min_dist = -min_dist; // if outside the shape, value is negative + val = onedge_value + pixel_dist_scale * min_dist; + if (val < 0) + val = 0; + else if (val > 255) + val = 255; + data[(y-iy0)*w+(x-ix0)] = (unsigned char) val; + } + } + STBTT_free(precompute, info->userdata); + STBTT_free(verts, info->userdata); + } + return data; +} + +STBTT_DEF unsigned char * stbtt_GetCodepointSDF(const stbtt_fontinfo *info, float scale, int codepoint, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff) +{ + return stbtt_GetGlyphSDF(info, scale, stbtt_FindGlyphIndex(info, codepoint), padding, onedge_value, pixel_dist_scale, width, height, xoff, yoff); +} + +STBTT_DEF void stbtt_FreeSDF(unsigned char *bitmap, void *userdata) +{ + STBTT_free(bitmap, userdata); +} + +////////////////////////////////////////////////////////////////////////////// +// +// font name matching -- recommended not to use this +// + +// check if a utf8 string contains a prefix which is the utf16 string; if so return length of matching utf8 string +static stbtt_int32 stbtt__CompareUTF8toUTF16_bigendian_prefix(stbtt_uint8 *s1, stbtt_int32 len1, stbtt_uint8 *s2, stbtt_int32 len2) +{ + stbtt_int32 i=0; + + // convert utf16 to utf8 and compare the results while converting + while (len2) { + stbtt_uint16 ch = s2[0]*256 + s2[1]; + if (ch < 0x80) { + if (i >= len1) return -1; + if (s1[i++] != ch) return -1; + } else if (ch < 0x800) { + if (i+1 >= len1) return -1; + if (s1[i++] != 0xc0 + (ch >> 6)) return -1; + if (s1[i++] != 0x80 + (ch & 0x3f)) return -1; + } else if (ch >= 0xd800 && ch < 0xdc00) { + stbtt_uint32 c; + stbtt_uint16 ch2 = s2[2]*256 + s2[3]; + if (i+3 >= len1) return -1; + c = ((ch - 0xd800) << 10) + (ch2 - 0xdc00) + 0x10000; + if (s1[i++] != 0xf0 + (c >> 18)) return -1; + if (s1[i++] != 0x80 + ((c >> 12) & 0x3f)) return -1; + if (s1[i++] != 0x80 + ((c >> 6) & 0x3f)) return -1; + if (s1[i++] != 0x80 + ((c ) & 0x3f)) return -1; + s2 += 2; // plus another 2 below + len2 -= 2; + } else if (ch >= 0xdc00 && ch < 0xe000) { + return -1; + } else { + if (i+2 >= len1) return -1; + if (s1[i++] != 0xe0 + (ch >> 12)) return -1; + if (s1[i++] != 0x80 + ((ch >> 6) & 0x3f)) return -1; + if (s1[i++] != 0x80 + ((ch ) & 0x3f)) return -1; + } + s2 += 2; + len2 -= 2; + } + return i; +} + +static int stbtt_CompareUTF8toUTF16_bigendian_internal(char *s1, int len1, char *s2, int len2) +{ + return len1 == stbtt__CompareUTF8toUTF16_bigendian_prefix((stbtt_uint8*) 
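A usage sketch for the SDF path just defined. The padding / onedge_value / pixel_dist_scale triple below is the conventional example (the field saturates 5 px from the outline), not a requirement; the font is assumed initialized as in the earlier sketches:

int w, h, xoff, yoff;
unsigned char *sdf = stbtt_GetCodepointSDF(&font,
                        stbtt_ScaleForPixelHeight(&font, 32.0f),
                        'A',
                        5,             /* padding kept around the glyph box       */
                        180,           /* onedge_value: level exactly on outline  */
                        180.0f / 5.0f, /* pixel_dist_scale: units per px distance */
                        &w, &h, &xoff, &yoff);
if (sdf) {
   /* upload as an 8-bit texture; threshold/smoothstep around 180/255 in shader */
   stbtt_FreeSDF(sdf, NULL);
}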
s1, len1, (stbtt_uint8*) s2, len2); +} + +// returns results in whatever encoding you request... but note that 2-byte encodings +// will be BIG-ENDIAN... use stbtt_CompareUTF8toUTF16_bigendian() to compare +STBTT_DEF const char *stbtt_GetFontNameString(const stbtt_fontinfo *font, int *length, int platformID, int encodingID, int languageID, int nameID) +{ + stbtt_int32 i,count,stringOffset; + stbtt_uint8 *fc = font->data; + stbtt_uint32 offset = font->fontstart; + stbtt_uint32 nm = stbtt__find_table(fc, offset, "name"); + if (!nm) return NULL; + + count = ttUSHORT(fc+nm+2); + stringOffset = nm + ttUSHORT(fc+nm+4); + for (i=0; i < count; ++i) { + stbtt_uint32 loc = nm + 6 + 12 * i; + if (platformID == ttUSHORT(fc+loc+0) && encodingID == ttUSHORT(fc+loc+2) + && languageID == ttUSHORT(fc+loc+4) && nameID == ttUSHORT(fc+loc+6)) { + *length = ttUSHORT(fc+loc+8); + return (const char *) (fc+stringOffset+ttUSHORT(fc+loc+10)); + } + } + return NULL; +} + +static int stbtt__matchpair(stbtt_uint8 *fc, stbtt_uint32 nm, stbtt_uint8 *name, stbtt_int32 nlen, stbtt_int32 target_id, stbtt_int32 next_id) +{ + stbtt_int32 i; + stbtt_int32 count = ttUSHORT(fc+nm+2); + stbtt_int32 stringOffset = nm + ttUSHORT(fc+nm+4); + + for (i=0; i < count; ++i) { + stbtt_uint32 loc = nm + 6 + 12 * i; + stbtt_int32 id = ttUSHORT(fc+loc+6); + if (id == target_id) { + // find the encoding + stbtt_int32 platform = ttUSHORT(fc+loc+0), encoding = ttUSHORT(fc+loc+2), language = ttUSHORT(fc+loc+4); + + // is this a Unicode encoding? + if (platform == 0 || (platform == 3 && encoding == 1) || (platform == 3 && encoding == 10)) { + stbtt_int32 slen = ttUSHORT(fc+loc+8); + stbtt_int32 off = ttUSHORT(fc+loc+10); + + // check if there's a prefix match + stbtt_int32 matchlen = stbtt__CompareUTF8toUTF16_bigendian_prefix(name, nlen, fc+stringOffset+off,slen); + if (matchlen >= 0) { + // check for target_id+1 immediately following, with same encoding & language + if (i+1 < count && ttUSHORT(fc+loc+12+6) == next_id && ttUSHORT(fc+loc+12) == platform && ttUSHORT(fc+loc+12+2) == encoding && ttUSHORT(fc+loc+12+4) == language) { + slen = ttUSHORT(fc+loc+12+8); + off = ttUSHORT(fc+loc+12+10); + if (slen == 0) { + if (matchlen == nlen) + return 1; + } else if (matchlen < nlen && name[matchlen] == ' ') { + ++matchlen; + if (stbtt_CompareUTF8toUTF16_bigendian_internal((char*) (name+matchlen), nlen-matchlen, (char*)(fc+stringOffset+off),slen)) + return 1; + } + } else { + // if nothing immediately following + if (matchlen == nlen) + return 1; + } + } + } + + // @TODO handle other encodings + } + } + return 0; +} + +static int stbtt__matches(stbtt_uint8 *fc, stbtt_uint32 offset, stbtt_uint8 *name, stbtt_int32 flags) +{ + stbtt_int32 nlen = (stbtt_int32) STBTT_strlen((char *) name); + stbtt_uint32 nm,hd; + if (!stbtt__isfont(fc+offset)) return 0; + + // check italics/bold/underline flags in macStyle... 
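For the name-matching helpers here (and stbtt_FindMatchingFont below), a typical lookup against a hypothetical ttc_data collection looks like the following. With style flags, only the head table's macStyle bits plus the family name are matched; with no flags, "family subfamily" strings are matched instead. The STBTT_MACSTYLE_* macros come from the declaration half of this header:

stbtt_fontinfo font;
int off = stbtt_FindMatchingFont(ttc_data, "Arial", STBTT_MACSTYLE_BOLD);
if (off < 0)   /* fall back: match "family subfamily" with no style bits */
   off = stbtt_FindMatchingFont(ttc_data, "Arial Bold", STBTT_MACSTYLE_DONTCARE);
if (off >= 0)
   stbtt_InitFont(&font, ttc_data, off);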
+ if (flags) { + hd = stbtt__find_table(fc, offset, "head"); + if ((ttUSHORT(fc+hd+44) & 7) != (flags & 7)) return 0; + } + + nm = stbtt__find_table(fc, offset, "name"); + if (!nm) return 0; + + if (flags) { + // if we checked the macStyle flags, then just check the family and ignore the subfamily + if (stbtt__matchpair(fc, nm, name, nlen, 16, -1)) return 1; + if (stbtt__matchpair(fc, nm, name, nlen, 1, -1)) return 1; + if (stbtt__matchpair(fc, nm, name, nlen, 3, -1)) return 1; + } else { + if (stbtt__matchpair(fc, nm, name, nlen, 16, 17)) return 1; + if (stbtt__matchpair(fc, nm, name, nlen, 1, 2)) return 1; + if (stbtt__matchpair(fc, nm, name, nlen, 3, -1)) return 1; + } + + return 0; +} + +static int stbtt_FindMatchingFont_internal(unsigned char *font_collection, char *name_utf8, stbtt_int32 flags) +{ + stbtt_int32 i; + for (i=0;;++i) { + stbtt_int32 off = stbtt_GetFontOffsetForIndex(font_collection, i); + if (off < 0) return off; + if (stbtt__matches((stbtt_uint8 *) font_collection, off, (stbtt_uint8*) name_utf8, flags)) + return off; + } +} + +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +STBTT_DEF int stbtt_BakeFontBitmap(const unsigned char *data, int offset, + float pixel_height, unsigned char *pixels, int pw, int ph, + int first_char, int num_chars, stbtt_bakedchar *chardata) +{ + return stbtt_BakeFontBitmap_internal((unsigned char *) data, offset, pixel_height, pixels, pw, ph, first_char, num_chars, chardata); +} + +STBTT_DEF int stbtt_GetFontOffsetForIndex(const unsigned char *data, int index) +{ + return stbtt_GetFontOffsetForIndex_internal((unsigned char *) data, index); +} + +STBTT_DEF int stbtt_GetNumberOfFonts(const unsigned char *data) +{ + return stbtt_GetNumberOfFonts_internal((unsigned char *) data); +} + +STBTT_DEF int stbtt_InitFont(stbtt_fontinfo *info, const unsigned char *data, int offset) +{ + return stbtt_InitFont_internal(info, (unsigned char *) data, offset); +} + +STBTT_DEF int stbtt_FindMatchingFont(const unsigned char *fontdata, const char *name, int flags) +{ + return stbtt_FindMatchingFont_internal((unsigned char *) fontdata, (char *) name, flags); +} + +STBTT_DEF int stbtt_CompareUTF8toUTF16_bigendian(const char *s1, int len1, const char *s2, int len2) +{ + return stbtt_CompareUTF8toUTF16_bigendian_internal((char *) s1, len1, (char *) s2, len2); +} + +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic pop +#endif + +#endif // STB_TRUETYPE_IMPLEMENTATION + + +// FULL VERSION HISTORY +// +// 1.25 (2021-07-11) many fixes +// 1.24 (2020-02-05) fix warning +// 1.23 (2020-02-02) query SVG data for glyphs; query whole kerning table (but only kern not GPOS) +// 1.22 (2019-08-11) minimize missing-glyph duplication; fix kerning if both 'GPOS' and 'kern' are defined +// 1.21 (2019-02-25) fix warning +// 1.20 (2019-02-07) PackFontRange skips missing codepoints; GetScaleFontVMetrics() +// 1.19 (2018-02-11) OpenType GPOS kerning (horizontal only), STBTT_fmod +// 1.18 (2018-01-29) add missing function +// 1.17 (2017-07-23) make more arguments const; doc fix +// 1.16 (2017-07-12) SDF support +// 1.15 (2017-03-03) make more arguments const +// 1.14 (2017-01-16) num-fonts-in-TTC function +// 1.13 (2017-01-02) support OpenType fonts, certain Apple fonts +// 1.12 (2016-10-25) suppress warnings about casting away const with -Wcast-qual +// 1.11 (2016-04-02) fix unused-variable warning +// 1.10 (2016-04-02) allow user-defined fabs() replacement +// fix memory leak if 
fontsize=0.0 +// fix warning from duplicate typedef +// 1.09 (2016-01-16) warning fix; avoid crash on outofmem; use alloc userdata for PackFontRanges +// 1.08 (2015-09-13) document stbtt_Rasterize(); fixes for vertical & horizontal edges +// 1.07 (2015-08-01) allow PackFontRanges to accept arrays of sparse codepoints; +// allow PackFontRanges to pack and render in separate phases; +// fix stbtt_GetFontOFfsetForIndex (never worked for non-0 input?); +// fixed an assert() bug in the new rasterizer +// replace assert() with STBTT_assert() in new rasterizer +// 1.06 (2015-07-14) performance improvements (~35% faster on x86 and x64 on test machine) +// also more precise AA rasterizer, except if shapes overlap +// remove need for STBTT_sort +// 1.05 (2015-04-15) fix misplaced definitions for STBTT_STATIC +// 1.04 (2015-04-15) typo in example +// 1.03 (2015-04-12) STBTT_STATIC, fix memory leak in new packing, various fixes +// 1.02 (2014-12-10) fix various warnings & compile issues w/ stb_rect_pack, C++ +// 1.01 (2014-12-08) fix subpixel position when oversampling to exactly match +// non-oversampled; STBTT_POINT_SIZE for packed case only +// 1.00 (2014-12-06) add new PackBegin etc. API, w/ support for oversampling +// 0.99 (2014-09-18) fix multiple bugs with subpixel rendering (ryg) +// 0.9 (2014-08-07) support certain mac/iOS fonts without an MS platformID +// 0.8b (2014-07-07) fix a warning +// 0.8 (2014-05-25) fix a few more warnings +// 0.7 (2013-09-25) bugfix: subpixel glyph bug fixed in 0.5 had come back +// 0.6c (2012-07-24) improve documentation +// 0.6b (2012-07-20) fix a few more warnings +// 0.6 (2012-07-17) fix warnings; added stbtt_ScaleForMappingEmToPixels, +// stbtt_GetFontBoundingBox, stbtt_IsGlyphEmpty +// 0.5 (2011-12-09) bugfixes: +// subpixel glyph renderer computed wrong bounding box +// first vertex of shape can be off-curve (FreeSans) +// 0.4b (2011-12-03) fixed an error in the font baking example +// 0.4 (2011-12-01) kerning, subpixel rendering (tor) +// bugfixes for: +// codepoint-to-glyph conversion using table fmt=12 +// codepoint-to-glyph conversion using table fmt=4 +// stbtt_GetBakedQuad with non-square texture (Zer) +// updated Hello World! sample to use kerning and subpixel +// fixed some warnings +// 0.3 (2009-06-24) cmap fmt=12, compound shapes (MM) +// userdata, malloc-from-userdata, non-zero fill (stb) +// 0.2 (2009-03-11) Fix unsigned/signed char warnings +// 0.1 (2009-03-09) First public release +// + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ diff --git a/3rdparty/stblib/stb b/3rdparty/stblib/stb deleted file mode 160000 index 013ac3bedd..0000000000 --- a/3rdparty/stblib/stb +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 013ac3beddff3dbffafd5177e7972067cd2b5083 diff --git a/3rdparty/unordered_dense/include/unordered_dense.h b/3rdparty/unordered_dense/include/unordered_dense.h deleted file mode 100644 index 13484a9817..0000000000 --- a/3rdparty/unordered_dense/include/unordered_dense.h +++ /dev/null @@ -1,2101 +0,0 @@ -///////////////////////// ankerl::unordered_dense::{map, set} ///////////////////////// - -// A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion. -// Version 4.5.0 -// https://github.com/martinus/unordered_dense -// -// Licensed under the MIT License . -// SPDX-License-Identifier: MIT -// Copyright (c) 2022-2024 Martin Leitner-Ankerl -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -#ifndef ANKERL_UNORDERED_DENSE_H -#define ANKERL_UNORDERED_DENSE_H - -// see https://semver.org/spec/v2.0.0.html -#define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 4 // NOLINT(cppcoreguidelines-macro-usage) incompatible API changes -#define ANKERL_UNORDERED_DENSE_VERSION_MINOR 5 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality -#define ANKERL_UNORDERED_DENSE_VERSION_PATCH 0 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible bug fixes - -// API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/ - -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch) v##major##_##minor##_##patch -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define ANKERL_UNORDERED_DENSE_VERSION_CONCAT(major, minor, patch) ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch) -#define ANKERL_UNORDERED_DENSE_NAMESPACE \ - ANKERL_UNORDERED_DENSE_VERSION_CONCAT( \ - ANKERL_UNORDERED_DENSE_VERSION_MAJOR, ANKERL_UNORDERED_DENSE_VERSION_MINOR, ANKERL_UNORDERED_DENSE_VERSION_PATCH) - -#if defined(_MSVC_LANG) -# define ANKERL_UNORDERED_DENSE_CPP_VERSION _MSVC_LANG -#else -# define ANKERL_UNORDERED_DENSE_CPP_VERSION __cplusplus -#endif - -#if defined(__GNUC__) -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -# define ANKERL_UNORDERED_DENSE_PACK(decl) decl __attribute__((__packed__)) -#elif defined(_MSC_VER) -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -# define ANKERL_UNORDERED_DENSE_PACK(decl) __pragma(pack(push, 1)) decl __pragma(pack(pop)) -#endif - -// exceptions -#if defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND) -# define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 1 // NOLINT(cppcoreguidelines-macro-usage) -#else -# define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 0 // NOLINT(cppcoreguidelines-macro-usage) -#endif -#ifdef _MSC_VER -# define ANKERL_UNORDERED_DENSE_NOINLINE __declspec(noinline) -#else -# define ANKERL_UNORDERED_DENSE_NOINLINE __attribute__((noinline)) -#endif - -// defined in unordered_dense.cpp -#if !defined(ANKERL_UNORDERED_DENSE_EXPORT) -# define ANKERL_UNORDERED_DENSE_EXPORT -#endif - -#if ANKERL_UNORDERED_DENSE_CPP_VERSION < 201703L -# error ankerl::unordered_dense requires C++17 or higher -#else -# include // for array -# include // for uint64_t, uint32_t, uint8_t, UINT64_C -# include // for size_t, memcpy, memset -# include // for equal_to, hash -# include // for initializer_list -# include // for pair, distance -# include // for numeric_limits -# include // for allocator, allocator_traits, shared_ptr -# include // for optional -# include // for out_of_range -# include // for basic_string -# include // for basic_string_view, hash -# include // for forward_as_tuple -# include // for enable_if_t, declval, conditional_t, ena... -# include // for forward, exchange, pair, as_const, piece... 
-# include // for vector -# if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() == 0 -# include // for abort -# endif - -# if defined(__has_include) && !defined(ANKERL_UNORDERED_DENSE_DISABLE_PMR) -# if __has_include() -# define ANKERL_UNORDERED_DENSE_PMR std::pmr // NOLINT(cppcoreguidelines-macro-usage) -# include // for polymorphic_allocator -# elif __has_include() -# define ANKERL_UNORDERED_DENSE_PMR std::experimental::pmr // NOLINT(cppcoreguidelines-macro-usage) -# include // for polymorphic_allocator -# endif -# endif - -# if defined(_MSC_VER) && defined(_M_X64) -# include -# pragma intrinsic(_umul128) -# endif - -# if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__) -# define ANKERL_UNORDERED_DENSE_LIKELY(x) __builtin_expect(x, 1) // NOLINT(cppcoreguidelines-macro-usage) -# define ANKERL_UNORDERED_DENSE_UNLIKELY(x) __builtin_expect(x, 0) // NOLINT(cppcoreguidelines-macro-usage) -# else -# define ANKERL_UNORDERED_DENSE_LIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage) -# define ANKERL_UNORDERED_DENSE_UNLIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage) -# endif - -namespace ankerl::unordered_dense { -inline namespace ANKERL_UNORDERED_DENSE_NAMESPACE { - -namespace detail { - -# if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() - -// make sure this is not inlined as it is slow and dramatically enlarges code, thus making other -// inlinings more difficult. Throws are also generally the slow path. -[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_key_not_found() { - throw std::out_of_range("ankerl::unordered_dense::map::at(): key not found"); -} -[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_bucket_overflow() { - throw std::overflow_error("ankerl::unordered_dense: reached max bucket size, cannot increase size"); -} -[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_too_many_elements() { - throw std::out_of_range("ankerl::unordered_dense::map::replace(): too many elements"); -} - -# else - -[[noreturn]] inline void on_error_key_not_found() { - abort(); -} -[[noreturn]] inline void on_error_bucket_overflow() { - abort(); -} -[[noreturn]] inline void on_error_too_many_elements() { - abort(); -} - -# endif - -} // namespace detail - -// hash /////////////////////////////////////////////////////////////////////// - -// This is a stripped-down implementation of wyhash: https://github.com/wangyi-fudan/wyhash -// No big-endian support (because different values on different machines don't matter), -// hardcodes seed and the secret, reformats the code, and clang-tidy fixes. -namespace detail::wyhash { - -inline void mum(uint64_t* a, uint64_t* b) { -# if defined(__SIZEOF_INT128__) - __uint128_t r = *a; - r *= *b; - *a = static_cast(r); - *b = static_cast(r >> 64U); -# elif defined(_MSC_VER) && defined(_M_X64) - *a = _umul128(*a, *b, b); -# else - uint64_t ha = *a >> 32U; - uint64_t hb = *b >> 32U; - uint64_t la = static_cast(*a); - uint64_t lb = static_cast(*b); - uint64_t hi{}; - uint64_t lo{}; - uint64_t rh = ha * hb; - uint64_t rm0 = ha * lb; - uint64_t rm1 = hb * la; - uint64_t rl = la * lb; - uint64_t t = rl + (rm0 << 32U); - auto c = static_cast(t < rl); - lo = t + (rm1 << 32U); - c += static_cast(lo < t); - hi = rh + (rm0 >> 32U) + (rm1 >> 32U) + c; - *a = lo; - *b = hi; -# endif -} - -// multiply and xor mix function, aka MUM -[[nodiscard]] inline auto mix(uint64_t a, uint64_t b) -> uint64_t { - mum(&a, &b); - return a ^ b; -} - -// read functions. 
WARNING: we don't care about endianness, so results are different on big endian! -[[nodiscard]] inline auto r8(const uint8_t* p) -> uint64_t { - uint64_t v{}; - std::memcpy(&v, p, 8U); - return v; -} - -[[nodiscard]] inline auto r4(const uint8_t* p) -> uint64_t { - uint32_t v{}; - std::memcpy(&v, p, 4); - return v; -} - -// reads 1, 2, or 3 bytes -[[nodiscard]] inline auto r3(const uint8_t* p, size_t k) -> uint64_t { - return (static_cast(p[0]) << 16U) | (static_cast(p[k >> 1U]) << 8U) | p[k - 1]; -} - -[[maybe_unused]] [[nodiscard]] inline auto hash(void const* key, size_t len) -> uint64_t { - static constexpr auto secret = std::array{UINT64_C(0xa0761d6478bd642f), - UINT64_C(0xe7037ed1a0b428db), - UINT64_C(0x8ebc6af09c88c6e3), - UINT64_C(0x589965cc75374cc3)}; - - auto const* p = static_cast(key); - uint64_t seed = secret[0]; - uint64_t a{}; - uint64_t b{}; - if (ANKERL_UNORDERED_DENSE_LIKELY(len <= 16)) { - if (ANKERL_UNORDERED_DENSE_LIKELY(len >= 4)) { - a = (r4(p) << 32U) | r4(p + ((len >> 3U) << 2U)); - b = (r4(p + len - 4) << 32U) | r4(p + len - 4 - ((len >> 3U) << 2U)); - } else if (ANKERL_UNORDERED_DENSE_LIKELY(len > 0)) { - a = r3(p, len); - b = 0; - } else { - a = 0; - b = 0; - } - } else { - size_t i = len; - if (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 48)) { - uint64_t see1 = seed; - uint64_t see2 = seed; - do { - seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed); - see1 = mix(r8(p + 16) ^ secret[2], r8(p + 24) ^ see1); - see2 = mix(r8(p + 32) ^ secret[3], r8(p + 40) ^ see2); - p += 48; - i -= 48; - } while (ANKERL_UNORDERED_DENSE_LIKELY(i > 48)); - seed ^= see1 ^ see2; - } - while (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 16)) { - seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed); - i -= 16; - p += 16; - } - a = r8(p + i - 16); - b = r8(p + i - 8); - } - - return mix(secret[1] ^ len, mix(a ^ secret[1], b ^ seed)); -} - -[[nodiscard]] inline auto hash(uint64_t x) -> uint64_t { - return detail::wyhash::mix(x, UINT64_C(0x9E3779B97F4A7C15)); -} - -} // namespace detail::wyhash - -ANKERL_UNORDERED_DENSE_EXPORT template -struct hash { - auto operator()(T const& obj) const noexcept(noexcept(std::declval>().operator()(std::declval()))) - -> uint64_t { - return std::hash{}(obj); - } -}; - -template -struct hash::is_avalanching> { - using is_avalanching = void; - auto operator()(T const& obj) const noexcept(noexcept(std::declval>().operator()(std::declval()))) - -> uint64_t { - return std::hash{}(obj); - } -}; - -template -struct hash> { - using is_avalanching = void; - auto operator()(std::basic_string const& str) const noexcept -> uint64_t { - return detail::wyhash::hash(str.data(), sizeof(CharT) * str.size()); - } -}; - -template -struct hash> { - using is_avalanching = void; - auto operator()(std::basic_string_view const& sv) const noexcept -> uint64_t { - return detail::wyhash::hash(sv.data(), sizeof(CharT) * sv.size()); - } -}; - -template -struct hash { - using is_avalanching = void; - auto operator()(T* ptr) const noexcept -> uint64_t { - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast) - return detail::wyhash::hash(reinterpret_cast(ptr)); - } -}; - -template -struct hash> { - using is_avalanching = void; - auto operator()(std::unique_ptr const& ptr) const noexcept -> uint64_t { - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast) - return detail::wyhash::hash(reinterpret_cast(ptr.get())); - } -}; - -template -struct hash> { - using is_avalanching = void; - auto operator()(std::shared_ptr const& ptr) const noexcept -> uint64_t { - // 
NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast) - return detail::wyhash::hash(reinterpret_cast(ptr.get())); - } -}; - -template -struct hash::value>::type> { - using is_avalanching = void; - auto operator()(Enum e) const noexcept -> uint64_t { - using underlying = typename std::underlying_type_t; - return detail::wyhash::hash(static_cast(e)); - } -}; - -template -struct tuple_hash_helper { - // Converts the value into 64bit. If it is an integral type, just cast it. Mixing is doing the rest. - // If it isn't an integral we need to hash it. - template - [[nodiscard]] constexpr static auto to64(Arg const& arg) -> uint64_t { - if constexpr (std::is_integral_v || std::is_enum_v) { - return static_cast(arg); - } else { - return hash{}(arg); - } - } - - [[nodiscard]] static auto mix64(uint64_t state, uint64_t v) -> uint64_t { - return detail::wyhash::mix(state + v, uint64_t{0x9ddfea08eb382d69}); - } - - // Creates a buffer that holds all the data from each element of the tuple. If possible we memcpy the data directly. If - // not, we hash the object and use this for the array. Size of the array is known at compile time, and memcpy is optimized - // away, so filling the buffer is highly efficient. Finally, call wyhash with this buffer. - template - [[nodiscard]] static auto calc_hash(T const& t, std::index_sequence) noexcept -> uint64_t { - auto h = uint64_t{}; - ((h = mix64(h, to64(std::get(t)))), ...); - return h; - } -}; - -template -struct hash> : tuple_hash_helper { - using is_avalanching = void; - auto operator()(std::tuple const& t) const noexcept -> uint64_t { - return tuple_hash_helper::calc_hash(t, std::index_sequence_for{}); - } -}; - -template -struct hash> : tuple_hash_helper { - using is_avalanching = void; - auto operator()(std::pair const& t) const noexcept -> uint64_t { - return tuple_hash_helper::calc_hash(t, std::index_sequence_for{}); - } -}; - -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -# define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T) \ - template <> \ - struct hash { \ - using is_avalanching = void; \ - auto operator()(T const& obj) const noexcept -> uint64_t { \ - return detail::wyhash::hash(static_cast(obj)); \ - } \ - } - -# if defined(__GNUC__) && !defined(__clang__) -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wuseless-cast" -# endif -// see https://en.cppreference.com/w/cpp/utility/hash -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(bool); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(signed char); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned char); -# if ANKERL_UNORDERED_DENSE_CPP_VERSION >= 202002L && defined(__cpp_char8_t) -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char8_t); -# endif -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char16_t); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char32_t); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(wchar_t); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(short); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned short); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(int); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned int); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long long); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long); -ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long); - -# if defined(__GNUC__) && !defined(__clang__) -# pragma GCC diagnostic pop -# endif - -// bucket_type ////////////////////////////////////////////////////////// - -namespace bucket_type { - -struct standard { - static constexpr uint32_t dist_inc = 
1U << 8U; // skip 1 byte fingerprint - static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint - - uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash - uint32_t m_value_idx; // index into the m_values vector. -}; - -ANKERL_UNORDERED_DENSE_PACK(struct big { - static constexpr uint32_t dist_inc = 1U << 8U; // skip 1 byte fingerprint - static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint - - uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash - size_t m_value_idx; // index into the m_values vector. -}); - -} // namespace bucket_type - -namespace detail { - -struct nonesuch {}; -struct default_container_t {}; - -template class Op, class... Args> -struct detector { - using value_t = std::false_type; - using type = Default; -}; - -template class Op, class... Args> -struct detector>, Op, Args...> { - using value_t = std::true_type; - using type = Op; -}; - -template