diff --git a/.ci/build-freebsd.sh b/.ci/build-freebsd.sh index 7f35412b58..8b8eef7f5f 100755 --- a/.ci/build-freebsd.sh +++ b/.ci/build-freebsd.sh @@ -1,10 +1,9 @@ #!/bin/sh -ex -# Pull all the submodules except some +# Pull all the submodules except llvm and opencv # Note: Tried to use git submodule status, but it takes over 20 seconds # shellcheck disable=SC2046 -git config --global --add safe.directory . -git submodule -q update --init --depth 1 $(awk '/path/ && !/llvm/ && !/opencv/ && !/libpng/ && !/libsdl-org/ && !/curl/ && !/zlib/ && !/libusb/ { print $3 }' .gitmodules) +git submodule -q update --init --depth 1 $(awk '/path/ && !/llvm/ && !/opencv/ { print $3 }' .gitmodules) CONFIGURE_ARGS=" -DWITH_LLVM=ON @@ -14,7 +13,6 @@ CONFIGURE_ARGS=" -DUSE_SYSTEM_FFMPEG=ON -DUSE_SYSTEM_CURL=ON -DUSE_SYSTEM_LIBPNG=ON - -DUSE_SYSTEM_LIBUSB=ON -DUSE_SYSTEM_OPENCV=ON " diff --git a/.ci/build-linux-aarch64.sh b/.ci/build-linux-aarch64.sh index 1fe640809c..50247786fe 100755 --- a/.ci/build-linux-aarch64.sh +++ b/.ci/build-linux-aarch64.sh @@ -1,14 +1,14 @@ #!/bin/sh -ex -cd rpcs3 || exit 1 - -shellcheck .ci/*.sh +if [ -z "$CIRRUS_CI" ]; then + cd rpcs3 || exit 1 +fi git config --global --add safe.directory '*' -# Pull all the submodules except some +# Pull all the submodules except llvm and opencv # shellcheck disable=SC2046 -git submodule -q update --init $(awk '/path/ && !/llvm/ && !/opencv/ && !/libsdl-org/ && !/curl/ && !/zlib/ { print $3 }' .gitmodules) +git submodule -q update --init $(awk '/path/ && !/llvm/ && !/opencv/ { print $3 }' .gitmodules) mkdir build && cd build || exit 1 @@ -22,16 +22,15 @@ else export CXX="${CLANGXX_BINARY}" export LINKER="${LLD_BINARY}" fi - -export LINKER_FLAG="-fuse-ld=${LINKER}" +export CFLAGS="$CFLAGS -fuse-ld=${LINKER}" +export CXXFLAGS="$CXXFLAGS -fuse-ld=${LINKER}" cmake .. \ -DCMAKE_INSTALL_PREFIX=/usr \ -DUSE_NATIVE_INSTRUCTIONS=OFF \ -DUSE_PRECOMPILED_HEADERS=OFF \ - -DCMAKE_EXE_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_MODULE_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_SHARED_LINKER_FLAGS="${LINKER_FLAG}" \ + -DCMAKE_C_FLAGS="$CFLAGS" \ + -DCMAKE_CXX_FLAGS="$CFLAGS" \ -DUSE_SYSTEM_CURL=ON \ -DUSE_SDL=ON \ -DUSE_SYSTEM_SDL=ON \ @@ -41,15 +40,19 @@ cmake .. \ -DOpenGL_GL_PREFERENCE=LEGACY \ -DLLVM_DIR=/opt/llvm/lib/cmake/llvm \ -DSTATIC_LINK_LLVM=ON \ - -DBUILD_RPCS3_TESTS="${RUN_UNIT_TESTS}" \ - -DRUN_RPCS3_TESTS="${RUN_UNIT_TESTS}" \ -G Ninja ninja; build_status=$?; cd .. +shellcheck .ci/*.sh + # If it compiled succesfully let's deploy. -if [ "$build_status" -eq 0 ]; then +# Azure and Cirrus publish PRs as artifacts only. 
+{ [ "$CI_HAS_ARTIFACTS" = "true" ]; +} && SHOULD_DEPLOY="true" || SHOULD_DEPLOY="false" + +if [ "$build_status" -eq 0 ] && [ "$SHOULD_DEPLOY" = "true" ]; then .ci/deploy-linux.sh "aarch64" fi diff --git a/.ci/build-linux.sh b/.ci/build-linux.sh index 13a9e802f3..e1466d1c01 100755 --- a/.ci/build-linux.sh +++ b/.ci/build-linux.sh @@ -1,15 +1,15 @@ #!/bin/sh -ex -cd rpcs3 || exit 1 - -shellcheck .ci/*.sh +if [ -z "$CIRRUS_CI" ]; then + cd rpcs3 || exit 1 +fi git config --global --add safe.directory '*' -# Pull all the submodules except some +# Pull all the submodules except llvm and opencv # Note: Tried to use git submodule status, but it takes over 20 seconds # shellcheck disable=SC2046 -git submodule -q update --init $(awk '/path/ && !/llvm/ && !/opencv/ && !/libsdl-org/ && !/curl/ && !/zlib/ { print $3 }' .gitmodules) +git submodule -q update --init $(awk '/path/ && !/llvm/ && !/opencv/ { print $3 }' .gitmodules) mkdir build && cd build || exit 1 @@ -30,7 +30,7 @@ else export RANLIB=/usr/bin/llvm-ranlib-"$LLVMVER" fi -export LINKER_FLAG="-fuse-ld=${LINKER}" +export CFLAGS="$CFLAGS -fuse-ld=${LINKER}" cmake .. \ -DCMAKE_INSTALL_PREFIX=/usr \ @@ -38,9 +38,6 @@ cmake .. \ -DUSE_PRECOMPILED_HEADERS=OFF \ -DCMAKE_C_FLAGS="$CFLAGS" \ -DCMAKE_CXX_FLAGS="$CFLAGS" \ - -DCMAKE_EXE_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_MODULE_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_SHARED_LINKER_FLAGS="${LINKER_FLAG}" \ -DCMAKE_AR="$AR" \ -DCMAKE_RANLIB="$RANLIB" \ -DUSE_SYSTEM_CURL=ON \ @@ -52,15 +49,19 @@ cmake .. \ -DOpenGL_GL_PREFERENCE=LEGACY \ -DLLVM_DIR=/opt/llvm/lib/cmake/llvm \ -DSTATIC_LINK_LLVM=ON \ - -DBUILD_RPCS3_TESTS="${RUN_UNIT_TESTS}" \ - -DRUN_RPCS3_TESTS="${RUN_UNIT_TESTS}" \ -G Ninja ninja; build_status=$?; cd .. +shellcheck .ci/*.sh + # If it compiled succesfully let's deploy. -if [ "$build_status" -eq 0 ]; then +# Azure and Cirrus publish PRs as artifacts only. +{ [ "$CI_HAS_ARTIFACTS" = "true" ]; +} && SHOULD_DEPLOY="true" || SHOULD_DEPLOY="false" + +if [ "$build_status" -eq 0 ] && [ "$SHOULD_DEPLOY" = "true" ]; then .ci/deploy-linux.sh "x86_64" fi diff --git a/.ci/build-mac-arm64.sh b/.ci/build-mac-arm64.sh old mode 100755 new mode 100644 index 3c9c864031..74a2d39bb1 --- a/.ci/build-mac-arm64.sh +++ b/.ci/build-mac-arm64.sh @@ -1,26 +1,66 @@ #!/bin/sh -ex # shellcheck disable=SC2086 +brew_arm64_install_packages() { + for pkg in "$@"; do + echo "Fetching bottle for $pkg (arm64)..." + bottle_path="$("$BREW_ARM64_PATH/bin/brew" --cache --bottle-tag=arm64_sonoma "$pkg")" + if [ ! -f "$bottle_path" ]; then + if ! "$BREW_ARM64_PATH/bin/brew" fetch --force --verbose --debug --bottle-tag=arm64_sonoma "$pkg"; then + echo "Failed to fetch bottle for $pkg" + return 1 + fi + bottle_path="$("$BREW_ARM64_PATH/bin/brew" --cache --bottle-tag=arm64_sonoma "$pkg")" + fi + + echo "Installing $pkg (arm64)..." 
+ "$BREW_ARM64_PATH/bin/brew" install --force --force-bottle --ignore-dependencies "$bottle_path" || true + done +} + export HOMEBREW_NO_AUTO_UPDATE=1 export HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=1 -export HOMEBREW_NO_ENV_HINTS=1 export HOMEBREW_NO_INSTALL_CLEANUP=1 -/opt/homebrew/bin/brew install -f --overwrite --quiet nasm ninja p7zip ccache pipenv gnutls freetype googletest #create-dmg -/opt/homebrew/bin/brew install -f --quiet ffmpeg@5 -/opt/homebrew/bin/brew install --quiet "llvm@$LLVM_COMPILER_VER" glew cmake sdl3 vulkan-headers coreutils -/opt/homebrew/bin/brew link -f --quiet "llvm@$LLVM_COMPILER_VER" ffmpeg@5 +/usr/local/bin/brew update +sudo rm -rf /usr/local/Cellar/curl /usr/local/opt/curl +/usr/local/bin/brew install -f --overwrite curl +/usr/local/bin/brew uninstall -f --ignore-dependencies ffmpeg +/usr/local/bin/brew install -f --build-from-source ffmpeg@5 || true +/usr/local/bin/brew install -f --overwrite python || true +/usr/local/bin/brew link --overwrite python || true +/usr/local/bin/brew install -f --overwrite nasm ninja p7zip ccache pipenv #create-dmg +/usr/local/bin/brew link -f curl || true +/usr/local/bin/brew install llvm@$LLVM_COMPILER_VER glew cmake sdl3 vulkan-headers coreutils +/usr/local/bin/brew link -f llvm@$LLVM_COMPILER_VER ffmpeg@5 || true -# moltenvk based on commit for 1.3.0 release -wget https://raw.githubusercontent.com/Homebrew/homebrew-core/7255441cbcafabaa8950f67c7ec55ff499dbb2d3/Formula/m/molten-vk.rb -/opt/homebrew/bin/brew install -f --overwrite --formula --quiet ./molten-vk.rb +export BREW_ARM64_PATH="/opt/homebrew1" +sudo mkdir -p "$BREW_ARM64_PATH" +sudo chmod 777 "$BREW_ARM64_PATH" +curl -L https://github.com/Homebrew/brew/tarball/master | tar xz --strip 1 -C "$BREW_ARM64_PATH" + +#"$BREW_ARM64_PATH/bin/brew" update +# libvorbis requires Homebrew-installed curl, but we can't run it on x64, and we also need the aarch64 libs, so we swap the binary +brew_arm64_install_packages curl +mv /opt/homebrew1/opt/curl/bin/curl /opt/homebrew1/opt/curl/bin/curl.bak +ln -s /usr/local/opt/curl/bin/curl /opt/homebrew1/opt/curl/bin/curl + +brew_arm64_install_packages 0mq aom aribb24 ca-certificates cjson dav1d ffmpeg@5 fontconfig freetype freetype2 gettext glew gmp gnutls lame libbluray libidn2 libnettle libogg libpng librist libsodium libsoxr libtasn libtasn1 libunistring libvmaf libvorbis libvpx libx11 libxau libxcb libxdmcp llvm@$LLVM_COMPILER_VER mbedtls molten-vk nettle opencore-amr openjpeg openssl opus p11-kit pkg-config pkgconfig pzstd rav1e sdl3 snappy speex srt svt-av1 theora vulkan-headers webp x264 x265 xz z3 zeromq zmq zstd +"$BREW_ARM64_PATH/bin/brew" link -f ffmpeg@5 +ln -s "/opt/homebrew1/opt/llvm@$LLVM_COMPILER_VER/lib/unwind/libunwind.1.dylib" "/opt/homebrew1/opt/llvm@$LLVM_COMPILER_VER/lib/libunwind.1.dylib" + +# moltenvk based on commit for 1.2.11 release +wget https://raw.githubusercontent.com/Homebrew/homebrew-core/6bfc8950c696d1f952425e8af2a6248603dc0df9/Formula/m/molten-vk.rb +/usr/local/bin/brew install -f --overwrite ./molten-vk.rb export CXX=clang++ export CC=clang export BREW_PATH; BREW_PATH="$(brew --prefix)" -export BREW_BIN="/opt/homebrew/bin" -export BREW_SBIN="/opt/homebrew/sbin" +export BREW_X64_PATH; +BREW_X64_PATH="$("/usr/local/bin/brew" --prefix)" +export BREW_BIN="/usr/local/bin" +export BREW_SBIN="/usr/local/sbin" export CMAKE_EXTRA_OPTS='-DLLVM_TARGETS_TO_BUILD=arm64' export WORKDIR; @@ -32,53 +72,49 @@ if [ ! 
-d "/tmp/Qt/$QT_VER" ]; then git clone https://github.com/engnr/qt-downloader.git cd qt-downloader git checkout f52efee0f18668c6d6de2dec0234b8c4bc54c597 - # nested Qt 6.9.1 URL workaround + # nested Qt 6.8.3 URL workaround # sed -i '' "s/'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/'qt{0}_{0}{1}{2}'.format(major, minor, patch), 'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/g" qt-downloader # sed -i '' "s/'{}\/{}\/qt{}_{}\/'/'{0}\/{1}\/qt{2}_{3}\/qt{2}_{3}\/'/g" qt-downloader - # archived Qt 6.7.3 URL workaround - sed -i '' "s/official_releases/archive/g" qt-downloader cd "/tmp/Qt" - arch -arm64 "$BREW_PATH/bin/pipenv" run pip3 uninstall py7zr requests semantic_version lxml - arch -arm64 "$BREW_PATH/bin/pipenv" run pip3 install py7zr requests semantic_version lxml --no-cache + "$BREW_X64_PATH/bin/pipenv" run pip3 install py7zr requests semantic_version lxml mkdir -p "$QT_VER/macos" ; ln -s "macos" "$QT_VER/clang_64" - # sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.9.1 workaround - arch -arm64 "$BREW_PATH/bin/pipenv" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats # -o "$QT_VER/clang_64" + # sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.8.3 workaround + "$BREW_X64_PATH/bin/pipenv" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats # -o "$QT_VER/clang_64" fi cd "$WORKDIR" ditto "/tmp/Qt/$QT_VER" "qt-downloader/$QT_VER" export Qt6_DIR="$WORKDIR/qt-downloader/$QT_VER/clang_64/lib/cmake/Qt$QT_VER_MAIN" -export SDL3_DIR="$BREW_PATH/opt/sdl3/lib/cmake/SDL3" +export SDL3_DIR="$BREW_ARM64_PATH/opt/sdl3/lib/cmake/SDL3" -export PATH="$BREW_PATH/opt/llvm@$LLVM_COMPILER_VER/bin:$WORKDIR/qt-downloader/$QT_VER/clang_64/bin:$BREW_BIN:$BREW_SBIN:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/Library/Apple/usr/bin:$PATH" -export LDFLAGS="-L$BREW_PATH/lib $BREW_PATH/opt/ffmpeg@5/lib/libavcodec.dylib $BREW_PATH/opt/ffmpeg@5/lib/libavformat.dylib $BREW_PATH/opt/ffmpeg@5/lib/libavutil.dylib $BREW_PATH/opt/ffmpeg@5/lib/libswscale.dylib $BREW_PATH/opt/ffmpeg@5/lib/libswresample.dylib $BREW_PATH/opt/llvm@$LLVM_COMPILER_VER/lib/c++/libc++.1.dylib $BREW_PATH/lib/libSDL3.dylib $BREW_PATH/lib/libGLEW.dylib $BREW_PATH/opt/llvm@$LLVM_COMPILER_VER/lib/unwind/libunwind.1.dylib -Wl,-rpath,$BREW_PATH/lib" -export CPPFLAGS="-I$BREW_PATH/include -I$BREW_PATH/include -no-pie -D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" +export PATH="$BREW_X64_PATH/opt/llvm@$LLVM_COMPILER_VER/bin:$WORKDIR/qt-downloader/$QT_VER/clang_64/bin:$BREW_BIN:$BREW_SBIN:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/Library/Apple/usr/bin:$PATH" +export LDFLAGS="-L$BREW_ARM64_PATH/lib $BREW_ARM64_PATH/opt/ffmpeg@5/lib/libavcodec.dylib $BREW_ARM64_PATH/opt/ffmpeg@5/lib/libavformat.dylib $BREW_ARM64_PATH/opt/ffmpeg@5/lib/libavutil.dylib $BREW_ARM64_PATH/opt/ffmpeg@5/lib/libswscale.dylib $BREW_ARM64_PATH/opt/ffmpeg@5/lib/libswresample.dylib $BREW_ARM64_PATH/opt/llvm@$LLVM_COMPILER_VER/lib/c++/libc++.1.dylib $BREW_ARM64_PATH/lib/libSDL3.dylib $BREW_ARM64_PATH/lib/libGLEW.dylib $BREW_ARM64_PATH/opt/llvm@$LLVM_COMPILER_VER/lib/libunwind.1.dylib -Wl,-rpath,$BREW_ARM64_PATH/lib" +export CPPFLAGS="-I$BREW_ARM64_PATH/include -I$BREW_X64_PATH/include -no-pie -D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" export CFLAGS="-D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" -export 
LIBRARY_PATH="$BREW_PATH/lib" -export LD_LIBRARY_PATH="$BREW_PATH/lib" +export LIBRARY_PATH="$BREW_ARM64_PATH/lib" +export LD_LIBRARY_PATH="$BREW_ARM64_PATH/lib" export VULKAN_SDK -VULKAN_SDK="$BREW_PATH/opt/molten-vk" +VULKAN_SDK="$BREW_ARM64_PATH/opt/molten-vk" ln -s "$VULKAN_SDK/lib/libMoltenVK.dylib" "$VULKAN_SDK/lib/libvulkan.dylib" || true export VK_ICD_FILENAMES="$VULKAN_SDK/share/vulkan/icd.d/MoltenVK_icd.json" export LLVM_DIR -LLVM_DIR="$BREW_PATH/opt/llvm@$LLVM_COMPILER_VER" -# exclude ffmpeg, LLVM, opencv, and sdl from submodule update +LLVM_DIR="$BREW_ARM64_PATH/opt/llvm@$LLVM_COMPILER_VER" +# exclude ffmpeg, LLVM, and sdl from submodule update # shellcheck disable=SC2046 -git submodule -q update --init --depth=1 --jobs=8 $(awk '/path/ && !/ffmpeg/ && !/llvm/ && !/opencv/ && !/SDL/ { print $3 }' .gitmodules) +git submodule -q update --init --depth=1 --jobs=8 $(awk '/path/ && !/ffmpeg/ && !/llvm/ && !/SDL/ { print $3 }' .gitmodules) # 3rdparty fixes sed -i '' "s/extern const double NSAppKitVersionNumber;/const double NSAppKitVersionNumber = 1343;/g" 3rdparty/hidapi/hidapi/mac/hid.c +rm -rf build mkdir build && cd build || exit 1 export MACOSX_DEPLOYMENT_TARGET=14.0 -"$BREW_PATH/bin/cmake" .. \ - -DBUILD_RPCS3_TESTS="${RUN_UNIT_TESTS}" \ - -DRUN_RPCS3_TESTS="${RUN_UNIT_TESTS}" \ +"$BREW_X64_PATH/bin/cmake" .. \ -DUSE_SDL=ON \ -DUSE_DISCORD_RPC=ON \ -DUSE_VULKAN=ON \ @@ -101,21 +137,23 @@ export MACOSX_DEPLOYMENT_TARGET=14.0 -DUSE_SYSTEM_FAUDIO=OFF \ -DUSE_SYSTEM_SDL=ON \ -DUSE_SYSTEM_OPENCV=ON \ - "$CMAKE_EXTRA_OPTS" \ + $CMAKE_EXTRA_OPTS \ -DLLVM_TARGET_ARCH=arm64 \ -DCMAKE_OSX_ARCHITECTURES=arm64 \ - -DCMAKE_IGNORE_PATH="$BREW_PATH/lib" \ - -DCMAKE_IGNORE_PREFIX_PATH=/opt/homebrew/opt \ + -DCMAKE_IGNORE_PATH="$BREW_X64_PATH/lib" \ + -DCMAKE_IGNORE_PREFIX_PATH=/usr/local/opt \ + -DCMAKE_SYSTEM_PROCESSOR=arm64 \ + -DCMAKE_TOOLCHAIN_FILE=buildfiles/cmake/TCDarwinARM64.cmake \ -DCMAKE_CXX_FLAGS="-D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" \ - -DCMAKE_POLICY_VERSION_MINIMUM=3.5 \ - -DCMAKE_OSX_SYSROOT="$(xcrun --sdk macosx --show-sdk-path)" \ -G Ninja "$BREW_PATH/bin/ninja"; build_status=$?; cd .. -# If it compiled succesfully let's deploy. 
-if [ "$build_status" -eq 0 ]; then +{ [ "$CI_HAS_ARTIFACTS" = "true" ]; +} && SHOULD_DEPLOY="true" || SHOULD_DEPLOY="false" + +if [ "$build_status" -eq 0 ] && [ "$SHOULD_DEPLOY" = "true" ]; then .ci/deploy-mac-arm64.sh fi diff --git a/.ci/build-mac.sh b/.ci/build-mac.sh old mode 100755 new mode 100644 index 1e6bae01a1..b02417cbcb --- a/.ci/build-mac.sh +++ b/.ci/build-mac.sh @@ -3,24 +3,27 @@ # shellcheck disable=SC2086 export HOMEBREW_NO_AUTO_UPDATE=1 export HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK=1 -export HOMEBREW_NO_ENV_HINTS=1 -export HOMEBREW_NO_INSTALL_CLEANUP=1 +brew unlink certifi +brew install -f --overwrite nasm ninja p7zip ccache pipenv #create-dmg #/usr/sbin/softwareupdate --install-rosetta --agree-to-license arch -x86_64 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" arch -x86_64 /usr/local/bin/brew update -arch -x86_64 /usr/local/bin/brew install -f --overwrite --quiet python || arch -x86_64 /usr/local/bin/brew link --overwrite python -arch -x86_64 /usr/local/bin/brew install -f --overwrite --quiet nasm ninja p7zip ccache pipenv gnutls freetype #create-dmg -arch -x86_64 /usr/local/bin/brew install -f --quiet ffmpeg@5 -arch -x86_64 /usr/local/bin/brew install --quiet "llvm@$LLVM_COMPILER_VER" glew cmake sdl3 vulkan-headers coreutils -arch -x86_64 /usr/local/bin/brew link -f --quiet "llvm@$LLVM_COMPILER_VER" ffmpeg@5 +arch -x86_64 /usr/local/bin/brew install -f --overwrite python || arch -x86_64 /usr/local/bin/brew link --overwrite python +arch -x86_64 /usr/local/bin/brew uninstall -f --ignore-dependencies ffmpeg +arch -x86_64 /usr/local/bin/brew install -f --build-from-source ffmpeg@5 +arch -x86_64 /usr/local/bin/brew reinstall -f --build-from-source gnutls freetype +arch -x86_64 /usr/local/bin/brew install llvm@$LLVM_COMPILER_VER glew cmake sdl3 vulkan-headers coreutils +arch -x86_64 /usr/local/bin/brew link -f llvm@$LLVM_COMPILER_VER ffmpeg@5 -# moltenvk based on commit for 1.3.0 release -wget https://raw.githubusercontent.com/Homebrew/homebrew-core/7255441cbcafabaa8950f67c7ec55ff499dbb2d3/Formula/m/molten-vk.rb -arch -x86_64 /usr/local/bin/brew install -f --overwrite --formula --quiet ./molten-vk.rb +# moltenvk based on commit for 1.2.11 release +wget https://raw.githubusercontent.com/Homebrew/homebrew-core/6bfc8950c696d1f952425e8af2a6248603dc0df9/Formula/m/molten-vk.rb +arch -x86_64 /usr/local/bin/brew install -f --overwrite ./molten-vk.rb export CXX=clang++ export CC=clang +export BREW_PATH; +BREW_PATH="$(brew --prefix)" export BREW_X64_PATH; BREW_X64_PATH="$("/usr/local/bin/brew" --prefix)" export BREW_BIN="/usr/local/bin" @@ -36,16 +39,14 @@ if [ ! 
-d "/tmp/Qt/$QT_VER" ]; then git clone https://github.com/engnr/qt-downloader.git cd qt-downloader git checkout f52efee0f18668c6d6de2dec0234b8c4bc54c597 - # nested Qt 6.9.1 URL workaround + # nested Qt 6.8.3 URL workaround # sed -i '' "s/'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/'qt{0}_{0}{1}{2}'.format(major, minor, patch), 'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/g" qt-downloader # sed -i '' "s/'{}\/{}\/qt{}_{}\/'/'{0}\/{1}\/qt{2}_{3}\/qt{2}_{3}\/'/g" qt-downloader - # archived Qt 6.7.3 URL workaround - sed -i '' "s/official_releases/archive/g" qt-downloader cd "/tmp/Qt" - arch -x86_64 "$BREW_X64_PATH/bin/pipenv" --python "$BREW_X64_PATH/bin/python3" run pip3 install py7zr requests semantic_version lxml + "$BREW_X64_PATH/bin/pipenv" run pip3 install py7zr requests semantic_version lxml mkdir -p "$QT_VER/macos" ; ln -s "macos" "$QT_VER/clang_64" - # sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.9.1 workaround - arch -x86_64 "$BREW_X64_PATH/bin/pipenv" --python "$BREW_X64_PATH/bin/python3" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats # -o "$QT_VER/clang_64" + # sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.8.3 workaround + "$BREW_X64_PATH/bin/pipenv" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats # -o "$QT_VER/clang_64" fi cd "$WORKDIR" @@ -80,8 +81,6 @@ mkdir build && cd build || exit 1 export MACOSX_DEPLOYMENT_TARGET=14.0 "$BREW_X64_PATH/bin/cmake" .. \ - -DBUILD_RPCS3_TESTS=OFF \ - -DRUN_RPCS3_TESTS=OFF \ -DUSE_SDL=ON \ -DUSE_DISCORD_RPC=ON \ -DUSE_VULKAN=ON \ @@ -104,21 +103,20 @@ export MACOSX_DEPLOYMENT_TARGET=14.0 -DUSE_SYSTEM_FAUDIO=OFF \ -DUSE_SYSTEM_SDL=ON \ -DUSE_SYSTEM_OPENCV=ON \ - "$CMAKE_EXTRA_OPTS" \ + $CMAKE_EXTRA_OPTS \ -DLLVM_TARGET_ARCH=X86_64 \ -DCMAKE_OSX_ARCHITECTURES=x86_64 \ - -DCMAKE_IGNORE_PATH="$BREW_X64_PATH/lib" \ - -DCMAKE_IGNORE_PREFIX_PATH=/usr/local/opt \ + -DCMAKE_IGNORE_PATH="$BREW_PATH/lib" \ -DCMAKE_CXX_FLAGS="-D__MAC_OS_X_VERSION_MIN_REQUIRED=140000" \ - -DCMAKE_POLICY_VERSION_MINIMUM=3.5 \ - -DCMAKE_OSX_SYSROOT="$(xcrun --sdk macosx --show-sdk-path)" \ -G Ninja -"$BREW_X64_PATH/bin/ninja"; build_status=$?; +"$BREW_PATH/bin/ninja"; build_status=$?; cd .. -# If it compiled succesfully let's deploy. -if [ "$build_status" -eq 0 ]; then +{ [ "$CI_HAS_ARTIFACTS" = "true" ]; +} && SHOULD_DEPLOY="true" || SHOULD_DEPLOY="false" + +if [ "$build_status" -eq 0 ] && [ "$SHOULD_DEPLOY" = "true" ]; then .ci/deploy-mac.sh fi diff --git a/.ci/build-windows-clang.sh b/.ci/build-windows-clang.sh deleted file mode 100644 index 0880e7f5ed..0000000000 --- a/.ci/build-windows-clang.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/sh -ex - -git config --global --add safe.directory '*' - -# Pull all the submodules except some -# Note: Tried to use git submodule status, but it takes over 20 seconds -# shellcheck disable=SC2046 -git submodule -q update --init $(awk '/path/ && !/llvm/ && !/opencv/ && !/ffmpeg/ && !/curl/ && !/FAudio/ && !/zlib/ { print $3 }' .gitmodules) - -mkdir build && cd build || exit 1 - -export CC="clang" -export CXX="clang++" -export LINKER=lld -export LINKER_FLAG="-fuse-ld=${LINKER}" - -if [ -n "$LLVMVER" ]; then - export AR="llvm-ar-$LLVMVER" - export RANLIB="llvm-ranlib-$LLVMVER" -else - export AR="llvm-ar" - export RANLIB="llvm-ranlib" -fi - -cmake .. 
\ - -DCMAKE_PREFIX_PATH=/clang64 \ - -DCMAKE_INSTALL_PREFIX=/usr \ - -DUSE_NATIVE_INSTRUCTIONS=OFF \ - -DUSE_PRECOMPILED_HEADERS=OFF \ - -DCMAKE_C_FLAGS="$CFLAGS" \ - -DCMAKE_CXX_FLAGS="$CFLAGS" \ - -DCMAKE_EXE_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_MODULE_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_SHARED_LINKER_FLAGS="${LINKER_FLAG}" \ - -DCMAKE_AR="$AR" \ - -DCMAKE_RANLIB="$RANLIB" \ - -DUSE_SYSTEM_CURL=ON \ - -DUSE_FAUDIO=OFF \ - -DUSE_SDL=ON \ - -DUSE_SYSTEM_SDL=OFF \ - -DUSE_SYSTEM_FFMPEG=ON \ - -DUSE_SYSTEM_OPENCV=ON \ - -DUSE_SYSTEM_OPENAL=OFF \ - -DUSE_DISCORD_RPC=ON \ - -DOpenGL_GL_PREFERENCE=LEGACY \ - -DWITH_LLVM=ON \ - -DLLVM_DIR=/clang64/lib/cmake/llvm \ - -DVulkan_LIBRARY=/clang64/lib/libvulkan-1.dll.a \ - -DSTATIC_LINK_LLVM=ON \ - -DBUILD_RPCS3_TESTS=OFF \ - -DRUN_RPCS3_TESTS=OFF \ - -G Ninja - -ninja; build_status=$?; - -cd .. - -# If it compiled succesfully let's deploy. -if [ "$build_status" -eq 0 ]; then - .ci/deploy-windows-clang.sh "x86_64" -fi diff --git a/.ci/deploy-linux.sh b/.ci/deploy-linux.sh index f8c3d849c3..38a1d3e05c 100755 --- a/.ci/deploy-linux.sh +++ b/.ci/deploy-linux.sh @@ -25,17 +25,11 @@ if [ "$DEPLOY_APPIMAGE" = "true" ]; then # Remove libvulkan because it causes issues with gamescope rm -f ./AppDir/usr/lib/libvulkan.so* - # Remove unused Qt6 libraries - rm -f ./AppDir/usr/lib/libQt6VirtualKeyboard.so* - rm -f ./AppDir/usr/plugins/platforminputcontexts/libqtvirtualkeyboardplugin.so* - # Remove git directory containing local commit history file rm -rf ./AppDir/usr/share/rpcs3/git - curl -fsSLo /uruntime "https://github.com/VHSgunzo/uruntime/releases/download/v0.3.4/uruntime-appimage-dwarfs-$CPU_ARCH" - chmod +x /uruntime - /uruntime --appimage-mkdwarfs -f --set-owner 0 --set-group 0 --no-history --no-create-timestamp \ - --compression zstd:level=22 -S26 -B32 --header /uruntime -i AppDir -o RPCS3.AppImage + linuxdeploy --appimage-extract + ./squashfs-root/plugins/linuxdeploy-plugin-appimage/usr/bin/appimagetool AppDir -g APPIMAGE_SUFFIX="linux_${CPU_ARCH}" if [ "$CPU_ARCH" = "x86_64" ]; then diff --git a/.ci/deploy-llvm.sh b/.ci/deploy-llvm.sh deleted file mode 100644 index 35e5d780af..0000000000 --- a/.ci/deploy-llvm.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh -ex - -# First let's print some info about our caches -"$(cygpath -u "$CCACHE_BIN_DIR")"/ccache.exe --show-stats -v - -# BUILD_blablabla is Azure specific, so we wrap it for portability -ARTIFACT_DIR="$BUILD_ARTIFACTSTAGINGDIRECTORY" -BUILD="llvmlibs_mt.7z" - -# Package artifacts -7z a -m0=LZMA2 -mx9 "$BUILD" ./build/lib/Release-x64/llvm_build - -# Generate sha256 hashes -# Write to file for GitHub releases -sha256sum "$BUILD" | awk '{ print $1 }' | tee "$BUILD.sha256" -echo "$(cat "$BUILD.sha256");$(stat -c %s "$BUILD")B" > GitHubReleaseMessage.txt - -# Move files to publishing directory -cp -- "$BUILD" "$ARTIFACT_DIR" -cp -- "$BUILD.sha256" "$ARTIFACT_DIR" diff --git a/.ci/deploy-mac-arm64.sh b/.ci/deploy-mac-arm64.sh old mode 100755 new mode 100644 index e7de472378..da1fbe9165 --- a/.ci/deploy-mac-arm64.sh +++ b/.ci/deploy-mac-arm64.sh @@ -16,9 +16,9 @@ echo "AVVER=$AVVER" >> ../.ci/ci-vars.env cd bin mkdir "rpcs3.app/Contents/lib/" || true -cp "$(realpath /opt/homebrew/opt/llvm@$LLVM_COMPILER_VER/lib/c++/libc++abi.1.0.dylib)" "rpcs3.app/Contents/Frameworks/libc++abi.1.dylib" -cp "$(realpath /opt/homebrew/lib/libsharpyuv.0.dylib)" "rpcs3.app/Contents/lib/libsharpyuv.0.dylib" -cp "$(realpath /opt/homebrew/lib/libintl.8.dylib)" "rpcs3.app/Contents/lib/libintl.8.dylib" +cp "$(realpath 
/opt/homebrew1/opt/llvm@$LLVM_COMPILER_VER/lib/c++/libc++abi.1.0.dylib)" "rpcs3.app/Contents/Frameworks/libc++abi.1.dylib" +cp "$(realpath /opt/homebrew1/lib/libsharpyuv.0.dylib)" "rpcs3.app/Contents/lib/libsharpyuv.0.dylib" +cp "$(realpath /opt/homebrew1/lib/libintl.8.dylib)" "rpcs3.app/Contents/lib/libintl.8.dylib" rm -rf "rpcs3.app/Contents/Frameworks/QtPdf.framework" \ "rpcs3.app/Contents/Frameworks/QtQml.framework" \ @@ -33,8 +33,10 @@ rm -rf "rpcs3.app/Contents/Frameworks/QtPdf.framework" \ # Hack install_name_tool \ +-delete_rpath /opt/homebrew1/lib \ -delete_rpath /opt/homebrew/lib \ --delete_rpath /opt/homebrew/opt/llvm@$LLVM_COMPILER_VER/lib RPCS3.app/Contents/MacOS/rpcs3 +-delete_rpath /opt/homebrew1/opt/llvm@$LLVM_COMPILER_VER/lib \ +-delete_rpath /usr/local/lib RPCS3.app/Contents/MacOS/rpcs3 #-delete_rpath /opt/homebrew1/Cellar/sdl3/3.2.8/lib # Need to do this rename hack due to case insensitive filesystem @@ -65,7 +67,7 @@ echo "IconIndex=0" >> Quickstart.url #SHA256SUM=$(shasum -a 256 "$DMG_FILEPATH" | awk '{ print $1 }') ARCHIVE_FILEPATH="$BUILD_ARTIFACTSTAGINGDIRECTORY/rpcs3-v${COMM_TAG}-${COMM_COUNT}-${COMM_HASH}_macos_arm64.7z" -"$BREW_PATH/bin/7z" a -mx9 "$ARCHIVE_FILEPATH" RPCS3.app Quickstart.url +"$BREW_X64_PATH/bin/7z" a -mx9 "$ARCHIVE_FILEPATH" RPCS3.app Quickstart.url FILESIZE=$(stat -f %z "$ARCHIVE_FILEPATH") SHA256SUM=$(shasum -a 256 "$ARCHIVE_FILEPATH" | awk '{ print $1 }') diff --git a/.ci/deploy-mac.sh b/.ci/deploy-mac.sh old mode 100755 new mode 100644 diff --git a/.ci/deploy-windows-clang.sh b/.ci/deploy-windows-clang.sh deleted file mode 100644 index d45cb45acf..0000000000 --- a/.ci/deploy-windows-clang.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -ex - -# source ci-vars.env -# shellcheck disable=SC1091 -. .ci/ci-vars.env - -cd build || exit 1 - -CPU_ARCH="${1:-x86_64}" - -echo "Deploying rpcs3 windows clang $CPU_ARCH" - -# BUILD_blablabla is CI specific, so we wrap it for portability -ARTIFACT_DIR=$(cygpath -u "$BUILD_ARTIFACTSTAGINGDIRECTORY") -MSYS2_CLANG_BIN=$(cygpath -w /clang64/bin) -MSYS2_USR_BIN=$(cygpath -w /usr/bin) - -echo "Installing dependencies of: ./bin/rpcs3.exe (MSYS2 dir is '$MSYS2_CLANG_BIN', usr dir is '$MSYS2_USR_BIN')" -cmake -DMSYS2_CLANG_BIN="$MSYS2_CLANG_BIN" -DMSYS2_USR_BIN="$MSYS2_USR_BIN" -Dexe=./bin/rpcs3.exe -P ../buildfiles/cmake/CopyRuntimeDependencies.cmake - -# Prepare compatibility and SDL database for packaging -mkdir ./bin/config -mkdir ./bin/config/input_configs -curl -fsSL 'https://raw.githubusercontent.com/gabomdq/SDL_GameControllerDB/master/gamecontrollerdb.txt' 1> ./bin/config/input_configs/gamecontrollerdb.txt -curl -fsSL 'https://rpcs3.net/compatibility?api=v1&export' | iconv -t UTF-8 1> ./bin/GuiConfigs/compat_database.dat - -# Package artifacts -7z a -m0=LZMA2 -mx9 "$BUILD" ./bin/* - -# Generate sha256 hashes -# Write to file for GitHub releases -sha256sum "$BUILD" | awk '{ print $1 }' | tee "$BUILD.sha256" -echo "$(cat "$BUILD.sha256");$(stat -c %s "$BUILD")B" > GitHubReleaseMessage.txt - -# Move files to publishing directory -mkdir -p "$ARTIFACT_DIR" -cp -- "$BUILD" "$ARTIFACT_DIR" -cp -- "$BUILD.sha256" "$ARTIFACT_DIR" diff --git a/.ci/deploy-windows.sh b/.ci/deploy-windows.sh index b885831511..e109dee9e1 100755 --- a/.ci/deploy-windows.sh +++ b/.ci/deploy-windows.sh @@ -1,13 +1,14 @@ #!/bin/sh -ex -# First let's print some info about our caches +# First let's see print some info about our caches "$(cygpath -u "$CCACHE_BIN_DIR")"/ccache.exe --show-stats -v -# BUILD_blablabla is CI specific, so we wrap it for 
portability +# BUILD_blablabla is Azure specific, so we wrap it for portability ARTIFACT_DIR="$BUILD_ARTIFACTSTAGINGDIRECTORY" # Remove unecessary files rm -f ./bin/rpcs3.exp ./bin/rpcs3.lib ./bin/rpcs3.pdb ./bin/vc_redist.x64.exe +rm -rf ./bin/git # Prepare compatibility and SDL database for packaging mkdir ./bin/config diff --git a/.ci/docker.env b/.ci/docker.env index eb70b68c18..2b36fb34c0 100644 --- a/.ci/docker.env +++ b/.ci/docker.env @@ -1,4 +1,5 @@ -# Variables set by CI +# Variables set by Azure Pipelines +CI_HAS_ARTIFACTS BUILD_REASON BUILD_SOURCEVERSION BUILD_ARTIFACTSTAGINGDIRECTORY @@ -7,7 +8,6 @@ BUILD_SOURCEBRANCHNAME APPDIR ARTDIR RELEASE_MESSAGE -RUN_UNIT_TESTS # Variables for build matrix COMPILER DEPLOY_APPIMAGE diff --git a/.ci/export-azure-vars.sh b/.ci/export-azure-vars.sh new file mode 100755 index 0000000000..033dd41cc8 --- /dev/null +++ b/.ci/export-azure-vars.sh @@ -0,0 +1,13 @@ +#!/bin/sh -e + +# Export variables for later stages of the Azure pipeline +# Values done in this manner will appear as environment variables +# in later stages. + +# From pure-sh-bible +# Setting 'IFS' tells 'read' where to split the string. +while IFS='=' read -r key val; do + # Skip over lines containing comments. + [ "${key##\#*}" ] || continue + echo "##vso[task.setvariable variable=$key]$val" +done < ".ci/ci-vars.env" diff --git a/.ci/export-cirrus-vars.sh b/.ci/export-cirrus-vars.sh new file mode 100644 index 0000000000..561e77e92f --- /dev/null +++ b/.ci/export-cirrus-vars.sh @@ -0,0 +1,13 @@ +#!/bin/sh -e + +# Export variables for later stages of the Cirrus pipeline +# Values done in this manner will appear as environment variables +# in later stages. + +# From pure-sh-bible +# Setting 'IFS' tells 'read' where to split the string. +while IFS='=' read -r key val; do + # Skip over lines containing comments. 
+ [ "${key##\#*}" ] || continue + export "$key"="$val" +done < ".ci/ci-vars.env" diff --git a/.ci/get_keys-windows.sh b/.ci/get_keys-windows.sh old mode 100755 new mode 100644 index 8384b4de5d..8c7ea9c14e --- a/.ci/get_keys-windows.sh +++ b/.ci/get_keys-windows.sh @@ -1,3 +1,4 @@ #!/bin/sh -ex curl -fLo "./llvm.lock" "https://github.com/RPCS3/llvm-mirror/releases/download/custom-build-win-${LLVM_VER}/llvmlibs_mt.7z.sha256" +curl -fLo "./glslang.lock" "https://github.com/RPCS3/glslang/releases/download/custom-build-win/glslanglibs_mt.7z.sha256" diff --git a/.ci/install-freebsd.sh b/.ci/install-freebsd.sh index 04efc6c0e9..de10561dfa 100755 --- a/.ci/install-freebsd.sh +++ b/.ci/install-freebsd.sh @@ -15,4 +15,4 @@ pkg install "llvm$LLVM_COMPILER_VER" pkg install git ccache cmake ninja "qt$QT_VER_MAIN-multimedia" "qt$QT_VER_MAIN-svg" glew openal-soft ffmpeg # Optional dependencies (libevdev is pulled by qtX-base) -pkg install pkgconf alsa-lib pulseaudio sdl3 evdev-proto vulkan-headers vulkan-loader opencv +pkg install pkgconf alsa-lib pulseaudio sdl3 evdev-proto vulkan-headers vulkan-loader diff --git a/.ci/optimize-mac.sh b/.ci/optimize-mac.sh old mode 100755 new mode 100644 diff --git a/.ci/setup-llvm.sh b/.ci/setup-llvm.sh deleted file mode 100644 index a54901309e..0000000000 --- a/.ci/setup-llvm.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/sh -ex - -# Resource/dependency URLs -CCACHE_URL="https://github.com/ccache/ccache/releases/download/v4.11.2/ccache-4.11.2-windows-x86_64.zip" - -DEP_URLS=" \ - $CCACHE_URL" - -# CI doesn't make a cache dir if it doesn't exist, so we do it manually -[ -d "$DEPS_CACHE_DIR" ] || mkdir "$DEPS_CACHE_DIR" - -# Pull the llvm submodule -# shellcheck disable=SC2046 -git submodule -q update --init --depth=1 -- 3rdparty/llvm - -# Git bash doesn't have rev, so here it is -rev() -{ - echo "$1" | awk '{ for(i = length($0); i != 0; --i) { a = a substr($0, i, 1); } } END { print a }' -} - -# Usage: download_and_verify url checksum algo file -# Check to see if a file is already cached, and the checksum matches. If not, download it. -# Tries up to 3 times -download_and_verify() -{ - url="$1" - correctChecksum="$2" - algo="$3" - fileName="$4" - - for _ in 1 2 3; do - [ -e "$DEPS_CACHE_DIR/$fileName" ] || curl -fLo "$DEPS_CACHE_DIR/$fileName" "$url" - fileChecksum=$("${algo}sum" "$DEPS_CACHE_DIR/$fileName" | awk '{ print $1 }') - [ "$fileChecksum" = "$correctChecksum" ] && return 0 - done - - return 1; -} - -# Some dependencies install here -[ -d "./build/lib_ext/Release-x64" ] || mkdir -p "./build/lib_ext/Release-x64" - -for url in $DEP_URLS; do - # Get the filename from the URL and remove query strings (?arg=something). - fileName="$(rev "$(rev "$url" | cut -d'/' -f1)" | cut -d'?' 
-f1)" - [ -z "$fileName" ] && echo "Unable to parse url: $url" && exit 1 - - # shellcheck disable=SC1003 - case "$url" in - *ccache*) checksum=$CCACHE_SHA; algo="sha256"; outDir="$CCACHE_BIN_DIR" ;; - *) echo "Unknown url resource: $url"; exit 1 ;; - esac - - download_and_verify "$url" "$checksum" "$algo" "$fileName" - 7z x -y "$DEPS_CACHE_DIR/$fileName" -aos -o"$outDir" -done - -# Setup ccache tool -[ -d "$CCACHE_DIR" ] || mkdir -p "$(cygpath -u "$CCACHE_DIR")" -CCACHE_SH_DIR=$(cygpath -u "$CCACHE_BIN_DIR") -mv "$CCACHE_SH_DIR"/ccache-*/* "$CCACHE_SH_DIR" -cp "$CCACHE_SH_DIR"/ccache.exe "$CCACHE_SH_DIR"/cl.exe diff --git a/.ci/setup-windows-ci-vars.sh b/.ci/setup-windows-ci-vars.sh deleted file mode 100644 index 11373e0716..0000000000 --- a/.ci/setup-windows-ci-vars.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/sh -ex - -CPU_ARCH="${1:-win64}" -COMPILER="${2:-msvc}" - -# These are CI specific, so we wrap them for portability -REPO_NAME="$BUILD_REPOSITORY_NAME" -REPO_BRANCH="$BUILD_SOURCEBRANCHNAME" -PR_NUMBER="$BUILD_PR_NUMBER" - -# Gather explicit version number and number of commits -COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ./rpcs3/rpcs3_version.cpp) -COMM_COUNT=$(git rev-list --count HEAD) -COMM_HASH=$(git rev-parse --short=8 HEAD) - -# Format the above into filenames -if [ -n "$PR_NUMBER" ]; then - AVVER="${COMM_TAG}-${COMM_HASH}" - BUILD_RAW="rpcs3-v${AVVER}_${CPU_ARCH}_${COMPILER}" - BUILD="${BUILD_RAW}.7z" -else - AVVER="${COMM_TAG}-${COMM_COUNT}" - BUILD_RAW="rpcs3-v${AVVER}-${COMM_HASH}_${CPU_ARCH}_${COMPILER}" - BUILD="${BUILD_RAW}.7z" -fi - -# BRANCH is used for experimental build warnings for pr builds, used in main_window.cpp. -# BUILD is the name of the release artifact -# BUILD_RAW is just filename -# AVVER is used for GitHub releases, it is the version number. 
-BRANCH="${REPO_NAME}/${REPO_BRANCH}" - -# SC2129 -{ - echo "BRANCH=$BRANCH" - echo "BUILD=$BUILD" - echo "BUILD_RAW=$BUILD_RAW" - echo "AVVER=$AVVER" -} >> .ci/ci-vars.env diff --git a/.ci/setup-windows.sh b/.ci/setup-windows.sh index 11a68367b8..f1073c9218 100755 --- a/.ci/setup-windows.sh +++ b/.ci/setup-windows.sh @@ -1,5 +1,10 @@ #!/bin/sh -ex +# These are Azure specific, so we wrap them for portability +REPO_NAME="$BUILD_REPOSITORY_NAME" +REPO_BRANCH="$SYSTEM_PULLREQUEST_SOURCEBRANCH" +PR_NUMBER="$SYSTEM_PULLREQUEST_PULLREQUESTID" + # Resource/dependency URLs # Qt mirrors can be volatile and slow, so we list 2 #QT_HOST="http://mirrors.ocf.berkeley.edu/qt/" @@ -15,8 +20,9 @@ QT_TOOL_URL="${QT_HOST}${QT_PREFIX}${QT_PREFIX_2}qttools${QT_SUFFIX}" QT_MM_URL="${QT_HOST}${QT_PREFIX}addons.qtmultimedia.${QT_PREFIX_2}qtmultimedia${QT_SUFFIX}" QT_SVG_URL="${QT_HOST}${QT_PREFIX}${QT_PREFIX_2}qtsvg${QT_SUFFIX}" LLVMLIBS_URL="https://github.com/RPCS3/llvm-mirror/releases/download/custom-build-win-${LLVM_VER}/llvmlibs_mt.7z" +GLSLANG_URL='https://github.com/RPCS3/glslang/releases/latest/download/glslanglibs_mt.7z' VULKAN_SDK_URL="https://www.dropbox.com/scl/fi/sjjh0fc4ld281pjbl2xzu/VulkanSDK-${VULKAN_VER}-Installer.exe?rlkey=f6wzc0lvms5vwkt2z3qabfv9d&dl=1" -CCACHE_URL="https://github.com/ccache/ccache/releases/download/v4.11.2/ccache-4.11.2-windows-x86_64.zip" +CCACHE_URL="https://github.com/ccache/ccache/releases/download/v4.10.2/ccache-4.10.2-windows-x86_64.zip" DEP_URLS=" \ $QT_BASE_URL \ @@ -25,10 +31,11 @@ DEP_URLS=" \ $QT_MM_URL \ $QT_SVG_URL \ $LLVMLIBS_URL \ + $GLSLANG_URL \ $VULKAN_SDK_URL\ $CCACHE_URL" -# CI doesn't make a cache dir if it doesn't exist, so we do it manually +# Azure pipelines doesn't make a cache dir if it doesn't exist, so we do it manually [ -d "$DEPS_CACHE_DIR" ] || mkdir "$DEPS_CACHE_DIR" # Pull all the submodules except llvm, since it is built separately and we just download that build @@ -73,6 +80,7 @@ for url in $DEP_URLS; do case "$url" in *qt*) checksum=$(curl -fL "${url}.sha1"); algo="sha1"; outDir="$QTDIR/" ;; *llvm*) checksum=$(curl -fL "${url}.sha256"); algo="sha256"; outDir="./build/lib_ext/Release-x64" ;; + *glslang*) checksum=$(curl -fL "${url}.sha256"); algo="sha256"; outDir="./build/lib_ext/Release-x64" ;; *ccache*) checksum=$CCACHE_SHA; algo="sha256"; outDir="$CCACHE_BIN_DIR" ;; *Vulkan*) # Vulkan setup needs to be run in batch environment @@ -94,3 +102,33 @@ done CCACHE_SH_DIR=$(cygpath -u "$CCACHE_BIN_DIR") mv "$CCACHE_SH_DIR"/ccache-*/* "$CCACHE_SH_DIR" cp "$CCACHE_SH_DIR"/ccache.exe "$CCACHE_SH_DIR"/cl.exe + +# Gather explicit version number and number of commits +COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ./rpcs3/rpcs3_version.cpp) +COMM_COUNT=$(git rev-list --count HEAD) +COMM_HASH=$(git rev-parse --short=8 HEAD) + +# Format the above into filenames +if [ -n "$PR_NUMBER" ]; then + AVVER="${COMM_TAG}-${COMM_HASH}" + BUILD_RAW="rpcs3-v${AVVER}_win64" + BUILD="${BUILD_RAW}.7z" +else + AVVER="${COMM_TAG}-${COMM_COUNT}" + BUILD_RAW="rpcs3-v${AVVER}-${COMM_HASH}_win64" + BUILD="${BUILD_RAW}.7z" +fi + +# BRANCH is used for experimental build warnings for pr builds, used in main_window.cpp. +# BUILD is the name of the release artifact +# BUILD_RAW is just filename +# AVVER is used for GitHub releases, it is the version number. 
+BRANCH="${REPO_NAME}/${REPO_BRANCH}" + +# SC2129 +{ + echo "BRANCH=$BRANCH" + echo "BUILD=$BUILD" + echo "BUILD_RAW=$BUILD_RAW" + echo "AVVER=$AVVER" +} >> .ci/ci-vars.env diff --git a/.cirrus.yml b/.cirrus.yml new file mode 100644 index 0000000000..3b71413102 --- /dev/null +++ b/.cirrus.yml @@ -0,0 +1,153 @@ +env: + CIRRUS_CLONE_DEPTH: 0 # Unshallow clone to obtain proper GIT_VERSION + BUILD_REPOSITORY_NAME: $CIRRUS_REPO_FULL_NAME + SYSTEM_PULLREQUEST_SOURCEBRANCH: $CIRRUS_BRANCH + SYSTEM_PULLREQUEST_PULLREQUESTID: $CIRRUS_PR + BUILD_SOURCEVERSION: $CIRRUS_CHANGE_IN_REPO + BUILD_SOURCEBRANCHNAME: $CIRRUS_BRANCH + RPCS3_TOKEN: ENCRYPTED[100ebb8e3552bf2021d0ef55dccda3e58d27be5b6cab0b0b92843ef490195d3c4edaefa087e4a3b425caa6392300b9b1] + QT_VER_MAIN: '6' + QT_VER: '6.8.3' + LLVM_COMPILER_VER: '19' + LLVM_VER: '19.1.7' + +# windows_task: +# matrix: +# - name: Cirrus Windows +# windows_container: +# image: cirrusci/windowsservercore:visualstudio2019 +# cpu: 8 +# memory: 16G +# env: +# CIRRUS_SHELL: "bash" +# COMPILER: msvc +# BUILD_ARTIFACTSTAGINGDIRECTORY: ${CIRRUS_WORKING_DIR}\artifacts\ +# QT_VER_MSVC: 'msvc2022' +# QT_DATE: '202503201308' +# QTDIR: C:\Qt\${QT_VER}\${QT_VER_MSVC}_64 +# VULKAN_VER: '1.3.268.0' +# VULKAN_SDK_SHA: '8459ef49bd06b697115ddd3d97c9aec729e849cd775f5be70897718a9b3b9db5' +# VULKAN_SDK: C:\VulkanSDK\${VULKAN_VER} +# CACHE_DIR: "./cache" +# UPLOAD_COMMIT_HASH: 7d09e3be30805911226241afbb14f8cdc2eb054e +# UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-win" +# deps_cache: +# folder: "./cache" +# #obj_cache: +# # folder: "./tmp" +# #obj2_cache: +# # folder: "./rpcs3/x64" +# setup_script: +# - './.ci/get_keys-windows.sh' +# - './.ci/setup-windows.sh' +# rpcs3_script: +# - export PATH=${PATH}:"C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\MSBuild\Current\Bin" +# - msbuild.exe rpcs3.sln //p:Configuration=Release //m +# deploy_script: +# - mkdir artifacts +# - source './.ci/export-cirrus-vars.sh' +# - './.ci/deploy-windows.sh' +# artifacts: +# name: Artifact +# path: "*.7z*" +# push_script: | +# if [ "$CIRRUS_REPO_OWNER" = "RPCS3" ] && [ -z "$CIRRUS_PR" ] && [ "$CIRRUS_BRANCH" = "master" ]; then +# source './.ci/export-cirrus-vars.sh' +# './.ci/github-upload.sh' +# fi; + +# linux_task: +# container: +# image: rpcs3/rpcs3-ci-jammy:1.2 +# cpu: 4 +# memory: 16G +# env: +# BUILD_ARTIFACTSTAGINGDIRECTORY: ${CIRRUS_WORKING_DIR}/artifacts +# ARTDIR: ${CIRRUS_WORKING_DIR}/artifacts/ +# CCACHE_DIR: "/tmp/ccache_dir" +# CCACHE_MAXSIZE: 300M +# CI_HAS_ARTIFACTS: true +# UPLOAD_COMMIT_HASH: d812f1254a1157c80fd402f94446310560f54e5f +# UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-linux" +# DEPLOY_APPIMAGE: true +# APPDIR: "./appdir" +# RELEASE_MESSAGE: "../GitHubReleaseMessage.txt" +# ccache_cache: +# folder: "/tmp/ccache_dir" +# matrix: +# - name: Cirrus Linux GCC +# env: +# COMPILER: gcc +# gcc_script: +# - mkdir artifacts +# - ".ci/build-linux.sh" +# - name: Cirrus Linux Clang +# env: +# COMPILER: clang +# clang_script: +# - mkdir artifacts +# - ".ci/build-linux.sh" +# artifacts: +# name: Artifact +# path: "artifacts/*" +# push_script: | +# if [ "$CIRRUS_REPO_OWNER" = "RPCS3" ] && [ -z "$CIRRUS_PR" ] && [ "$CIRRUS_BRANCH" = "master" ] && [ "$COMPILER" = "gcc" ]; then +# COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ./rpcs3/rpcs3_version.cpp) +# COMM_COUNT=$(git rev-list --count HEAD) +# COMM_HASH=$(git rev-parse --short=8 HEAD) + +# export AVVER="${COMM_TAG}-${COMM_COUNT}" + +# .ci/github-upload.sh +# fi; + +freebsd_task: + matrix: + - name: Cirrus 
FreeBSD + freebsd_instance: + image_family: freebsd-13-5 + cpu: 8 + memory: 8G + env: + CCACHE_MAXSIZE: 300M # 3x clean build, rounded + CCACHE_DIR: /tmp/ccache_dir + ccache_cache: + folder: /tmp/ccache_dir + install_script: "sh -ex ./.ci/install-freebsd.sh" + script: "./.ci/build-freebsd.sh" + +# linux_aarch64_task: +# env: +# BUILD_ARTIFACTSTAGINGDIRECTORY: ${CIRRUS_WORKING_DIR}/artifacts +# ARTDIR: ${CIRRUS_WORKING_DIR}/artifacts/ +# CCACHE_DIR: "/tmp/ccache_dir" +# CCACHE_MAXSIZE: 300M +# CI_HAS_ARTIFACTS: true +# UPLOAD_COMMIT_HASH: a1d35836e8d45bfc6f63c26f0a3e5d46ef622fe1 +# UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-linux-arm64" +# DEPLOY_APPIMAGE: true +# APPDIR: "./appdir" +# RELEASE_MESSAGE: "../GitHubReleaseMessage.txt" +# COMPILER: clang +# ccache_cache: +# folder: "/tmp/ccache_dir" +# matrix: +# - name: Cirrus Linux AArch64 Clang +# arm_container: +# image: 'docker.io/rpcs3/rpcs3-ci-jammy-aarch64:1.2' +# cpu: 8 +# memory: 8G +# clang_script: +# - mkdir artifacts +# - "sh -ex ./.ci/build-linux-aarch64.sh" +# artifacts: +# name: Artifact +# path: "artifacts/*" +# push_script: | +# if [ "$CIRRUS_REPO_OWNER" = "RPCS3" ] && [ -z "$CIRRUS_PR" ] && [ "$CIRRUS_BRANCH" = "master" ]; then +# COMM_TAG=$(awk '/version{.*}/ { printf("%d.%d.%d", $5, $6, $7) }' ./rpcs3/rpcs3_version.cpp) +# COMM_COUNT=$(git rev-list --count HEAD) +# COMM_HASH=$(git rev-parse --short=8 HEAD) +# export AVVER="${COMM_TAG}-${COMM_COUNT}" +# .ci/github-upload.sh +# fi; diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index d719944078..a0453b46bc 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -16,4 +16,4 @@ Submitting your test results for Commercial Games must be done on our forums. Pl # Contributing -Check the [Coding Style Guidelines](https://github.com/RPCS3/rpcs3/wiki/Coding-Style) and [Developer Information](https://github.com/RPCS3/rpcs3/wiki/Developer-Information). If you have any questions, hit us up on our [Discord Server](https://discord.gg/rpcs3) in the **#development** channel. +Check the [Coding Style Guidelines](https://github.com/RPCS3/rpcs3/wiki/Coding-Style) and [Developer Information](https://github.com/RPCS3/rpcs3/wiki/Developer-Information). If you have any questions, hit us up on our [Discord Server](https://discord.me/RPCS3) in the **#development** channel. diff --git a/.github/ISSUE_TEMPLATE/1-regression-report.yml b/.github/ISSUE_TEMPLATE/1-regression-report.yml index 7c8659b62e..865f96e92b 100644 --- a/.github/ISSUE_TEMPLATE/1-regression-report.yml +++ b/.github/ISSUE_TEMPLATE/1-regression-report.yml @@ -7,7 +7,7 @@ body: attributes: value: | # Summary - Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.gg/rpcs3) or [forums](https://forums.rpcs3.net/) instead. + Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.me/RPCS3) or [forums](https://forums.rpcs3.net/) instead. - type: textarea id: quick-summary attributes: @@ -50,7 +50,7 @@ body: * Completely close RPCS3 and locate the log file. RPCS3's Log file will be ```RPCS3.log.gz``` (sometimes shows as RPCS3.log with zip icon) or ```RPCS3.log``` (sometimes shows as RPCS3 wtih notepad icon). - * On Windows it will be in the ```log``` folder inside your RPCS3 folder. + * On Windows it will be in the RPCS3 directory near the executable * On Linux it will be in ```~/.cache/rpcs3/``` * On MacOS it will be in ```~/Library/Caches/rpcs3```. 
If you're unable to locate it copy paste the path in Spotlight and hit enter. - type: textarea diff --git a/.github/ISSUE_TEMPLATE/2-bug-report.yml b/.github/ISSUE_TEMPLATE/2-bug-report.yml index 4a82a03008..00df05c66b 100644 --- a/.github/ISSUE_TEMPLATE/2-bug-report.yml +++ b/.github/ISSUE_TEMPLATE/2-bug-report.yml @@ -7,7 +7,7 @@ body: attributes: value: | # Summary - Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.gg/rpcs3) or [forums](https://forums.rpcs3.net/) instead. + Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.me/RPCS3) or [forums](https://forums.rpcs3.net/) instead. - type: textarea id: quick-summary attributes: @@ -36,7 +36,7 @@ body: * Completely close RPCS3 and locate the log file. RPCS3's Log file will be ```RPCS3.log.gz``` (sometimes shows as RPCS3.log with zip icon) or ```RPCS3.log``` (sometimes shows as RPCS3 wtih notepad icon). - * On Windows it will be in the ```log``` folder inside your RPCS3 folder. + * On Windows it will be in the RPCS3 directory near the executable * On Linux it will be in ```~/.cache/rpcs3/``` * On MacOS it will be in ```~/Library/Caches/rpcs3```. If you're unable to locate it copy paste the path in Spotlight and hit enter. - type: textarea diff --git a/.github/ISSUE_TEMPLATE/3-feature-request.yml b/.github/ISSUE_TEMPLATE/3-feature-request.yml index 153c44c41c..49a44b923c 100644 --- a/.github/ISSUE_TEMPLATE/3-feature-request.yml +++ b/.github/ISSUE_TEMPLATE/3-feature-request.yml @@ -6,7 +6,7 @@ body: - type: markdown attributes: value: | - Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.gg/rpcs3) or [forums](https://forums.rpcs3.net/) instead. + Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.me/RPCS3) or [forums](https://forums.rpcs3.net/) instead. - type: textarea id: quick-summary attributes: @@ -31,6 +31,6 @@ body: * If this feature is something that a game is trying to use, upload a log file for it. RPCS3's Log file will be ```RPCS3.log.gz``` (sometimes shows as RPCS3.log with zip icon) or ```RPCS3.log``` (sometimes shows as RPCS3 wtih notepad icon). - * On Windows it will be in the ```log``` folder inside your RPCS3 folder. + * On Windows it will be in the RPCS3 directory near the executable * On Linux it will be in ```~/.cache/rpcs3/``` * On MacOS it will be in ```~/Library/Caches/rpcs3```. If you're unable to locate it copy paste the path in Spotlight and hit enter. diff --git a/.github/ISSUE_TEMPLATE/4-advanced.md b/.github/ISSUE_TEMPLATE/4-advanced.md index 78a9d96178..f0ab236298 100644 --- a/.github/ISSUE_TEMPLATE/4-advanced.md +++ b/.github/ISSUE_TEMPLATE/4-advanced.md @@ -7,7 +7,7 @@ assignees: '' --- -## Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.gg/rpcs3) or [forums](https://forums.rpcs3.net/) instead. +## Please do not ask for help or report compatibility regressions here, use [RPCS3 Discord server](https://discord.me/RPCS3) or [forums](https://forums.rpcs3.net/) instead. You're using the advanced template. You're expected to know what to write in order to fill in all the required information for proper report. 
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 1dd094e1dc..137afb75a2 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -4,7 +4,7 @@ contact_links: url: https://rpcs3.net/quickstart about: Everything you need to know to install and configure emulator, and add games - name: Ask for help - url: https://discord.gg/rpcs3 + url: https://discord.me/RPCS3 about: If you have some questions or need help, please use our Discord server instead of GitHub - name: Report game compatibility url: https://forums.rpcs3.net/thread-196671.html diff --git a/.github/PR-BUILD.md b/.github/PR-BUILD.md new file mode 100644 index 0000000000..fb9a0aeb17 --- /dev/null +++ b/.github/PR-BUILD.md @@ -0,0 +1,18 @@ +## How to test a PR build + +Please take into account, that RPCS3 build usually takes some time (about 15 mins), so you can't access a build if a PR was just submitted. + +- Open a PR you want to test +- Scroll to the very bottom and locate the **Checks** section +- Click on **Show all checks** + You are supposed to see something like this + ![image](https://user-images.githubusercontent.com/10283761/116630952-2cd99e00-a94c-11eb-933e-986d6020ca92.png) +- Click on __Details__ on either **Cirrus Linux GCC** or **Cirrus Windows** +- Click **View more details on Cirrus CI** at the very bottom + ![image](https://user-images.githubusercontent.com/10283761/116631111-5e526980-a94c-11eb-95f7-751e6f15e1ea.png) +- Click on the download button for **Artifact** on the **Artifacts** block + ![image](https://user-images.githubusercontent.com/10283761/116631322-bee1a680-a94c-11eb-89a3-be365783582e.png) + +- Congratulations! You are now downloading an RPCS3 build for that specific PR. + +__Please note that PR builds are not supposed to be stable because they contain new changesets.__ diff --git a/.github/PULL_REQUEST_TEMPLATE/1-default.md b/.github/PULL_REQUEST_TEMPLATE/1-default.md new file mode 100644 index 0000000000..710d07ae2a --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE/1-default.md @@ -0,0 +1,3 @@ + + +[How to test this PR](.github/PR-BUILD.md) \ No newline at end of file diff --git a/.github/workflows/llvm.yml b/.github/workflows/llvm.yml deleted file mode 100644 index e3e3e76c50..0000000000 --- a/.github/workflows/llvm.yml +++ /dev/null @@ -1,72 +0,0 @@ -name: Build LLVM - -defaults: - run: - shell: bash -on: - workflow_dispatch: - -concurrency: - group: ${{ github.ref }}-${{ github.event_name }} - cancel-in-progress: true - -env: - BUILD_ARTIFACTSTAGINGDIRECTORY: ${{ github.workspace }}/artifacts/ - -jobs: - Windows_Build: - if: github.event_name == 'workflow_dispatch' - name: LLVM Windows (MSVC) - runs-on: windows-2025 - env: - COMPILER: msvc - CCACHE_SHA: '1f39f3ad5aae3fe915e99ad1302633bc8f6718e58fa7c0de2b0ba7e080f0f08c' - CCACHE_BIN_DIR: 'C:\ccache_bin' - CCACHE_DIR: 'C:\ccache' - CCACHE_INODECACHE: 'true' - CCACHE_SLOPPINESS: 'time_macros' - DEPS_CACHE_DIR: ./dependency_cache - steps: - - - name: Checkout repository - uses: actions/checkout@main - with: - fetch-depth: 0 - - - name: Restore Dependencies Cache - uses: actions/cache/restore@main - id: restore-dependencies-cache - with: - path: ${{ env.DEPS_CACHE_DIR }} - key: "${{ runner.os }}-${{ env.COMPILER }}-llvm-${{ env.CCACHE_SHA }}" - restore-keys: ${{ runner.os }}-${{ env.COMPILER }}-llvm - - - name: Download and unpack dependencies - run: .ci/setup-llvm.sh - - - name: Add msbuild to PATH - uses: microsoft/setup-msbuild@main - - - name: Compile LLVM - shell: pwsh - run: 
msbuild 3rdparty\llvm\llvm_build.vcxproj /p:SolutionDir="$(pwd)/" /p:Configuration=Release /v:minimal /p:Platform=x64 /p:PreferredToolArchitecture=x64 /p:CLToolPath=${{ env.CCACHE_BIN_DIR }} /p:UseMultiToolTask=true /p:CustomAfterMicrosoftCommonTargets="${{ github.workspace }}\buildfiles\msvc\ci_only.targets" - - - name: Pack up build artifacts - run: | - mkdir -p "${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}" - .ci/deploy-llvm.sh - - - name: Upload artifacts (7z) - uses: actions/upload-artifact@main - with: - name: LLVM for Windows (MSVC) - path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }} - compression-level: 0 - if-no-files-found: error - - - name: Save Dependencies Cache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.DEPS_CACHE_DIR }} - key: ${{ steps.restore-dependencies-cache.outputs.cache-primary-key }} diff --git a/.github/workflows/rpcs3.yml b/.github/workflows/rpcs3.yml index 1981f78edb..d468496f76 100644 --- a/.github/workflows/rpcs3.yml +++ b/.github/workflows/rpcs3.yml @@ -5,8 +5,6 @@ defaults: shell: bash on: push: - branches: - - master # Only trigger push event on 'master' branch pull_request: workflow_dispatch: @@ -17,42 +15,36 @@ concurrency: env: BUILD_REPOSITORY_NAME: ${{ github.repository }} BUILD_SOURCEBRANCHNAME: ${{ github.ref_name }} - BUILD_PR_NUMBER: ${{ github.event.pull_request.number }} BUILD_SOURCEVERSION: ${{ github.sha }} BUILD_ARTIFACTSTAGINGDIRECTORY: ${{ github.workspace }}/artifacts/ jobs: Linux_Build: - # Only run push event on master branch of main repo, but run all PRs - if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master') strategy: fail-fast: false matrix: include: - os: ubuntu-24.04 - docker_img: "rpcs3/rpcs3-ci-jammy:1.6" + docker_img: "rpcs3/rpcs3-ci-jammy:1.3" build_sh: "/rpcs3/.ci/build-linux.sh" compiler: clang UPLOAD_COMMIT_HASH: d812f1254a1157c80fd402f94446310560f54e5f UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-linux" - os: ubuntu-24.04 - docker_img: "rpcs3/rpcs3-ci-jammy:1.6" + docker_img: "rpcs3/rpcs3-ci-jammy:1.3" build_sh: "/rpcs3/.ci/build-linux.sh" compiler: gcc - os: ubuntu-24.04-arm - docker_img: "rpcs3/rpcs3-ci-jammy-aarch64:1.6" + docker_img: "rpcs3/rpcs3-ci-jammy-aarch64:1.3" build_sh: "/rpcs3/.ci/build-linux-aarch64.sh" compiler: clang UPLOAD_COMMIT_HASH: a1d35836e8d45bfc6f63c26f0a3e5d46ef622fe1 UPLOAD_REPO_FULL_NAME: "rpcs3/rpcs3-binaries-linux-arm64" - - os: ubuntu-24.04-arm - docker_img: "rpcs3/rpcs3-ci-jammy-aarch64:1.6" - build_sh: "/rpcs3/.ci/build-linux-aarch64.sh" - compiler: gcc name: RPCS3 Linux ${{ matrix.os }} ${{ matrix.compiler }} runs-on: ${{ matrix.os }} env: CCACHE_DIR: ${{ github.workspace }}/ccache + CI_HAS_ARTIFACTS: true DEPLOY_APPIMAGE: true APPDIR: "/rpcs3/build/appdir" ARTDIR: "/root/artifacts" @@ -60,29 +52,28 @@ jobs: COMPILER: ${{ matrix.compiler }} UPLOAD_COMMIT_HASH: ${{ matrix.UPLOAD_COMMIT_HASH }} UPLOAD_REPO_FULL_NAME: ${{ matrix.UPLOAD_REPO_FULL_NAME }} - RUN_UNIT_TESTS: github.event_name == 'pull_request' && 'ON' || 'OFF' steps: - name: Checkout repository uses: actions/checkout@main with: fetch-depth: 0 - - name: Restore build Ccache - uses: actions/cache/restore@main - id: restore-build-ccache + - name: Setup Cache + uses: actions/cache@main with: path: ${{ env.CCACHE_DIR }} key: ${{ runner.os }}-ccache-${{ matrix.compiler }}-${{ runner.arch }}-${{github.run_id}} - restore-keys: ${{ runner.os }}-ccache-${{ matrix.compiler }}-${{ runner.arch }}- + restore-keys: | + ${{ runner.os }}-ccache-${{ 
matrix.compiler }}-${{ runner.arch }}- - name: Docker setup and build run: | docker pull --quiet ${{ matrix.docker_img }} - docker run \ - -v $PWD:/rpcs3 \ + docker run \ + -v $PWD:/rpcs3 \ --env-file .ci/docker.env \ -v ${{ env.CCACHE_DIR }}:/root/.ccache \ - -v ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }}:${{ env.ARTDIR }} \ + -v ${{ github.workspace }}/artifacts:/root/artifacts \ ${{ matrix.docker_img }} \ ${{ matrix.build_sh }} @@ -108,117 +99,19 @@ jobs: export AVVER="${COMM_TAG}-${COMM_COUNT}" .ci/github-upload.sh - - name: Save build Ccache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }} - - Mac_Build: - # Only run push event on master branch of main repo, but run all PRs - if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master') - strategy: - fail-fast: false - matrix: - include: - - name: Intel - build_sh: "arch -X86_64 .ci/build-mac.sh" - UPLOAD_COMMIT_HASH: 51ae32f468089a8169aaf1567de355ff4a3e0842 - UPLOAD_REPO_FULL_NAME: rpcs3/rpcs3-binaries-mac - - name: Apple Silicon - build_sh: .ci/build-mac-arm64.sh - UPLOAD_COMMIT_HASH: 8e21bdbc40711a3fccd18fbf17b742348b0f4281 - UPLOAD_REPO_FULL_NAME: rpcs3/rpcs3-binaries-mac-arm64 - name: RPCS3 Mac ${{ matrix.name }} - runs-on: macos-14 - env: - CCACHE_DIR: /tmp/ccache_dir - QT_VER: '6.7.3' - QT_VER_MAIN: '6' - LLVM_COMPILER_VER: '19' - RELEASE_MESSAGE: ../GitHubReleaseMessage.txt - UPLOAD_COMMIT_HASH: ${{ matrix.UPLOAD_COMMIT_HASH }} - UPLOAD_REPO_FULL_NAME: ${{ matrix.UPLOAD_REPO_FULL_NAME }} - RUN_UNIT_TESTS: github.event_name == 'pull_request' && 'ON' || 'OFF' - steps: - - name: Checkout repository - uses: actions/checkout@main - with: - fetch-depth: 0 - - - name: Restore Build Ccache - uses: actions/cache/restore@main - id: restore-build-ccache - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ runner.os }}-ccache-${{ matrix.name }}-${{github.run_id}} - restore-keys: ${{ runner.os }}-ccache-${{ matrix.name }}- - - - name: Restore Qt Cache - uses: actions/cache/restore@main - id: restore-qt-cache - with: - path: /tmp/Qt - key: ${{ runner.os }}-qt-${{ matrix.name }}-${{ env.QT_VER }} - restore-keys: ${{ runner.os }}-qt-${{ matrix.name }}-${{ env.QT_VER }} - - - name: Build - run: ${{ matrix.build_sh }} - - - name: Upload artifacts - uses: actions/upload-artifact@main - with: - name: RPCS3 for Mac (${{ matrix.name }}) - path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }} - compression-level: 0 - - - name: Export Variables - run: | - while IFS='=' read -r key val; do - # Skip lines that are empty or start with '#' - [[ -z "$key" || "$key" =~ ^# ]] && continue - echo "$key=$val" >> "${{ github.env }}" - done < .ci/ci-vars.env - - - name: Deploy master build to GitHub Releases - if: | - github.event_name != 'pull_request' && - github.repository == 'RPCS3/rpcs3' && - github.ref == 'refs/heads/master' - env: - RPCS3_TOKEN: ${{ secrets.RPCS3_TOKEN }} - run: .ci/github-upload.sh - - - name: Save Build Ccache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }} - - - name: Save Qt Cache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: /tmp/Qt - key: ${{ steps.restore-qt-cache.outputs.cache-primary-key }} - Windows_Build: - # Only run push event on master branch of main repo, but run all PRs - if: github.event_name != 
'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master') name: RPCS3 Windows runs-on: windows-2025 env: COMPILER: msvc QT_VER_MAIN: '6' - QT_VER: '6.9.1' + QT_VER: '6.8.3' QT_VER_MSVC: 'msvc2022' - QT_DATE: '202505291653' + QT_DATE: '202503201308' LLVM_VER: '19.1.7' VULKAN_VER: '1.3.268.0' VULKAN_SDK_SHA: '8459ef49bd06b697115ddd3d97c9aec729e849cd775f5be70897718a9b3b9db5' - CCACHE_SHA: '1f39f3ad5aae3fe915e99ad1302633bc8f6718e58fa7c0de2b0ba7e080f0f08c' + CCACHE_SHA: '6252f081876a9a9f700fae13a5aec5d0d486b28261d7f1f72ac11c7ad9df4da9' CCACHE_BIN_DIR: 'C:\ccache_bin' CCACHE_DIR: 'C:\ccache' CCACHE_INODECACHE: 'true' @@ -233,12 +126,6 @@ jobs: with: fetch-depth: 0 - - name: Setup NuGet - uses: nuget/setup-nuget@v2 - - - name: Restore NuGet packages - run: nuget restore rpcs3.sln - - name: Setup env shell: pwsh run: | @@ -248,26 +135,22 @@ jobs: - name: Get Cache Keys run: .ci/get_keys-windows.sh - - name: Restore Build Ccache - uses: actions/cache/restore@main - id: restore-build-ccache + - name: Setup Build Ccache + uses: actions/cache@main with: path: ${{ env.CCACHE_DIR }} key: "${{ runner.os }}-ccache-${{ env.COMPILER }}-${{github.run_id}}" restore-keys: ${{ runner.os }}-ccache-${{ env.COMPILER }}- - - name: Restore Dependencies Cache - uses: actions/cache/restore@main - id: restore-dependencies-cache + - name: Setup Dependencies Cache + uses: actions/cache@main with: path: ${{ env.DEPS_CACHE_DIR }} - key: "${{ runner.os }}-${{ env.COMPILER }}-${{ env.QT_VER }}-${{ env.VULKAN_SDK_SHA }}-${{ env.CCACHE_SHA }}-${{ hashFiles('llvm.lock') }}" + key: "${{ runner.os }}-${{ env.COMPILER }}-${{ env.QT_VER }}-${{ env.VULKAN_SDK_SHA }}-${{ env.CCACHE_SHA }}-${{ hashFiles('llvm.lock') }}-${{ hashFiles('glslang.lock') }}" restore-keys: ${{ runner.os }}-${{ env.COMPILER }}- - name: Download and unpack dependencies - run: | - .ci/setup-windows.sh - .ci/setup-windows-ci-vars.sh win64 msvc + run: .ci/setup-windows.sh - name: Export Variables run: | @@ -282,12 +165,7 @@ jobs: - name: Compile RPCS3 shell: pwsh - run: msbuild rpcs3.sln /p:Configuration=Release /v:minimal /p:Platform=x64 /p:PreferredToolArchitecture=x64 /p:CLToolPath=${{ env.CCACHE_BIN_DIR }} /p:UseMultiToolTask=true /p:CustomAfterMicrosoftCommonTargets="${{ github.workspace }}\buildfiles\msvc\ci_only.targets" - - - name: Run Unit Tests - if: github.event_name == 'pull_request' - shell: pwsh - run: build\lib\Release-x64\rpcs3_test.exe + run: msbuild rpcs3.sln /p:Configuration=Release /v:minimal /p:Platform=x64 /p:CLToolPath=${{ env.CCACHE_BIN_DIR }} /p:UseMultiToolTask=true /p:CustomAfterMicrosoftCommonTargets="${{ github.workspace }}\buildfiles\msvc\ci_only.targets" - name: Pack up build artifacts run: | @@ -310,135 +188,3 @@ jobs: env: RPCS3_TOKEN: ${{ secrets.RPCS3_TOKEN }} run: .ci/github-upload.sh - - - name: Save Build Ccache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }} - - - name: Save Dependencies Cache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.DEPS_CACHE_DIR }} - key: ${{ steps.restore-dependencies-cache.outputs.cache-primary-key }} - - Windows_Build_Clang: - # Only run push event on master branch of main repo, but run all PRs - if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master') - name: RPCS3 Windows Clang - runs-on: windows-2025 - strategy: - matrix: - include: - - msys2: clang64 - 
compiler: clang - arch: win64 - env: - CCACHE_DIR: 'C:\ccache' - steps: - - name: Checkout repository - uses: actions/checkout@main - with: - fetch-depth: 0 - - - name: Setup msys2 - uses: msys2/setup-msys2@v2 - with: - msystem: ${{ matrix.msys2 }} - update: true - cache: true - install: | - mingw-w64-clang-x86_64-clang - mingw-w64-clang-x86_64-ccache - mingw-w64-clang-x86_64-cmake - mingw-w64-clang-x86_64-lld - mingw-w64-clang-x86_64-ninja - mingw-w64-clang-x86_64-llvm - mingw-w64-clang-x86_64-ffmpeg - mingw-w64-clang-x86_64-opencv - mingw-w64-clang-x86_64-glew - mingw-w64-clang-x86_64-vulkan - mingw-w64-clang-x86_64-vulkan-headers - mingw-w64-clang-x86_64-vulkan-loader - mingw-w64-clang-x86_64-gtest - mingw-w64-clang-x86_64-qt6-base - mingw-w64-clang-x86_64-qt6-declarative - mingw-w64-clang-x86_64-qt6-multimedia - mingw-w64-clang-x86_64-qt6-svg - base-devel - curl - git - p7zip - - - name: Restore build Ccache - uses: actions/cache/restore@main - id: restore-build-ccache - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ runner.os }}-ccache-${{ matrix.compiler }}-${{ runner.arch }}-${{ github.run_id }} - restore-keys: ${{ runner.os }}-ccache-${{ matrix.compiler }}-${{ runner.arch }}- - - - name: Build RPCS3 - shell: msys2 {0} - run: | - export CCACHE_DIR=$(cygpath -u "$CCACHE_DIR") - echo "CCACHE_DIR=$CCACHE_DIR" - .ci/setup-windows-ci-vars.sh ${{ matrix.arch }} ${{ matrix.compiler }} - .ci/build-windows-clang.sh - - - name: Save build Ccache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }} - - - name: Upload artifacts - uses: actions/upload-artifact@main - with: - name: RPCS3 for Windows (${{ runner.arch }}, ${{ matrix.compiler }}) - path: ${{ env.BUILD_ARTIFACTSTAGINGDIRECTORY }} - compression-level: 0 - if-no-files-found: error - - FreeBSD_Build: - # Only run push event on master branch of main repo, but run all PRs - if: github.event_name != 'push' || (github.repository == 'RPCS3/rpcs3' && github.ref_name == 'master') - name: RPCS3 FreeBSD - runs-on: ubuntu-latest - timeout-minutes: 60 - env: - CCACHE_DIR: ${{ github.workspace }}/ccache - QT_VER_MAIN: '6' - LLVM_COMPILER_VER: '19' - steps: - - name: Checkout repository - uses: actions/checkout@main - with: - fetch-depth: 0 - - - name: Restore Build Ccache - uses: actions/cache/restore@main - id: restore-build-ccache - with: - path: ${{ env.CCACHE_DIR }} - key: FreeBSD-ccache-${{github.run_id}} - restore-keys: FreeBSD-ccache- - - - name: FreeBSD build - id: root - uses: vmactions/freebsd-vm@v1 - with: - envs: 'QT_VER_MAIN LLVM_COMPILER_VER CCACHE_DIR' - usesh: true - run: .ci/install-freebsd.sh && .ci/build-freebsd.sh - - - name: Save Build Ccache - if: github.ref == 'refs/heads/master' - uses: actions/cache/save@main - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ steps.restore-build-ccache.outputs.cache-primary-key }} diff --git a/.gitignore b/.gitignore index 4688d5fa52..13615be636 100644 --- a/.gitignore +++ b/.gitignore @@ -35,7 +35,6 @@ /lib /tmp /ipch -/packages /rpcs3/Debug /rpcs3/Release @@ -56,6 +55,9 @@ /bin/GuiConfigs/*.dat /bin/GuiConfigs/*.dat.* +# Some data from git +!/bin/git/ + # Visual Studio Files .vs/* .vscode/* diff --git a/.gitmodules b/.gitmodules index 427c61ffbd..305705b06a 100644 --- a/.gitmodules +++ b/.gitmodules @@ -106,5 +106,5 @@ ignore = dirty [submodule "3rdparty/GPUOpen/VulkanMemoryAllocator"] path = 3rdparty/GPUOpen/VulkanMemoryAllocator - url = 
../../GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git + url = ../../Megamouse/VulkanMemoryAllocator.git ignore = dirty diff --git a/3rdparty/CMakeLists.txt b/3rdparty/CMakeLists.txt index 6c49a889ba..cb0909e978 100644 --- a/3rdparty/CMakeLists.txt +++ b/3rdparty/CMakeLists.txt @@ -131,14 +131,7 @@ add_subdirectory(stblib) add_subdirectory(discord-rpc) # Cubeb -if(USE_SYSTEM_CUBEB) - find_package(cubeb REQUIRED GLOBAL) - message(STATUS "Using system cubeb version '${cubeb_VERSION}'") - add_library(3rdparty::cubeb ALIAS cubeb::cubeb) -else() - message(STATUS "Using static cubeb from 3rdparty") - add_subdirectory(cubeb EXCLUDE_FROM_ALL) -endif() +add_subdirectory(cubeb EXCLUDE_FROM_ALL) # SoundTouch add_subdirectory(SoundTouch EXCLUDE_FROM_ALL) @@ -200,6 +193,9 @@ if(USE_VULKAN) if (WAYLAND_FOUND) target_include_directories(3rdparty_vulkan INTERFACE ${WAYLAND_INCLUDE_DIR}) + + target_compile_definitions(3rdparty_vulkan + INTERFACE -DVK_USE_PLATFORM_WAYLAND_KHR) endif() endif() @@ -215,29 +211,6 @@ endif() # AsmJit add_subdirectory(asmjit EXCLUDE_FROM_ALL) -# SDL3 -set(SDL3_TARGET 3rdparty_dummy_lib) -if(USE_SDL) - if(USE_SYSTEM_SDL) - find_package(SDL3) - if(SDL3_FOUND AND SDL3_VERSION VERSION_GREATER_EQUAL 3.2.0) - message(STATUS "Using system SDL3 version '${SDL3_VERSION}'") - add_library(3rdparty_sdl3 INTERFACE) - target_compile_definitions(3rdparty_sdl3 INTERFACE -DHAVE_SDL3=1) - target_link_libraries(3rdparty_sdl3 INTERFACE SDL3::SDL3) - set(SDL3_TARGET 3rdparty_sdl3) - else() - message(FATAL_ERROR "SDL3 is not available on this system") - endif() - else() - message(STATUS "Using static SDL3 from 3rdparty") - add_subdirectory(libsdl-org EXCLUDE_FROM_ALL) - target_compile_definitions(SDL3-static INTERFACE -DHAVE_SDL3=1) - set(SDL3_TARGET SDL3-static) - set(SDL3_DIR "${CMAKE_CURRENT_BINARY_DIR}/libsdl-org/SDL" CACHE STRING "") - endif() -endif() - # OpenAL if (NOT ANDROID) add_subdirectory(OpenAL EXCLUDE_FROM_ALL) @@ -252,31 +225,31 @@ if(USE_FAUDIO) # FAudio depends on SDL3 find_package(SDL3) if (USE_SYSTEM_FAUDIO) - if (SDL3_FOUND AND SDL3_VERSION VERSION_GREATER_EQUAL 3.2.0) + if (NOT SDL3_FOUND OR SDL3_VERSION VERSION_LESS 3.2.0) + message(WARNING + "RPCS3: System FAudio requires SDL 3.2.0 or newer. Since a valid SDL3" + ">=3.2.0 version cannot be found, building with FAudio will be skipped.") + set(USE_FAUDIO OFF CACHE BOOL "Disabled using system FAudio with SDL < 3.2.0" FORCE) + else() message(STATUS "RPCS3: Using system FAudio") find_package(FAudio REQUIRED CONFIGS FAudioConfig.cmake FAudio-config.cmake) add_library(3rdparty_FAudio INTERFACE) target_link_libraries(3rdparty_FAudio INTERFACE FAudio) target_compile_definitions(3rdparty_FAudio INTERFACE -DHAVE_FAUDIO) set(FAUDIO_TARGET 3rdparty_FAudio) - else() - message(WARNING - "RPCS3: System FAudio requires SDL 3.2.0 or newer. Since a valid SDL3" - ">=3.2.0 version cannot be found, building with FAudio will be skipped.") - set(USE_FAUDIO OFF CACHE BOOL "Disabled using system FAudio with SDL < 3.2.0" FORCE) endif() else() - if (SDL3_FOUND AND SDL3_VERSION VERSION_GREATER_EQUAL 3.2.0) + if (NOT SDL3_FOUND OR SDL3_VERSION VERSION_LESS 3.2.0) + message(WARNING + "-- RPCS3: 3rdparty FAudio requires SDL 3.2.0 or newer. 
Since a valid SDL3" + ">=3.2.0 version cannot be found, building with FAudio will be skipped.") + set(USE_FAUDIO OFF CACHE BOOL "Disabled FAudio with SDL < 3.2.0" FORCE) + else() message(STATUS "RPCS3: Using builtin FAudio") set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared library") add_subdirectory(FAudio EXCLUDE_FROM_ALL) target_compile_definitions(FAudio-static INTERFACE -DHAVE_FAUDIO) set(FAUDIO_TARGET FAudio-static) - else() - message(FATAL_ERROR - "-- RPCS3: 3rdparty FAudio requires SDL 3.2.0 or newer. Since a valid SDL3" - ">=3.2.0 version cannot be found, building with FAudio will be skipped.") - set(USE_FAUDIO OFF CACHE BOOL "Disabled FAudio with SDL < 3.2.0" FORCE) endif() endif() endif() @@ -345,6 +318,29 @@ add_subdirectory(wolfssl EXCLUDE_FROM_ALL) # CURL add_subdirectory(curl EXCLUDE_FROM_ALL) +# SDL3 +set(SDL3_TARGET 3rdparty_dummy_lib) +if(USE_SDL) + if(USE_SYSTEM_SDL) + find_package(SDL3) + if(SDL3_FOUND AND NOT SDL3_VERSION VERSION_LESS 3.2.0) + message(STATUS "Using system SDL3 version '${SDL3_VERSION}'") + add_library(3rdparty_sdl3 INTERFACE) + target_compile_definitions(3rdparty_sdl3 INTERFACE -DHAVE_SDL3=1) + target_link_libraries(3rdparty_sdl3 INTERFACE SDL3::SDL3) + set(SDL3_TARGET 3rdparty_sdl3) + else() + message(FATAL_ERROR "SDL3 is not available on this system") + endif() + else() + message(STATUS "Using static SDL3 from 3rdparty") + add_library(3rdparty_sdl3 INTERFACE) + target_compile_definitions(3rdparty_sdl3 INTERFACE -DHAVE_SDL3=1) + add_subdirectory(libsdl-org EXCLUDE_FROM_ALL) + set(SDL3_TARGET 3rdparty_sdl3) + endif() +endif() + # MINIUPNP add_subdirectory(miniupnp EXCLUDE_FROM_ALL) diff --git a/3rdparty/FAudio b/3rdparty/FAudio index e6ddfabab2..486e33eef3 160000 --- a/3rdparty/FAudio +++ b/3rdparty/FAudio @@ -1 +1 @@ -Subproject commit e6ddfabab2efbc8765750039634fe5e24ac31205 +Subproject commit 486e33eef3f282e4ce3d29f32ded3e67bacdbe5c diff --git a/3rdparty/GL/glext.h b/3rdparty/GL/glext.h index 276a962a96..61ff1b0708 100644 --- a/3rdparty/GL/glext.h +++ b/3rdparty/GL/glext.h @@ -32,7 +32,7 @@ extern "C" { #define GLAPI extern #endif -#define GL_GLEXT_VERSION 20250203 +#define GL_GLEXT_VERSION 20220530 #include @@ -5397,12 +5397,12 @@ typedef void (APIENTRY *GLDEBUGPROCAMD)(GLuint id,GLenum category,GLenum severi typedef void (APIENTRYP PFNGLDEBUGMESSAGEENABLEAMDPROC) (GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); typedef void (APIENTRYP PFNGLDEBUGMESSAGEINSERTAMDPROC) (GLenum category, GLenum severity, GLuint id, GLsizei length, const GLchar *buf); typedef void (APIENTRYP PFNGLDEBUGMESSAGECALLBACKAMDPROC) (GLDEBUGPROCAMD callback, void *userParam); -typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGAMDPROC) (GLuint count, GLsizei bufSize, GLenum *categories, GLenum *severities, GLuint *ids, GLsizei *lengths, GLchar *message); +typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGAMDPROC) (GLuint count, GLsizei bufSize, GLenum *categories, GLuint *severities, GLuint *ids, GLsizei *lengths, GLchar *message); #ifdef GL_GLEXT_PROTOTYPES GLAPI void APIENTRY glDebugMessageEnableAMD (GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled); GLAPI void APIENTRY glDebugMessageInsertAMD (GLenum category, GLenum severity, GLuint id, GLsizei length, const GLchar *buf); GLAPI void APIENTRY glDebugMessageCallbackAMD (GLDEBUGPROCAMD callback, void *userParam); -GLAPI GLuint APIENTRY glGetDebugMessageLogAMD (GLuint count, GLsizei bufSize, GLenum *categories, GLenum *severities, GLuint *ids, GLsizei 
*lengths, GLchar *message); +GLAPI GLuint APIENTRY glGetDebugMessageLogAMD (GLuint count, GLsizei bufSize, GLenum *categories, GLuint *severities, GLuint *ids, GLsizei *lengths, GLchar *message); #endif #endif /* GL_AMD_debug_output */ @@ -7370,16 +7370,6 @@ GLAPI void APIENTRY glBlitFramebufferEXT (GLint srcX0, GLint srcY0, GLint srcX1, #endif #endif /* GL_EXT_framebuffer_blit */ -#ifndef GL_EXT_framebuffer_blit_layers -#define GL_EXT_framebuffer_blit_layers 1 -typedef void (APIENTRYP PFNGLBLITFRAMEBUFFERLAYERSEXTPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); -typedef void (APIENTRYP PFNGLBLITFRAMEBUFFERLAYEREXTPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint srcLayer, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLint dstLayer, GLbitfield mask, GLenum filter); -#ifdef GL_GLEXT_PROTOTYPES -GLAPI void APIENTRY glBlitFramebufferLayersEXT (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter); -GLAPI void APIENTRY glBlitFramebufferLayerEXT (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint srcLayer, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLint dstLayer, GLbitfield mask, GLenum filter); -#endif -#endif /* GL_EXT_framebuffer_blit_layers */ - #ifndef GL_EXT_framebuffer_multisample #define GL_EXT_framebuffer_multisample 1 #define GL_RENDERBUFFER_SAMPLES_EXT 0x8CAB @@ -9404,11 +9394,6 @@ GLAPI void APIENTRY glResizeBuffersMESA (void); #define GL_MESA_shader_integer_functions 1 #endif /* GL_MESA_shader_integer_functions */ -#ifndef GL_MESA_texture_const_bandwidth -#define GL_MESA_texture_const_bandwidth 1 -#define GL_CONST_BW_TILING_MESA 0x8BBE -#endif /* GL_MESA_texture_const_bandwidth */ - #ifndef GL_MESA_tile_raster_order #define GL_MESA_tile_raster_order 1 #define GL_TILE_RASTER_ORDER_FIXED_MESA 0x8BB8 @@ -10263,6 +10248,12 @@ typedef void (APIENTRYP PFNGLMULTITEXCOORD3HNVPROC) (GLenum target, GLhalfNV s, typedef void (APIENTRYP PFNGLMULTITEXCOORD3HVNVPROC) (GLenum target, const GLhalfNV *v); typedef void (APIENTRYP PFNGLMULTITEXCOORD4HNVPROC) (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q); typedef void (APIENTRYP PFNGLMULTITEXCOORD4HVNVPROC) (GLenum target, const GLhalfNV *v); +typedef void (APIENTRYP PFNGLFOGCOORDHNVPROC) (GLhalfNV fog); +typedef void (APIENTRYP PFNGLFOGCOORDHVNVPROC) (const GLhalfNV *fog); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue); +typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HVNVPROC) (const GLhalfNV *v); +typedef void (APIENTRYP PFNGLVERTEXWEIGHTHNVPROC) (GLhalfNV weight); +typedef void (APIENTRYP PFNGLVERTEXWEIGHTHVNVPROC) (const GLhalfNV *weight); typedef void (APIENTRYP PFNGLVERTEXATTRIB1HNVPROC) (GLuint index, GLhalfNV x); typedef void (APIENTRYP PFNGLVERTEXATTRIB1HVNVPROC) (GLuint index, const GLhalfNV *v); typedef void (APIENTRYP PFNGLVERTEXATTRIB2HNVPROC) (GLuint index, GLhalfNV x, GLhalfNV y); @@ -10275,12 +10266,6 @@ typedef void (APIENTRYP PFNGLVERTEXATTRIBS1HVNVPROC) (GLuint index, GLsizei n, c typedef void (APIENTRYP PFNGLVERTEXATTRIBS2HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v); typedef void (APIENTRYP PFNGLVERTEXATTRIBS3HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v); typedef void (APIENTRYP PFNGLVERTEXATTRIBS4HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v); -typedef void (APIENTRYP PFNGLFOGCOORDHNVPROC) (GLhalfNV fog); -typedef void 
(APIENTRYP PFNGLFOGCOORDHVNVPROC) (const GLhalfNV *fog); -typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue); -typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HVNVPROC) (const GLhalfNV *v); -typedef void (APIENTRYP PFNGLVERTEXWEIGHTHNVPROC) (GLhalfNV weight); -typedef void (APIENTRYP PFNGLVERTEXWEIGHTHVNVPROC) (const GLhalfNV *weight); #ifdef GL_GLEXT_PROTOTYPES GLAPI void APIENTRY glVertex2hNV (GLhalfNV x, GLhalfNV y); GLAPI void APIENTRY glVertex2hvNV (const GLhalfNV *v); @@ -10310,6 +10295,12 @@ GLAPI void APIENTRY glMultiTexCoord3hNV (GLenum target, GLhalfNV s, GLhalfNV t, GLAPI void APIENTRY glMultiTexCoord3hvNV (GLenum target, const GLhalfNV *v); GLAPI void APIENTRY glMultiTexCoord4hNV (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q); GLAPI void APIENTRY glMultiTexCoord4hvNV (GLenum target, const GLhalfNV *v); +GLAPI void APIENTRY glFogCoordhNV (GLhalfNV fog); +GLAPI void APIENTRY glFogCoordhvNV (const GLhalfNV *fog); +GLAPI void APIENTRY glSecondaryColor3hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue); +GLAPI void APIENTRY glSecondaryColor3hvNV (const GLhalfNV *v); +GLAPI void APIENTRY glVertexWeighthNV (GLhalfNV weight); +GLAPI void APIENTRY glVertexWeighthvNV (const GLhalfNV *weight); GLAPI void APIENTRY glVertexAttrib1hNV (GLuint index, GLhalfNV x); GLAPI void APIENTRY glVertexAttrib1hvNV (GLuint index, const GLhalfNV *v); GLAPI void APIENTRY glVertexAttrib2hNV (GLuint index, GLhalfNV x, GLhalfNV y); @@ -10322,12 +10313,6 @@ GLAPI void APIENTRY glVertexAttribs1hvNV (GLuint index, GLsizei n, const GLhalfN GLAPI void APIENTRY glVertexAttribs2hvNV (GLuint index, GLsizei n, const GLhalfNV *v); GLAPI void APIENTRY glVertexAttribs3hvNV (GLuint index, GLsizei n, const GLhalfNV *v); GLAPI void APIENTRY glVertexAttribs4hvNV (GLuint index, GLsizei n, const GLhalfNV *v); -GLAPI void APIENTRY glFogCoordhNV (GLhalfNV fog); -GLAPI void APIENTRY glFogCoordhvNV (const GLhalfNV *fog); -GLAPI void APIENTRY glSecondaryColor3hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue); -GLAPI void APIENTRY glSecondaryColor3hvNV (const GLhalfNV *v); -GLAPI void APIENTRY glVertexWeighthNV (GLhalfNV weight); -GLAPI void APIENTRY glVertexWeighthvNV (const GLhalfNV *weight); #endif #endif /* GL_NV_half_float */ @@ -11464,10 +11449,6 @@ GLAPI void APIENTRY glDrawTransformFeedbackNV (GLenum mode, GLuint id); #endif #endif /* GL_NV_transform_feedback2 */ -#ifndef GL_NV_uniform_buffer_std430_layout -#define GL_NV_uniform_buffer_std430_layout 1 -#endif /* GL_NV_uniform_buffer_std430_layout */ - #ifndef GL_NV_uniform_buffer_unified_memory #define GL_NV_uniform_buffer_unified_memory 1 #define GL_UNIFORM_BUFFER_UNIFIED_NV 0x936E @@ -11983,10 +11964,8 @@ GLAPI void APIENTRY glViewportSwizzleNV (GLuint index, GLenum swizzlex, GLenum s #define GL_MAX_VIEWS_OVR 0x9631 #define GL_FRAMEBUFFER_INCOMPLETE_VIEW_TARGETS_OVR 0x9633 typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews); -typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC) (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews); #ifdef GL_GLEXT_PROTOTYPES GLAPI void APIENTRY glFramebufferTextureMultiviewOVR (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews); -GLAPI void APIENTRY glNamedFramebufferTextureMultiviewOVR (GLuint framebuffer, GLenum attachment, GLuint texture, GLint 
level, GLint baseViewIndex, GLsizei numViews);
 #endif
 #endif /* GL_OVR_multiview */
diff --git a/3rdparty/GPUOpen/VulkanMemoryAllocator b/3rdparty/GPUOpen/VulkanMemoryAllocator
index 1d8f600fd4..3706484339 160000
--- a/3rdparty/GPUOpen/VulkanMemoryAllocator
+++ b/3rdparty/GPUOpen/VulkanMemoryAllocator
@@ -1 +1 @@
-Subproject commit 1d8f600fd424278486eade7ed3e877c99f0846b1
+Subproject commit 37064843398c69cc0ca7f8cf5b33128c03a2bd74
diff --git a/3rdparty/MoltenVK/CMakeLists.txt b/3rdparty/MoltenVK/CMakeLists.txt
index ec9c2b802b..fc1b93074c 100644
--- a/3rdparty/MoltenVK/CMakeLists.txt
+++ b/3rdparty/MoltenVK/CMakeLists.txt
@@ -1,9 +1,10 @@
+cmake_minimum_required(VERSION 2.8.12)
 project(moltenvk NONE)
 include(ExternalProject)
 ExternalProject_Add(moltenvk
 	GIT_REPOSITORY https://github.com/KhronosGroup/MoltenVK.git
-	GIT_TAG 49b97f2
+	GIT_TAG 81541f6
 	BUILD_IN_SOURCE 1
 	SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK
 	CONFIGURE_COMMAND "${CMAKE_CURRENT_SOURCE_DIR}/MoltenVK/fetchDependencies" --macos
diff --git a/3rdparty/OpenAL/CMakeLists.txt b/3rdparty/OpenAL/CMakeLists.txt
index b9fee23ce5..f37a776a1f 100644
--- a/3rdparty/OpenAL/CMakeLists.txt
+++ b/3rdparty/OpenAL/CMakeLists.txt
@@ -11,7 +11,6 @@ if(USE_SYSTEM_OPENAL)
 else()
 	option(ALSOFT_UTILS "Build utility programs" OFF)
 	option(ALSOFT_EXAMPLES "Build example programs" OFF)
-	set(LIBTYPE "STATIC")
 	add_subdirectory(openal-soft EXCLUDE_FROM_ALL)
 	add_library(3rdparty_openal INTERFACE)
 	target_link_libraries(3rdparty_openal INTERFACE OpenAL::OpenAL)
diff --git a/3rdparty/OpenAL/openal-soft b/3rdparty/OpenAL/openal-soft
index dc7d7054a5..90191edd20 160000
--- a/3rdparty/OpenAL/openal-soft
+++ b/3rdparty/OpenAL/openal-soft
@@ -1 +1 @@
-Subproject commit dc7d7054a5b4f3bec1dc23a42fd616a0847af948
+Subproject commit 90191edd20bb877c5cbddfdac7ec0fe49ad93727
diff --git a/3rdparty/SoundTouch/soundtouch b/3rdparty/SoundTouch/soundtouch
index 3982730833..394e1f58b2 160000
--- a/3rdparty/SoundTouch/soundtouch
+++ b/3rdparty/SoundTouch/soundtouch
@@ -1 +1 @@
-Subproject commit 3982730833b6daefe77dcfb32b5c282851640c17
+Subproject commit 394e1f58b23dc80599214d2e9b6a5e0dfd0bbe07
diff --git a/3rdparty/curl/CMakeLists.txt b/3rdparty/curl/CMakeLists.txt
index b20763af65..d8fc5790fc 100644
--- a/3rdparty/curl/CMakeLists.txt
+++ b/3rdparty/curl/CMakeLists.txt
@@ -26,8 +26,8 @@ else()
 endif()
 set(CURL_USE_LIBSSH2 OFF CACHE BOOL "Use libSSH2")
 set(CURL_USE_LIBPSL OFF CACHE BOOL "Use libPSL")
-option(BUILD_TESTING "Build tests" OFF)
-option(BUILD_EXAMPLES "Build libcurl examples" OFF)
+
+set(CURL_DISABLE_TESTS ON)
 add_subdirectory(curl EXCLUDE_FROM_ALL)
diff --git a/3rdparty/curl/curl b/3rdparty/curl/curl
index fdb8a789d2..57495c6487 160000
--- a/3rdparty/curl/curl
+++ b/3rdparty/curl/curl
@@ -1 +1 @@
-Subproject commit fdb8a789d2b446b77bd7cdd2eff95f6cbc814cf4
+Subproject commit 57495c64871d18905a0941db9196ef90bafe9a29
diff --git a/3rdparty/curl/libcurl.vcxproj b/3rdparty/curl/libcurl.vcxproj
index c4a96abdc4..710ea9b225 100644
--- a/3rdparty/curl/libcurl.vcxproj
+++ b/3rdparty/curl/libcurl.vcxproj
[hunks add/remove <ClCompile> and <ClInclude> entries for the changed curl sources; the entry names are not preserved in this copy of the diff]
diff --git a/3rdparty/curl/libcurl.vcxproj.filters b/3rdparty/curl/libcurl.vcxproj.filters
index 32eb05f40e..3592158a28 100644
--- a/3rdparty/curl/libcurl.vcxproj.filters
+++ b/3rdparty/curl/libcurl.vcxproj.filters
[hunks add/remove the matching "Source Files" and "Header Files" filter entries; the entry names are not preserved in this copy of the diff]
diff --git a/3rdparty/discord-rpc/discord-rpc b/3rdparty/discord-rpc/discord-rpc
index 3dc2c326cb..171b2142ac 160000
--- a/3rdparty/discord-rpc/discord-rpc
+++ b/3rdparty/discord-rpc/discord-rpc
@@ -1 +1 @@
-Subproject commit 3dc2c326cb4dc5815c6069970c13154898f58d48
+Subproject commit 171b2142ac8acdf016c231e36dc7a8d48daff19c
diff --git a/3rdparty/glslang/glslang b/3rdparty/glslang/glslang
index fc9889c889..36d08c0d94 160000
--- a/3rdparty/glslang/glslang
+++ b/3rdparty/glslang/glslang
@@ -1 +1 @@
-Subproject commit fc9889c889561c5882e83819dcaffef5ed45529b
+Subproject commit 36d08c0d940cf307a23928299ef52c7970d8cee6
diff --git a/3rdparty/glslang/glslang.vcxproj b/3rdparty/glslang/glslang.vcxproj
index 298fd149c6..762fff7622 100644
--- a/3rdparty/glslang/glslang.vcxproj
+++ b/3rdparty/glslang/glslang.vcxproj
@@ -39,15 +39,17 @@
 "Visual Studio $(VisualStudioVersion.Substring(0,2))" call vsdevcmd.bat
-arch=amd64 - cmake -G $(CmakeGenerator) -A x64 -DCMAKE_BUILD_TYPE="Release" -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded -DGLSLANG_TESTS=OFF -DENABLE_GLSLANG_BINARIES=OFF -DBUILD_EXTERNAL=OFF -DENABLE_SPVREMAPPER=OFF -DENABLE_HLSL=OFF -DENABLE_OPT=OFF -S glslang -B "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)" + cmake -G $(CmakeGenerator) -A x64 -DCMAKE_BUILD_TYPE="Release" -DLLVM_USE_CRT_DEBUG=MDd -DLLVM_USE_CRT_RELEASE=MT -DENABLE_OPT=OFF -S glslang -B "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)" call vsdevcmd.bat -arch=amd64 - cmake -G $(CmakeGenerator) -A x64 -DCMAKE_BUILD_TYPE="Debug" -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreadedDebug -DGLSLANG_TESTS=OFF -DENABLE_GLSLANG_BINARIES=OFF -DBUILD_EXTERNAL=OFF -DENABLE_SPVREMAPPER=OFF -DENABLE_HLSL=OFF -DENABLE_OPT=OFF -S glslang -B "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)" + cmake -G $(CmakeGenerator) -A x64 -DCMAKE_BUILD_TYPE="Debug" -DLLVM_USE_CRT_DEBUG=MDd -DLLVM_USE_CRT_RELEASE=MT -DENABLE_OPT=OFF -S glslang -B "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)" echo Copying.. mkdir "$(SolutionDir)build\lib\$(Configuration)-$(Platform)\$(ProjectName)" + copy "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\hlsl\$(CONFIGURATION)\*.lib" "$(SolutionDir)build\lib\$(Configuration)-$(Platform)\$(ProjectName)" copy "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\SPIRV\$(CONFIGURATION)\*.lib" "$(SolutionDir)build\lib\$(Configuration)-$(Platform)\$(ProjectName)" + copy "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\OGLCompilersDLL\$(CONFIGURATION)\*.lib" "$(SolutionDir)build\lib\$(Configuration)-$(Platform)\$(ProjectName)" copy "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\glslang\OSDependent\Windows\$(CONFIGURATION)\*.lib" "$(SolutionDir)build\lib\$(Configuration)-$(Platform)\$(ProjectName)" copy "$(SolutionDir)build\tmp\$(ProjectName)-$(Configuration)-$(Platform)\glslang\$(CONFIGURATION)\*.lib" "$(SolutionDir)build\lib\$(Configuration)-$(Platform)\$(ProjectName)" diff --git a/3rdparty/hidapi/hidapi b/3rdparty/hidapi/hidapi index f42423643e..6bfdcf7368 160000 --- a/3rdparty/hidapi/hidapi +++ b/3rdparty/hidapi/hidapi @@ -1 +1 @@ -Subproject commit f42423643ec9011c98cccc0bb790722bbbd3f30b +Subproject commit 6bfdcf7368169efe1b745cd4468d45cda05ef8de diff --git a/3rdparty/libpng/libpng b/3rdparty/libpng/libpng index 2b978915d8..872555f4ba 160000 --- a/3rdparty/libpng/libpng +++ b/3rdparty/libpng/libpng @@ -1 +1 @@ -Subproject commit 2b978915d82377df13fcbb1fb56660195ded868a +Subproject commit 872555f4ba910252783af1507f9e7fe1653be252 diff --git a/3rdparty/libsdl-org/CMakeLists.txt b/3rdparty/libsdl-org/CMakeLists.txt index ba59cec8f7..d6fb6c8890 100644 --- a/3rdparty/libsdl-org/CMakeLists.txt +++ b/3rdparty/libsdl-org/CMakeLists.txt @@ -1,4 +1,11 @@ -option(SDL_SHARED "Build a shared version of the library" OFF) -option(SDL_STATIC "Build a static version of the library" ON) -option(SDL_TEST_LIBRARY "Build the SDL3_test library" OFF) +option(SDL2_DISABLE_SDL2MAIN "" ON) +option(SDL2_DISABLE_INSTALL "" ON) +option(SDL2_DISABLE_UNINSTALL "" ON) +option(SDL_SHARED OFF) +set(SDL_SHARED_ENABLED_BY_DEFAULT OFF) +option(SDL_STATIC ON) +set(SDL_STATIC_ENABLED_BY_DEFAULT ON) +option(SDL_TEST OFF) +set(SDL_TEST_ENABLED_BY_DEFAULT OFF) +set(OPT_DEF_LIBC ON) add_subdirectory(SDL EXCLUDE_FROM_ALL) diff --git a/3rdparty/libsdl-org/SDL b/3rdparty/libsdl-org/SDL index c9a6709bd2..f6864924f7 
160000 --- a/3rdparty/libsdl-org/SDL +++ b/3rdparty/libsdl-org/SDL @@ -1 +1 @@ -Subproject commit c9a6709bd21750f1ad9597be21abace78c6378c9 +Subproject commit f6864924f76e1a0b4abaefc76ae2ed22b1a8916e diff --git a/3rdparty/libsdl-org/SDL.vcxproj b/3rdparty/libsdl-org/SDL.vcxproj index 81b7d853a5..e062060fab 100644 --- a/3rdparty/libsdl-org/SDL.vcxproj +++ b/3rdparty/libsdl-org/SDL.vcxproj @@ -532,6 +532,8 @@ SDL\include;SDL\include\build_config;%(AdditionalIncludeDirectories) ProgramDatabase MaxSpeed + SDL_HIDAPI_DISABLED;%(PreprocessorDefinitions) + SDL_HIDAPI_DISABLED;%(PreprocessorDefinitions) diff --git a/3rdparty/libusb/CMakeLists.txt b/3rdparty/libusb/CMakeLists.txt index 0fc66f2429..7d65aab0ad 100644 --- a/3rdparty/libusb/CMakeLists.txt +++ b/3rdparty/libusb/CMakeLists.txt @@ -1,3 +1,5 @@ +cmake_minimum_required(VERSION 3.0) + project(libusb) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake_modules") diff --git a/3rdparty/libusb/libusb b/3rdparty/libusb/libusb index 15a7ebb4d4..a61afe5f75 160000 --- a/3rdparty/libusb/libusb +++ b/3rdparty/libusb/libusb @@ -1 +1 @@ -Subproject commit 15a7ebb4d426c5ce196684347d2b7cafad862626 +Subproject commit a61afe5f75d969c4561a1d0ad753aa23cee6329a diff --git a/3rdparty/llvm/CMakeLists.txt b/3rdparty/llvm/CMakeLists.txt index d1295886d8..8723728f4a 100644 --- a/3rdparty/llvm/CMakeLists.txt +++ b/3rdparty/llvm/CMakeLists.txt @@ -66,10 +66,12 @@ if(WITH_LLVM) find_package(LLVM CONFIG) if (NOT LLVM_FOUND) - message(FATAL_ERROR "Can't find LLVM libraries from the CMAKE_PREFIX_PATH path or LLVM_DIR. Enable BUILD_LLVM option to build LLVM from included as a git submodule.") + message(FATAL_ERROR "Can't find LLVM libraries from the CMAKE_PREFIX_PATH path or LLVM_DIR. \ + Enable BUILD_LLVM option to build LLVM from included as a git submodule.") endif() if (LLVM_VERSION VERSION_LESS 18) - message(FATAL_ERROR "Found LLVM version ${LLVM_VERSION}. Required version 18 or above. Enable BUILD_LLVM option to build LLVM from included as a git submodule.") + message(FATAL_ERROR "Found LLVM version ${LLVM_VERSION}. Required version 18 or above. \ + Enable BUILD_LLVM option to build LLVM from included as a git submodule.") endif() endif() diff --git a/3rdparty/miniupnp/miniupnp b/3rdparty/miniupnp/miniupnp index d66872e34d..7f189988a0 160000 --- a/3rdparty/miniupnp/miniupnp +++ b/3rdparty/miniupnp/miniupnp @@ -1 +1 @@ -Subproject commit d66872e34d9ff83a07f8b71371b13419b2089953 +Subproject commit 7f189988a0decca0ab7da89000051ab91751f70d diff --git a/3rdparty/opencv/opencv b/3rdparty/opencv/opencv index 50fb5e701d..f76628fb5b 160000 --- a/3rdparty/opencv/opencv +++ b/3rdparty/opencv/opencv @@ -1 +1 @@ -Subproject commit 50fb5e701d8b0d3fe8262ed84668a94cc8cbf0b1 +Subproject commit f76628fb5b25746fcb75a7ce85be0d8c6439fc57 diff --git a/3rdparty/qt6.cmake b/3rdparty/qt6.cmake index ef89bdab05..259727879e 100644 --- a/3rdparty/qt6.cmake +++ b/3rdparty/qt6.cmake @@ -32,7 +32,8 @@ Find the correct ppa at https://launchpad.net/~beineri and follow the instructio else() message("CMake was unable to find Qt6!") if(WIN32) - message(FATAL_ERROR "Make sure the Qt6_ROOT environment variable has been set properly. (for example C:\\Qt\\${QT_MIN_VER}\\msvc2022_64\\)") + message(FATAL_ERROR "Make sure the QTDIR env variable has been set properly. 
(for example C:\\Qt\\${QT_MIN_VER}\\msvc2019_64\\) +You can also try setting the Qt6_DIR preprocessor definiton.") elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux") message(FATAL_ERROR "Make sure to install your distro's qt6 package!") else() diff --git a/3rdparty/robin_hood/include/robin_hood.h b/3rdparty/robin_hood/include/robin_hood.h new file mode 100644 index 0000000000..12b230defb --- /dev/null +++ b/3rdparty/robin_hood/include/robin_hood.h @@ -0,0 +1,2551 @@ +// ______ _____ ______ _________ +// ______________ ___ /_ ___(_)_______ ___ /_ ______ ______ ______ / +// __ ___/_ __ \__ __ \__ / __ __ \ __ __ \_ __ \_ __ \_ __ / +// _ / / /_/ /_ /_/ /_ / _ / / / _ / / // /_/ // /_/ // /_/ / +// /_/ \____/ /_.___/ /_/ /_/ /_/ ________/_/ /_/ \____/ \____/ \__,_/ +// _/_____/ +// +// Fast & memory efficient hashtable based on robin hood hashing for C++11/14/17/20 +// https://github.com/martinus/robin-hood-hashing +// +// Licensed under the MIT License . +// SPDX-License-Identifier: MIT +// Copyright (c) 2018-2021 Martin Ankerl +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +#ifndef ROBIN_HOOD_H_INCLUDED +#define ROBIN_HOOD_H_INCLUDED + +// see https://semver.org/ +#define ROBIN_HOOD_VERSION_MAJOR 3 // for incompatible API changes +#define ROBIN_HOOD_VERSION_MINOR 11 // for adding functionality in a backwards-compatible manner +#define ROBIN_HOOD_VERSION_PATCH 5 // for backwards-compatible bug fixes + +#include +#include +#include +#include +#include +#include // only to support hash of smart pointers +#include +#include +#include +#include +#if __cplusplus >= 201703L +# include +#endif + +// #define ROBIN_HOOD_LOG_ENABLED +#ifdef ROBIN_HOOD_LOG_ENABLED +# include +# define ROBIN_HOOD_LOG(...) \ + std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << __VA_ARGS__ << std::endl; +#else +# define ROBIN_HOOD_LOG(x) +#endif + +// #define ROBIN_HOOD_TRACE_ENABLED +#ifdef ROBIN_HOOD_TRACE_ENABLED +# include +# define ROBIN_HOOD_TRACE(...) 
\ + std::cout << __FUNCTION__ << "@" << __LINE__ << ": " << __VA_ARGS__ << std::endl; +#else +# define ROBIN_HOOD_TRACE(x) +#endif + +// #define ROBIN_HOOD_COUNT_ENABLED +#ifdef ROBIN_HOOD_COUNT_ENABLED +# include +# define ROBIN_HOOD_COUNT(x) ++counts().x; +namespace robin_hood { + struct Counts { + uint64_t shiftUp{}; + uint64_t shiftDown{}; + }; + inline std::ostream& operator<<(std::ostream& os, Counts const& c) { + return os << c.shiftUp << " shiftUp" << std::endl << c.shiftDown << " shiftDown" << std::endl; + } + + static Counts& counts() { + static Counts counts{}; + return counts; + } +} // namespace robin_hood +#else +# define ROBIN_HOOD_COUNT(x) +#endif + +// all non-argument macros should use this facility. See +// https://www.fluentcpp.com/2019/05/28/better-macros-better-flags/ +#define ROBIN_HOOD(x) ROBIN_HOOD_PRIVATE_DEFINITION_##x() + +// mark unused members with this macro +#define ROBIN_HOOD_UNUSED(identifier) + +// bitness +#if SIZE_MAX == UINT32_MAX +# define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 32 +#elif SIZE_MAX == UINT64_MAX +# define ROBIN_HOOD_PRIVATE_DEFINITION_BITNESS() 64 +#else +# error Unsupported bitness +#endif + +// endianess +#ifdef _MSC_VER +# define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() 1 +# define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() 0 +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_LITTLE_ENDIAN() \ + (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +# define ROBIN_HOOD_PRIVATE_DEFINITION_BIG_ENDIAN() (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +#endif + +// inline +#ifdef _MSC_VER +# define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __declspec(noinline) +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_NOINLINE() __attribute__((noinline)) +#endif + +// exceptions +#if !defined(__cpp_exceptions) && !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) +# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 0 +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_EXCEPTIONS() 1 +#endif + +// count leading/trailing bits +#if !defined(ROBIN_HOOD_DISABLE_INTRINSICS) +# ifdef _MSC_VER +# if ROBIN_HOOD(BITNESS) == 32 +# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward +# else +# define ROBIN_HOOD_PRIVATE_DEFINITION_BITSCANFORWARD() _BitScanForward64 +# endif +# include +# pragma intrinsic(ROBIN_HOOD(BITSCANFORWARD)) +# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) \ + [](size_t mask) noexcept -> int { \ + unsigned long index; \ + return ROBIN_HOOD(BITSCANFORWARD)(&index, mask) ? static_cast(index) \ + : ROBIN_HOOD(BITNESS); \ + }(x) +# else +# if ROBIN_HOOD(BITNESS) == 32 +# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzl +# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzl +# else +# define ROBIN_HOOD_PRIVATE_DEFINITION_CTZ() __builtin_ctzll +# define ROBIN_HOOD_PRIVATE_DEFINITION_CLZ() __builtin_clzll +# endif +# define ROBIN_HOOD_COUNT_LEADING_ZEROES(x) ((x) ? ROBIN_HOOD(CLZ)(x) : ROBIN_HOOD(BITNESS)) +# define ROBIN_HOOD_COUNT_TRAILING_ZEROES(x) ((x) ? 
ROBIN_HOOD(CTZ)(x) : ROBIN_HOOD(BITNESS)) +# endif +#endif + +// fallthrough +#ifndef __has_cpp_attribute // For backwards compatibility +# define __has_cpp_attribute(x) 0 +#endif +#if __has_cpp_attribute(clang::fallthrough) +# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[clang::fallthrough]] +#elif __has_cpp_attribute(gnu::fallthrough) +# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() [[gnu::fallthrough]] +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_FALLTHROUGH() +#endif + +// likely/unlikely +#ifdef _MSC_VER +# define ROBIN_HOOD_LIKELY(condition) condition +# define ROBIN_HOOD_UNLIKELY(condition) condition +#else +# define ROBIN_HOOD_LIKELY(condition) __builtin_expect(condition, 1) +# define ROBIN_HOOD_UNLIKELY(condition) __builtin_expect(condition, 0) +#endif + +// detect if native wchar_t type is availiable in MSVC +#ifdef _MSC_VER +# ifdef _NATIVE_WCHAR_T_DEFINED +# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1 +# else +# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 0 +# endif +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_HAS_NATIVE_WCHART() 1 +#endif + +// detect if MSVC supports the pair(std::piecewise_construct_t,...) consructor being constexpr +#ifdef _MSC_VER +# if _MSC_VER <= 1900 +# define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 1 +# else +# define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 0 +# endif +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_BROKEN_CONSTEXPR() 0 +#endif + +// workaround missing "is_trivially_copyable" in g++ < 5.0 +// See https://stackoverflow.com/a/31798726/48181 +#if defined(__GNUC__) && __GNUC__ < 5 && !defined(__clang__) +# define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) __has_trivial_copy(__VA_ARGS__) +#else +# define ROBIN_HOOD_IS_TRIVIALLY_COPYABLE(...) std::is_trivially_copyable<__VA_ARGS__>::value +#endif + +// helpers for C++ versions, see https://gcc.gnu.org/onlinedocs/cpp/Standard-Predefined-Macros.html +#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX() __cplusplus +#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX98() 199711L +#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX11() 201103L +#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX14() 201402L +#define ROBIN_HOOD_PRIVATE_DEFINITION_CXX17() 201703L + +#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17) +# define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD() [[nodiscard]] +#else +# define ROBIN_HOOD_PRIVATE_DEFINITION_NODISCARD() +#endif + +namespace robin_hood { + +#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14) +# define ROBIN_HOOD_STD std +#else + + // c++11 compatibility layer + namespace ROBIN_HOOD_STD { + template + struct alignment_of + : std::integral_constant::type)> {}; + + template + class integer_sequence { + public: + using value_type = T; + static_assert(std::is_integral::value, "not integral type"); + static constexpr std::size_t size() noexcept { + return sizeof...(Ints); + } + }; + template + using index_sequence = integer_sequence; + + namespace detail_ { + template + struct IntSeqImpl { + using TValue = T; + static_assert(std::is_integral::value, "not integral type"); + static_assert(Begin >= 0 && Begin < End, "unexpected argument (Begin<0 || Begin<=End)"); + + template + struct IntSeqCombiner; + + template + struct IntSeqCombiner, integer_sequence> { + using TResult = integer_sequence; + }; + + using TResult = + typename IntSeqCombiner::TResult, + typename IntSeqImpl::TResult>::TResult; + }; + + template + struct IntSeqImpl { + using TValue = T; + static_assert(std::is_integral::value, "not integral type"); + static_assert(Begin >= 0, "unexpected argument (Begin<0)"); 
+ using TResult = integer_sequence; + }; + + template + struct IntSeqImpl { + using TValue = T; + static_assert(std::is_integral::value, "not integral type"); + static_assert(Begin >= 0, "unexpected argument (Begin<0)"); + using TResult = integer_sequence; + }; + } // namespace detail_ + + template + using make_integer_sequence = typename detail_::IntSeqImpl::TResult; + + template + using make_index_sequence = make_integer_sequence; + + template + using index_sequence_for = make_index_sequence; + + } // namespace ROBIN_HOOD_STD + +#endif + + namespace detail { + + // make sure we static_cast to the correct type for hash_int +#if ROBIN_HOOD(BITNESS) == 64 + using SizeT = uint64_t; +#else + using SizeT = uint32_t; +#endif + + template + T rotr(T x, unsigned k) { + return (x >> k) | (x << (8U * sizeof(T) - k)); + } + + // This cast gets rid of warnings like "cast from 'uint8_t*' {aka 'unsigned char*'} to + // 'uint64_t*' {aka 'long unsigned int*'} increases required alignment of target type". Use with + // care! + template + inline T reinterpret_cast_no_cast_align_warning(void* ptr) noexcept { + return reinterpret_cast(ptr); + } + + template + inline T reinterpret_cast_no_cast_align_warning(void const* ptr) noexcept { + return reinterpret_cast(ptr); + } + + // make sure this is not inlined as it is slow and dramatically enlarges code, thus making other + // inlinings more difficult. Throws are also generally the slow path. + template + [[noreturn]] ROBIN_HOOD(NOINLINE) +#if ROBIN_HOOD(HAS_EXCEPTIONS) + void doThrow(Args&&... args) { + // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) + throw E(std::forward(args)...); + } +#else + void doThrow(Args&&... ROBIN_HOOD_UNUSED(args) /*unused*/) { + abort(); + } +#endif + + template + T* assertNotNull(T* t, Args&&... args) { + if (ROBIN_HOOD_UNLIKELY(nullptr == t)) { + doThrow(std::forward(args)...); + } + return t; + } + + template + inline T unaligned_load(void const* ptr) noexcept { + // using memcpy so we don't get into unaligned load problems. + // compiler should optimize this very well anyways. + T t; + std::memcpy(&t, ptr, sizeof(T)); + return t; + } + + // Allocates bulks of memory for objects of type T. This deallocates the memory in the destructor, + // and keeps a linked list of the allocated memory around. Overhead per allocation is the size of a + // pointer. + template + class BulkPoolAllocator { + public: + BulkPoolAllocator() noexcept = default; + + // does not copy anything, just creates a new allocator. + BulkPoolAllocator(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept + : mHead(nullptr) + , mListForFree(nullptr) {} + + BulkPoolAllocator(BulkPoolAllocator&& o) noexcept + : mHead(o.mHead) + , mListForFree(o.mListForFree) { + o.mListForFree = nullptr; + o.mHead = nullptr; + } + + BulkPoolAllocator& operator=(BulkPoolAllocator&& o) noexcept { + reset(); + mHead = o.mHead; + mListForFree = o.mListForFree; + o.mListForFree = nullptr; + o.mHead = nullptr; + return *this; + } + + BulkPoolAllocator& + // NOLINTNEXTLINE(bugprone-unhandled-self-assignment,cert-oop54-cpp) + operator=(const BulkPoolAllocator& ROBIN_HOOD_UNUSED(o) /*unused*/) noexcept { + // does not do anything + return *this; + } + + ~BulkPoolAllocator() noexcept { + reset(); + } + + // Deallocates all allocated memory. 
+ void reset() noexcept { + while (mListForFree) { + T* tmp = *mListForFree; + ROBIN_HOOD_LOG("std::free") + std::free(mListForFree); + mListForFree = reinterpret_cast_no_cast_align_warning(tmp); + } + mHead = nullptr; + } + + // allocates, but does NOT initialize. Use in-place new constructor, e.g. + // T* obj = pool.allocate(); + // ::new (static_cast(obj)) T(); + T* allocate() { + T* tmp = mHead; + if (!tmp) { + tmp = performAllocation(); + } + + mHead = *reinterpret_cast_no_cast_align_warning(tmp); + return tmp; + } + + // does not actually deallocate but puts it in store. + // make sure you have already called the destructor! e.g. with + // obj->~T(); + // pool.deallocate(obj); + void deallocate(T* obj) noexcept { + *reinterpret_cast_no_cast_align_warning(obj) = mHead; + mHead = obj; + } + + // Adds an already allocated block of memory to the allocator. This allocator is from now on + // responsible for freeing the data (with free()). If the provided data is not large enough to + // make use of, it is immediately freed. Otherwise it is reused and freed in the destructor. + void addOrFree(void* ptr, const size_t numBytes) noexcept { + // calculate number of available elements in ptr + if (numBytes < ALIGNMENT + ALIGNED_SIZE) { + // not enough data for at least one element. Free and return. + ROBIN_HOOD_LOG("std::free") + std::free(ptr); + } + else { + ROBIN_HOOD_LOG("add to buffer") + add(ptr, numBytes); + } + } + + void swap(BulkPoolAllocator& other) noexcept { + using std::swap; + swap(mHead, other.mHead); + swap(mListForFree, other.mListForFree); + } + + private: + // iterates the list of allocated memory to calculate how many to alloc next. + // Recalculating this each time saves us a size_t member. + // This ignores the fact that memory blocks might have been added manually with addOrFree. In + // practice, this should not matter much. + ROBIN_HOOD(NODISCARD) size_t calcNumElementsToAlloc() const noexcept { + auto tmp = mListForFree; + size_t numAllocs = MinNumAllocs; + + while (numAllocs * 2 <= MaxNumAllocs && tmp) { + auto x = reinterpret_cast(tmp); + tmp = *x; + numAllocs *= 2; + } + + return numAllocs; + } + + // WARNING: Underflow if numBytes < ALIGNMENT! This is guarded in addOrFree(). + void add(void* ptr, const size_t numBytes) noexcept { + const size_t numElements = (numBytes - ALIGNMENT) / ALIGNED_SIZE; + + auto data = reinterpret_cast(ptr); + + // link free list + auto x = reinterpret_cast(data); + *x = mListForFree; + mListForFree = data; + + // create linked list for newly allocated data + auto* const headT = + reinterpret_cast_no_cast_align_warning(reinterpret_cast(ptr) + ALIGNMENT); + + auto* const head = reinterpret_cast(headT); + + // Visual Studio compiler automatically unrolls this loop, which is pretty cool + for (size_t i = 0; i < numElements; ++i) { + *reinterpret_cast_no_cast_align_warning(head + i * ALIGNED_SIZE) = + head + (i + 1) * ALIGNED_SIZE; + } + + // last one points to 0 + *reinterpret_cast_no_cast_align_warning(head + (numElements - 1) * ALIGNED_SIZE) = + mHead; + mHead = headT; + } + + // Called when no memory is available (mHead == 0). + // Don't inline this slow path. + ROBIN_HOOD(NOINLINE) T* performAllocation() { + size_t const numElementsToAlloc = calcNumElementsToAlloc(); + + // alloc new memory: [prev |T, T, ... 
T] + size_t const bytes = ALIGNMENT + ALIGNED_SIZE * numElementsToAlloc; + ROBIN_HOOD_LOG("std::malloc " << bytes << " = " << ALIGNMENT << " + " << ALIGNED_SIZE + << " * " << numElementsToAlloc) + add(assertNotNull(std::malloc(bytes)), bytes); + return mHead; + } + + // enforce byte alignment of the T's +#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX14) + static constexpr size_t ALIGNMENT = + (std::max)(std::alignment_of::value, std::alignment_of::value); +#else + static const size_t ALIGNMENT = + (ROBIN_HOOD_STD::alignment_of::value > ROBIN_HOOD_STD::alignment_of::value) + ? ROBIN_HOOD_STD::alignment_of::value + : +ROBIN_HOOD_STD::alignment_of::value; // the + is for walkarround +#endif + + static constexpr size_t ALIGNED_SIZE = ((sizeof(T) - 1) / ALIGNMENT + 1) * ALIGNMENT; + + static_assert(MinNumAllocs >= 1, "MinNumAllocs"); + static_assert(MaxNumAllocs >= MinNumAllocs, "MaxNumAllocs"); + static_assert(ALIGNED_SIZE >= sizeof(T*), "ALIGNED_SIZE"); + static_assert(0 == (ALIGNED_SIZE % sizeof(T*)), "ALIGNED_SIZE mod"); + static_assert(ALIGNMENT >= sizeof(T*), "ALIGNMENT"); + + T* mHead{ nullptr }; + T** mListForFree{ nullptr }; + }; + + template + struct NodeAllocator; + + // dummy allocator that does nothing + template + struct NodeAllocator { + + // we are not using the data, so just free it. + void addOrFree(void* ptr, size_t ROBIN_HOOD_UNUSED(numBytes) /*unused*/) noexcept { + ROBIN_HOOD_LOG("std::free") + std::free(ptr); + } + }; + + template + struct NodeAllocator : public BulkPoolAllocator {}; + + // c++14 doesn't have is_nothrow_swappable, and clang++ 6.0.1 doesn't like it either, so I'm making + // my own here. + namespace swappable { +#if ROBIN_HOOD(CXX) < ROBIN_HOOD(CXX17) + using std::swap; + template + struct nothrow { + static const bool value = noexcept(swap(std::declval(), std::declval())); + }; +#else + template + struct nothrow { + static const bool value = std::is_nothrow_swappable::value; + }; +#endif + } // namespace swappable + + } // namespace detail + + struct is_transparent_tag {}; + + // A custom pair implementation is used in the map because std::pair is not is_trivially_copyable, + // which means it would not be allowed to be used in std::memcpy. This struct is copyable, which is + // also tested. + template + struct pair { + using first_type = T1; + using second_type = T2; + + template ::value&& + std::is_default_constructible::value>::type> + constexpr pair() noexcept(noexcept(U1()) && noexcept(U2())) + : first() + , second() {} + + // pair constructors are explicit so we don't accidentally call this ctor when we don't have to. + explicit constexpr pair(std::pair const& o) noexcept( + noexcept(T1(std::declval())) && noexcept(T2(std::declval()))) + : first(o.first) + , second(o.second) {} + + // pair constructors are explicit so we don't accidentally call this ctor when we don't have to. 
+ explicit constexpr pair(std::pair&& o) noexcept(noexcept( + T1(std::move(std::declval()))) && noexcept(T2(std::move(std::declval())))) + : first(std::move(o.first)) + , second(std::move(o.second)) {} + + constexpr pair(T1&& a, T2&& b) noexcept(noexcept( + T1(std::move(std::declval()))) && noexcept(T2(std::move(std::declval())))) + : first(std::move(a)) + , second(std::move(b)) {} + + template + constexpr pair(U1&& a, U2&& b) noexcept(noexcept(T1(std::forward( + std::declval()))) && noexcept(T2(std::forward(std::declval())))) + : first(std::forward(a)) + , second(std::forward(b)) {} + + template + // MSVC 2015 produces error "C2476: ‘constexpr’ constructor does not initialize all members" + // if this constructor is constexpr +#if !ROBIN_HOOD(BROKEN_CONSTEXPR) + constexpr +#endif + pair(std::piecewise_construct_t /*unused*/, std::tuple a, + std::tuple + b) noexcept(noexcept(pair(std::declval&>(), + std::declval&>(), + ROBIN_HOOD_STD::index_sequence_for(), + ROBIN_HOOD_STD::index_sequence_for()))) + : pair(a, b, ROBIN_HOOD_STD::index_sequence_for(), + ROBIN_HOOD_STD::index_sequence_for()) { + } + + // constructor called from the std::piecewise_construct_t ctor + template + pair(std::tuple& a, std::tuple& b, ROBIN_HOOD_STD::index_sequence /*unused*/, ROBIN_HOOD_STD::index_sequence /*unused*/) noexcept( + noexcept(T1(std::forward(std::get( + std::declval&>()))...)) && noexcept(T2(std:: + forward(std::get( + std::declval&>()))...))) + : first(std::forward(std::get(a))...) + , second(std::forward(std::get(b))...) { + // make visual studio compiler happy about warning about unused a & b. + // Visual studio's pair implementation disables warning 4100. + (void)a; + (void)b; + } + + void swap(pair& o) noexcept((detail::swappable::nothrow::value) && + (detail::swappable::nothrow::value)) { + using std::swap; + swap(first, o.first); + swap(second, o.second); + } + + T1 first; // NOLINT(misc-non-private-member-variables-in-classes) + T2 second; // NOLINT(misc-non-private-member-variables-in-classes) + }; + + template + inline void swap(pair& a, pair& b) noexcept( + noexcept(std::declval&>().swap(std::declval&>()))) { + a.swap(b); + } + + template + inline constexpr bool operator==(pair const& x, pair const& y) { + return (x.first == y.first) && (x.second == y.second); + } + template + inline constexpr bool operator!=(pair const& x, pair const& y) { + return !(x == y); + } + template + inline constexpr bool operator<(pair const& x, pair const& y) noexcept(noexcept( + std::declval() < std::declval()) && noexcept(std::declval() < + std::declval())) { + return x.first < y.first || (!(y.first < x.first) && x.second < y.second); + } + template + inline constexpr bool operator>(pair const& x, pair const& y) { + return y < x; + } + template + inline constexpr bool operator<=(pair const& x, pair const& y) { + return !(x > y); + } + template + inline constexpr bool operator>=(pair const& x, pair const& y) { + return !(x < y); + } + + inline size_t hash_bytes(void const* ptr, size_t len) noexcept { + static constexpr uint64_t m = UINT64_C(0xc6a4a7935bd1e995); + static constexpr uint64_t seed = UINT64_C(0xe17a1465); + static constexpr unsigned int r = 47; + + auto const* const data64 = static_cast(ptr); + uint64_t h = seed ^ (len * m); + + size_t const n_blocks = len / 8; + for (size_t i = 0; i < n_blocks; ++i) { + auto k = detail::unaligned_load(data64 + i); + + k *= m; + k ^= k >> r; + k *= m; + + h ^= k; + h *= m; + } + + auto const* const data8 = reinterpret_cast(data64 + n_blocks); + switch (len & 7U) { + 
case 7: + h ^= static_cast(data8[6]) << 48U; + ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH + case 6: + h ^= static_cast(data8[5]) << 40U; + ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH + case 5: + h ^= static_cast(data8[4]) << 32U; + ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH + case 4: + h ^= static_cast(data8[3]) << 24U; + ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH + case 3: + h ^= static_cast(data8[2]) << 16U; + ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH + case 2: + h ^= static_cast(data8[1]) << 8U; + ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH + case 1: + h ^= static_cast(data8[0]); + h *= m; + ROBIN_HOOD(FALLTHROUGH); // FALLTHROUGH + default: + break; + } + + h ^= h >> r; + + // not doing the final step here, because this will be done by keyToIdx anyways + // h *= m; + // h ^= h >> r; + return static_cast(h); + } + + inline size_t hash_int(uint64_t x) noexcept { + // tried lots of different hashes, let's stick with murmurhash3. It's simple, fast, well tested, + // and doesn't need any special 128bit operations. + x ^= x >> 33U; + x *= UINT64_C(0xff51afd7ed558ccd); + x ^= x >> 33U; + + // not doing the final step here, because this will be done by keyToIdx anyways + // x *= UINT64_C(0xc4ceb9fe1a85ec53); + // x ^= x >> 33U; + return static_cast(x); + } + + // A thin wrapper around std::hash, performing an additional simple mixing step of the result. + template + struct hash : public std::hash { + size_t operator()(T const& obj) const + noexcept(noexcept(std::declval>().operator()(std::declval()))) { + // call base hash + auto result = std::hash::operator()(obj); + // return mixed of that, to be save against identity has + return hash_int(static_cast(result)); + } + }; + + template + struct hash> { + size_t operator()(std::basic_string const& str) const noexcept { + return hash_bytes(str.data(), sizeof(CharT) * str.size()); + } + }; + +#if ROBIN_HOOD(CXX) >= ROBIN_HOOD(CXX17) + template + struct hash> { + size_t operator()(std::basic_string_view const& sv) const noexcept { + return hash_bytes(sv.data(), sizeof(CharT) * sv.size()); + } + }; +#endif + + template + struct hash { + size_t operator()(T* ptr) const noexcept { + return hash_int(reinterpret_cast(ptr)); + } + }; + + template + struct hash> { + size_t operator()(std::unique_ptr const& ptr) const noexcept { + return hash_int(reinterpret_cast(ptr.get())); + } + }; + + template + struct hash> { + size_t operator()(std::shared_ptr const& ptr) const noexcept { + return hash_int(reinterpret_cast(ptr.get())); + } + }; + + template + struct hash::value>::type> { + size_t operator()(Enum e) const noexcept { + using Underlying = typename std::underlying_type::type; + return hash{}(static_cast(e)); + } + }; + +#define ROBIN_HOOD_HASH_INT(T) \ + template <> \ + struct hash { \ + size_t operator()(T const& obj) const noexcept { \ + return hash_int(static_cast(obj)); \ + } \ + } + +#if defined(__GNUC__) && !defined(__clang__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wuseless-cast" +#endif + // see https://en.cppreference.com/w/cpp/utility/hash + ROBIN_HOOD_HASH_INT(bool); + ROBIN_HOOD_HASH_INT(char); + ROBIN_HOOD_HASH_INT(signed char); + ROBIN_HOOD_HASH_INT(unsigned char); + ROBIN_HOOD_HASH_INT(char16_t); + ROBIN_HOOD_HASH_INT(char32_t); +#if ROBIN_HOOD(HAS_NATIVE_WCHART) + ROBIN_HOOD_HASH_INT(wchar_t); +#endif + ROBIN_HOOD_HASH_INT(short); + ROBIN_HOOD_HASH_INT(unsigned short); + ROBIN_HOOD_HASH_INT(int); + ROBIN_HOOD_HASH_INT(unsigned int); + ROBIN_HOOD_HASH_INT(long); + ROBIN_HOOD_HASH_INT(long long); + ROBIN_HOOD_HASH_INT(unsigned long); 
+ ROBIN_HOOD_HASH_INT(unsigned long long); +#if defined(__GNUC__) && !defined(__clang__) +# pragma GCC diagnostic pop +#endif + namespace detail { + + template + struct void_type { + using type = void; + }; + + template + struct has_is_transparent : public std::false_type {}; + + template + struct has_is_transparent::type> + : public std::true_type {}; + + // using wrapper classes for hash and key_equal prevents the diamond problem when the same type + // is used. see https://stackoverflow.com/a/28771920/48181 + template + struct WrapHash : public T { + WrapHash() = default; + explicit WrapHash(T const& o) noexcept(noexcept(T(std::declval()))) + : T(o) {} + }; + + template + struct WrapKeyEqual : public T { + WrapKeyEqual() = default; + explicit WrapKeyEqual(T const& o) noexcept(noexcept(T(std::declval()))) + : T(o) {} + }; + + // A highly optimized hashmap implementation, using the Robin Hood algorithm. + // + // In most cases, this map should be usable as a drop-in replacement for std::unordered_map, but + // be about 2x faster in most cases and require much less allocations. + // + // This implementation uses the following memory layout: + // + // [Node, Node, ... Node | info, info, ... infoSentinel ] + // + // * Node: either a DataNode that directly has the std::pair as member, + // or a DataNode with a pointer to std::pair. Which DataNode representation to use + // depends on how fast the swap() operation is. Heuristically, this is automatically choosen + // based on sizeof(). there are always 2^n Nodes. + // + // * info: Each Node in the map has a corresponding info byte, so there are 2^n info bytes. + // Each byte is initialized to 0, meaning the corresponding Node is empty. Set to 1 means the + // corresponding node contains data. Set to 2 means the corresponding Node is filled, but it + // actually belongs to the previous position and was pushed out because that place is already + // taken. + // + // * infoSentinel: Sentinel byte set to 1, so that iterator's ++ can stop at end() without the + // need for a idx variable. + // + // According to STL, order of templates has effect on throughput. That's why I've moved the + // boolean to the front. 
+ // https://www.reddit.com/r/cpp/comments/ahp6iu/compile_time_binary_size_reductions_and_cs_future/eeguck4/ + template + class Table + : public WrapHash, + public WrapKeyEqual, + detail::NodeAllocator< + typename std::conditional< + std::is_void::value, Key, + robin_hood::pair::type, T>>::type, + 4, 16384, IsFlat> { + public: + static constexpr bool is_flat = IsFlat; + static constexpr bool is_map = !std::is_void::value; + static constexpr bool is_set = !is_map; + static constexpr bool is_transparent = + has_is_transparent::value && has_is_transparent::value; + + using key_type = Key; + using mapped_type = T; + using value_type = typename std::conditional< + is_set, Key, + robin_hood::pair::type, T>>::type; + using size_type = size_t; + using hasher = Hash; + using key_equal = KeyEqual; + using Self = Table; + + private: + static_assert(MaxLoadFactor100 > 10 && MaxLoadFactor100 < 100, + "MaxLoadFactor100 needs to be >10 && < 100"); + + using WHash = WrapHash; + using WKeyEqual = WrapKeyEqual; + + // configuration defaults + + // make sure we have 8 elements, needed to quickly rehash mInfo + static constexpr size_t InitialNumElements = sizeof(uint64_t); + static constexpr uint32_t InitialInfoNumBits = 5; + static constexpr uint8_t InitialInfoInc = 1U << InitialInfoNumBits; + static constexpr size_t InfoMask = InitialInfoInc - 1U; + static constexpr uint8_t InitialInfoHashShift = 0; + using DataPool = detail::NodeAllocator; + + // type needs to be wider than uint8_t. + using InfoType = uint32_t; + + // DataNode //////////////////////////////////////////////////////// + + // Primary template for the data node. We have special implementations for small and big + // objects. For large objects it is assumed that swap() is fairly slow, so we allocate these + // on the heap so swap merely swaps a pointer. + template + class DataNode {}; + + // Small: just allocate on the stack. + template + class DataNode final { + public: + template + explicit DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, Args&&... args) noexcept( + noexcept(value_type(std::forward(args)...))) + : mData(std::forward(args)...) 
{} + + DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode&& n) noexcept( + std::is_nothrow_move_constructible::value) + : mData(std::move(n.mData)) {} + + // doesn't do anything + void destroy(M& ROBIN_HOOD_UNUSED(map) /*unused*/) noexcept {} + void destroyDoNotDeallocate() noexcept {} + + value_type const* operator->() const noexcept { + return &mData; + } + value_type* operator->() noexcept { + return &mData; + } + + const value_type& operator*() const noexcept { + return mData; + } + + value_type& operator*() noexcept { + return mData; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getFirst() noexcept { + return mData.first; + } + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getFirst() noexcept { + return mData; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type + getFirst() const noexcept { + return mData.first; + } + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getFirst() const noexcept { + return mData; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getSecond() noexcept { + return mData.second; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getSecond() const noexcept { + return mData.second; + } + + void swap(DataNode& o) noexcept( + noexcept(std::declval().swap(std::declval()))) { + mData.swap(o.mData); + } + + private: + value_type mData; + }; + + // big object: allocate on heap. + template + class DataNode { + public: + template + explicit DataNode(M& map, Args&&... args) + : mData(map.allocate()) { + ::new (static_cast(mData)) value_type(std::forward(args)...); + } + + DataNode(M& ROBIN_HOOD_UNUSED(map) /*unused*/, DataNode&& n) noexcept + : mData(std::move(n.mData)) {} + + void destroy(M& map) noexcept { + // don't deallocate, just put it into list of datapool. + mData->~value_type(); + map.deallocate(mData); + } + + void destroyDoNotDeallocate() noexcept { + mData->~value_type(); + } + + value_type const* operator->() const noexcept { + return mData; + } + + value_type* operator->() noexcept { + return mData; + } + + const value_type& operator*() const { + return *mData; + } + + value_type& operator*() { + return *mData; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getFirst() noexcept { + return mData->first; + } + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getFirst() noexcept { + return *mData; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type + getFirst() const noexcept { + return mData->first; + } + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getFirst() const noexcept { + return *mData; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getSecond() noexcept { + return mData->second; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::type getSecond() const noexcept { + return mData->second; + } + + void swap(DataNode& o) noexcept { + using std::swap; + swap(mData, o.mData); + } + + private: + value_type* mData; + }; + + using Node = DataNode; + + // helpers for insertKeyPrepareEmptySpot: extract first entry (only const required) + ROBIN_HOOD(NODISCARD) key_type const& getFirstConst(Node const& n) const noexcept { + return n.getFirst(); + } + + // in case we have void mapped_type, we are not using a pair, thus we just route k through. + // No need to disable this because it's just not used if not applicable. 
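+ // Sketch of how the getFirstConst overloads resolve (illustrative only, not part of the
+ // upstream header):
+ //
+ //   robin_hood::unordered_set<int>      s; // value_type == key_type: getFirstConst(42) -> 42
+ //   robin_hood::unordered_map<int, int> m; // value_type == pair<int, int>: key is vt.first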
+ ROBIN_HOOD(NODISCARD) key_type const& getFirstConst(key_type const& k) const noexcept { + return k; + } + + // in case we have non-void mapped_type, we have a standard robin_hood::pair + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::value, key_type const&>::type + getFirstConst(value_type const& vt) const noexcept { + return vt.first; + } + + // Cloner ////////////////////////////////////////////////////////// + + template + struct Cloner; + + // fast path: Just copy data, without allocating anything. + template + struct Cloner { + void operator()(M const& source, M& target) const { + auto const* const src = reinterpret_cast(source.mKeyVals); + auto* tgt = reinterpret_cast(target.mKeyVals); + auto const numElementsWithBuffer = target.calcNumElementsWithBuffer(target.mMask + 1); + std::copy(src, src + target.calcNumBytesTotal(numElementsWithBuffer), tgt); + } + }; + + template + struct Cloner { + void operator()(M const& s, M& t) const { + auto const numElementsWithBuffer = t.calcNumElementsWithBuffer(t.mMask + 1); + std::copy(s.mInfo, s.mInfo + t.calcNumBytesInfo(numElementsWithBuffer), t.mInfo); + + for (size_t i = 0; i < numElementsWithBuffer; ++i) { + if (t.mInfo[i]) { + ::new (static_cast(t.mKeyVals + i)) Node(t, *s.mKeyVals[i]); + } + } + } + }; + + // Destroyer /////////////////////////////////////////////////////// + + template + struct Destroyer {}; + + template + struct Destroyer { + void nodes(M& m) const noexcept { + m.mNumElements = 0; + } + + void nodesDoNotDeallocate(M& m) const noexcept { + m.mNumElements = 0; + } + }; + + template + struct Destroyer { + void nodes(M& m) const noexcept { + m.mNumElements = 0; + // clear also resets mInfo to 0, that's sometimes not necessary. + auto const numElementsWithBuffer = m.calcNumElementsWithBuffer(m.mMask + 1); + + for (size_t idx = 0; idx < numElementsWithBuffer; ++idx) { + if (0 != m.mInfo[idx]) { + Node& n = m.mKeyVals[idx]; + n.destroy(m); + n.~Node(); + } + } + } + + void nodesDoNotDeallocate(M& m) const noexcept { + m.mNumElements = 0; + // clear also resets mInfo to 0, that's sometimes not necessary. + auto const numElementsWithBuffer = m.calcNumElementsWithBuffer(m.mMask + 1); + for (size_t idx = 0; idx < numElementsWithBuffer; ++idx) { + if (0 != m.mInfo[idx]) { + Node& n = m.mKeyVals[idx]; + n.destroyDoNotDeallocate(); + n.~Node(); + } + } + } + }; + + // Iter //////////////////////////////////////////////////////////// + + struct fast_forward_tag {}; + + // generic iterator for both const_iterator and iterator. + template + // NOLINTNEXTLINE(hicpp-special-member-functions,cppcoreguidelines-special-member-functions) + class Iter { + private: + using NodePtr = typename std::conditional::type; + + public: + using difference_type = std::ptrdiff_t; + using value_type = typename Self::value_type; + using reference = typename std::conditional::type; + using pointer = typename std::conditional::type; + using iterator_category = std::forward_iterator_tag; + + // default constructed iterator can be compared to itself, but WON'T return true when + // compared to end(). + Iter() = default; + + // Rule of zero: nothing specified. The conversion constructor is only enabled for + // iterator to const_iterator, so it doesn't accidentally work as a copy ctor. + + // Conversion constructor from iterator to const_iterator. 
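+ // Usage sketch of the iterator -> const_iterator conversion below (illustrative only,
+ // not part of the upstream header):
+ //
+ //   robin_hood::unordered_map<int, int> m{{1, 2}};
+ //   robin_hood::unordered_map<int, int>::iterator it = m.begin();
+ //   robin_hood::unordered_map<int, int>::const_iterator cit = it; // implicit, one-way
+ //   // it = cit;                                                  // intentionally not allowed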
+ template ::type> + // NOLINTNEXTLINE(hicpp-explicit-conversions) + Iter(Iter const& other) noexcept + : mKeyVals(other.mKeyVals) + , mInfo(other.mInfo) {} + + Iter(NodePtr valPtr, uint8_t const* infoPtr) noexcept + : mKeyVals(valPtr) + , mInfo(infoPtr) {} + + Iter(NodePtr valPtr, uint8_t const* infoPtr, + fast_forward_tag ROBIN_HOOD_UNUSED(tag) /*unused*/) noexcept + : mKeyVals(valPtr) + , mInfo(infoPtr) { + fastForward(); + } + + template ::type> + Iter& operator=(Iter const& other) noexcept { + mKeyVals = other.mKeyVals; + mInfo = other.mInfo; + return *this; + } + + // prefix increment. Undefined behavior if we are at end()! + Iter& operator++() noexcept { + mInfo++; + mKeyVals++; + fastForward(); + return *this; + } + + Iter operator++(int) noexcept { + Iter tmp = *this; + ++(*this); + return tmp; + } + + reference operator*() const { + return **mKeyVals; + } + + pointer operator->() const { + return &**mKeyVals; + } + + template + bool operator==(Iter const& o) const noexcept { + return mKeyVals == o.mKeyVals; + } + + template + bool operator!=(Iter const& o) const noexcept { + return mKeyVals != o.mKeyVals; + } + + private: + // fast forward to the next non-free info byte + // I've tried a few variants that don't depend on intrinsics, but unfortunately they are + // quite a bit slower than this one. So I've reverted that change again. See map_benchmark. + void fastForward() noexcept { + size_t n = 0; + while (0U == (n = detail::unaligned_load(mInfo))) { + mInfo += sizeof(size_t); + mKeyVals += sizeof(size_t); + } +#if defined(ROBIN_HOOD_DISABLE_INTRINSICS) + // we know for certain that within the next 8 bytes we'll find a non-zero one. + if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load(mInfo))) { + mInfo += 4; + mKeyVals += 4; + } + if (ROBIN_HOOD_UNLIKELY(0U == detail::unaligned_load(mInfo))) { + mInfo += 2; + mKeyVals += 2; + } + if (ROBIN_HOOD_UNLIKELY(0U == *mInfo)) { + mInfo += 1; + mKeyVals += 1; + } +#else +# if ROBIN_HOOD(LITTLE_ENDIAN) + auto inc = ROBIN_HOOD_COUNT_TRAILING_ZEROES(n) / 8; +# else + auto inc = ROBIN_HOOD_COUNT_LEADING_ZEROES(n) / 8; +# endif + mInfo += inc; + mKeyVals += inc; +#endif + } + + friend class Table; + NodePtr mKeyVals{ nullptr }; + uint8_t const* mInfo{ nullptr }; + }; + + //////////////////////////////////////////////////////////////////// + + // highly performance relevant code. + // Lower bits are used for indexing into the array (2^n size) + // The upper 1-5 bits need to be a reasonable good hash, to save comparisons. + template + void keyToIdx(HashKey&& key, size_t* idx, InfoType* info) const { + // In addition to whatever hash is used, add another mul & shift so we get better hashing. + // This serves as a bad hash prevention, if the given data is + // badly mixed. + auto h = static_cast(WHash::operator()(key)); + + h *= mHashMultiplier; + h ^= h >> 33U; + + // the lower InitialInfoNumBits are reserved for info. + *info = mInfoInc + static_cast((h & InfoMask) >> mInfoHashShift); + *idx = (static_cast(h) >> InitialInfoNumBits) & mMask; + } + + // forwards the index by one, wrapping around at the end + void next(InfoType* info, size_t* idx) const noexcept { + *idx = *idx + 1; + *info += mInfoInc; + } + + void nextWhileLess(InfoType* info, size_t* idx) const noexcept { + // unrolling this by hand did not bring any speedups. + while (*info < mInfo[*idx]) { + next(info, idx); + } + } + + // Shift everything up by one element. Tries to move stuff around. 
+ void + shiftUp(size_t startIdx, + size_t const insertion_idx) noexcept(std::is_nothrow_move_assignable::value) { + auto idx = startIdx; + ::new (static_cast(mKeyVals + idx)) Node(std::move(mKeyVals[idx - 1])); + while (--idx != insertion_idx) { + mKeyVals[idx] = std::move(mKeyVals[idx - 1]); + } + + idx = startIdx; + while (idx != insertion_idx) { + ROBIN_HOOD_COUNT(shiftUp) + mInfo[idx] = static_cast(mInfo[idx - 1] + mInfoInc); + if (ROBIN_HOOD_UNLIKELY(mInfo[idx] + mInfoInc > 0xFF)) { + mMaxNumElementsAllowed = 0; + } + --idx; + } + } + + void shiftDown(size_t idx) noexcept(std::is_nothrow_move_assignable::value) { + // until we find one that is either empty or has zero offset. + // TODO(martinus) we don't need to move everything, just the last one for the same + // bucket. + mKeyVals[idx].destroy(*this); + + // until we find one that is either empty or has zero offset. + while (mInfo[idx + 1] >= 2 * mInfoInc) { + ROBIN_HOOD_COUNT(shiftDown) + mInfo[idx] = static_cast(mInfo[idx + 1] - mInfoInc); + mKeyVals[idx] = std::move(mKeyVals[idx + 1]); + ++idx; + } + + mInfo[idx] = 0; + // don't destroy, we've moved it + // mKeyVals[idx].destroy(*this); + mKeyVals[idx].~Node(); + } + + // copy of find(), except that it returns iterator instead of const_iterator. + template + ROBIN_HOOD(NODISCARD) + size_t findIdx(Other const& key) const { + size_t idx{}; + InfoType info{}; + keyToIdx(key, &idx, &info); + + do { + // unrolling this twice gives a bit of a speedup. More unrolling did not help. + if (info == mInfo[idx] && + ROBIN_HOOD_LIKELY(WKeyEqual::operator()(key, mKeyVals[idx].getFirst()))) { + return idx; + } + next(&info, &idx); + if (info == mInfo[idx] && + ROBIN_HOOD_LIKELY(WKeyEqual::operator()(key, mKeyVals[idx].getFirst()))) { + return idx; + } + next(&info, &idx); + } while (info <= mInfo[idx]); + + // nothing found! + return mMask == 0 ? 0 + : static_cast(std::distance( + mKeyVals, reinterpret_cast_no_cast_align_warning(mInfo))); + } + + void cloneData(const Table& o) { + Cloner()(o, *this); + } + + // inserts a keyval that is guaranteed to be new, e.g. when the hashmap is resized. + // @return True on success, false if something went wrong + void insert_move(Node&& keyval) { + // we don't retry, fail if overflowing + // don't need to check max num elements + if (0 == mMaxNumElementsAllowed && !try_increase_info()) { + throwOverflowError(); + } + + size_t idx{}; + InfoType info{}; + keyToIdx(keyval.getFirst(), &idx, &info); + + // skip forward. Use <= because we are certain that the element is not there. + while (info <= mInfo[idx]) { + idx = idx + 1; + info += mInfoInc; + } + + // key not found, so we are now exactly where we want to insert it. + auto const insertion_idx = idx; + auto const insertion_info = static_cast(info); + if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) { + mMaxNumElementsAllowed = 0; + } + + // find an empty spot + while (0 != mInfo[idx]) { + next(&info, &idx); + } + + auto& l = mKeyVals[insertion_idx]; + if (idx == insertion_idx) { + ::new (static_cast(&l)) Node(std::move(keyval)); + } + else { + shiftUp(idx, insertion_idx); + l = std::move(keyval); + } + + // put at empty spot + mInfo[insertion_idx] = insertion_info; + + ++mNumElements; + } + + public: + using iterator = Iter; + using const_iterator = Iter; + + Table() noexcept(noexcept(Hash()) && noexcept(KeyEqual())) + : WHash() + , WKeyEqual() { + ROBIN_HOOD_TRACE(this) + } + + // Creates an empty hash map. Nothing is allocated yet, this happens at the first insert. 
+ // This tremendously speeds up ctor & dtor of a map that never receives an element. The + // penalty is payed at the first insert, and not before. Lookup of this empty map works + // because everybody points to DummyInfoByte::b. parameter bucket_count is dictated by the + // standard, but we can ignore it. + explicit Table( + size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/, const Hash& h = Hash{}, + const KeyEqual& equal = KeyEqual{}) noexcept(noexcept(Hash(h)) && noexcept(KeyEqual(equal))) + : WHash(h) + , WKeyEqual(equal) { + ROBIN_HOOD_TRACE(this) + } + + template + Table(Iter first, Iter last, size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0, + const Hash& h = Hash{}, const KeyEqual& equal = KeyEqual{}) + : WHash(h) + , WKeyEqual(equal) { + ROBIN_HOOD_TRACE(this) + insert(first, last); + } + + Table(std::initializer_list initlist, + size_t ROBIN_HOOD_UNUSED(bucket_count) /*unused*/ = 0, const Hash& h = Hash{}, + const KeyEqual& equal = KeyEqual{}) + : WHash(h) + , WKeyEqual(equal) { + ROBIN_HOOD_TRACE(this) + insert(initlist.begin(), initlist.end()); + } + + Table(Table&& o) noexcept + : WHash(std::move(static_cast(o))) + , WKeyEqual(std::move(static_cast(o))) + , DataPool(std::move(static_cast(o))) { + ROBIN_HOOD_TRACE(this) + if (o.mMask) { + mHashMultiplier = std::move(o.mHashMultiplier); + mKeyVals = std::move(o.mKeyVals); + mInfo = std::move(o.mInfo); + mNumElements = std::move(o.mNumElements); + mMask = std::move(o.mMask); + mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed); + mInfoInc = std::move(o.mInfoInc); + mInfoHashShift = std::move(o.mInfoHashShift); + // set other's mask to 0 so its destructor won't do anything + o.init(); + } + } + + Table& operator=(Table&& o) noexcept { + ROBIN_HOOD_TRACE(this) + if (&o != this) { + if (o.mMask) { + // only move stuff if the other map actually has some data + destroy(); + mHashMultiplier = std::move(o.mHashMultiplier); + mKeyVals = std::move(o.mKeyVals); + mInfo = std::move(o.mInfo); + mNumElements = std::move(o.mNumElements); + mMask = std::move(o.mMask); + mMaxNumElementsAllowed = std::move(o.mMaxNumElementsAllowed); + mInfoInc = std::move(o.mInfoInc); + mInfoHashShift = std::move(o.mInfoHashShift); + WHash::operator=(std::move(static_cast(o))); + WKeyEqual::operator=(std::move(static_cast(o))); + DataPool::operator=(std::move(static_cast(o))); + + o.init(); + + } + else { + // nothing in the other map => just clear us. + clear(); + } + } + return *this; + } + + Table(const Table& o) + : WHash(static_cast(o)) + , WKeyEqual(static_cast(o)) + , DataPool(static_cast(o)) { + ROBIN_HOOD_TRACE(this) + if (!o.empty()) { + // not empty: create an exact copy. it is also possible to just iterate through all + // elements and insert them, but copying is probably faster. + + auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1); + auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer); + + ROBIN_HOOD_LOG("std::malloc " << numBytesTotal << " = calcNumBytesTotal(" + << numElementsWithBuffer << ")") + mHashMultiplier = o.mHashMultiplier; + mKeyVals = static_cast( + detail::assertNotNull(std::malloc(numBytesTotal))); + // no need for calloc because clonData does memcpy + mInfo = reinterpret_cast(mKeyVals + numElementsWithBuffer); + mNumElements = o.mNumElements; + mMask = o.mMask; + mMaxNumElementsAllowed = o.mMaxNumElementsAllowed; + mInfoInc = o.mInfoInc; + mInfoHashShift = o.mInfoHashShift; + cloneData(o); + } + } + + // Creates a copy of the given map. Copy constructor of each entry is used. 
+ // Not sure why clang-tidy thinks this doesn't handle self assignment, it does + // NOLINTNEXTLINE(bugprone-unhandled-self-assignment,cert-oop54-cpp) + Table& operator=(Table const& o) { + ROBIN_HOOD_TRACE(this) + if (&o == this) { + // prevent assigning of itself + return *this; + } + + // we keep using the old allocator and not assign the new one, because we want to keep + // the memory available. when it is the same size. + if (o.empty()) { + if (0 == mMask) { + // nothing to do, we are empty too + return *this; + } + + // not empty: destroy what we have there + // clear also resets mInfo to 0, that's sometimes not necessary. + destroy(); + init(); + WHash::operator=(static_cast(o)); + WKeyEqual::operator=(static_cast(o)); + DataPool::operator=(static_cast(o)); + + return *this; + } + + // clean up old stuff + Destroyer::value>{}.nodes(*this); + + if (mMask != o.mMask) { + // no luck: we don't have the same array size allocated, so we need to realloc. + if (0 != mMask) { + // only deallocate if we actually have data! + ROBIN_HOOD_LOG("std::free") + std::free(mKeyVals); + } + + auto const numElementsWithBuffer = calcNumElementsWithBuffer(o.mMask + 1); + auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer); + ROBIN_HOOD_LOG("std::malloc " << numBytesTotal << " = calcNumBytesTotal(" + << numElementsWithBuffer << ")") + mKeyVals = static_cast( + detail::assertNotNull(std::malloc(numBytesTotal))); + + // no need for calloc here because cloneData performs a memcpy. + mInfo = reinterpret_cast(mKeyVals + numElementsWithBuffer); + // sentinel is set in cloneData + } + WHash::operator=(static_cast(o)); + WKeyEqual::operator=(static_cast(o)); + DataPool::operator=(static_cast(o)); + mHashMultiplier = o.mHashMultiplier; + mNumElements = o.mNumElements; + mMask = o.mMask; + mMaxNumElementsAllowed = o.mMaxNumElementsAllowed; + mInfoInc = o.mInfoInc; + mInfoHashShift = o.mInfoHashShift; + cloneData(o); + + return *this; + } + + // Swaps everything between the two maps. + void swap(Table& o) { + ROBIN_HOOD_TRACE(this) + using std::swap; + swap(o, *this); + } + + // Clears all data, without resizing. + void clear() { + ROBIN_HOOD_TRACE(this) + if (empty()) { + // don't do anything! also important because we don't want to write to + // DummyInfoByte::b, even though we would just write 0 to it. + return; + } + + Destroyer::value>{}.nodes(*this); + + auto const numElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1); + // clear everything, then set the sentinel again + uint8_t const z = 0; + std::fill(mInfo, mInfo + calcNumBytesInfo(numElementsWithBuffer), z); + mInfo[numElementsWithBuffer] = 1; + + mInfoInc = InitialInfoInc; + mInfoHashShift = InitialInfoHashShift; + } + + // Destroys the map and all it's contents. + ~Table() { + ROBIN_HOOD_TRACE(this) + destroy(); + } + + // Checks if both tables contain the same entries. Order is irrelevant. 
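+ // Usage sketch (illustrative only, not part of the upstream header): insertion order does
+ // not matter for equality, only the set of entries does; cost is one lookup per entry of
+ // `other`.
+ //
+ //   robin_hood::unordered_map<int, int> a{{1, 10}, {2, 20}};
+ //   robin_hood::unordered_map<int, int> b{{2, 20}, {1, 10}};
+ //   assert(a == b);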
+ bool operator==(const Table& other) const { + ROBIN_HOOD_TRACE(this) + if (other.size() != size()) { + return false; + } + for (auto const& otherEntry : other) { + if (!has(otherEntry)) { + return false; + } + } + + return true; + } + + bool operator!=(const Table& other) const { + ROBIN_HOOD_TRACE(this) + return !operator==(other); + } + + template + typename std::enable_if::value, Q&>::type operator[](const key_type& key) { + ROBIN_HOOD_TRACE(this) + auto idxAndState = insertKeyPrepareEmptySpot(key); + switch (idxAndState.second) { + case InsertionState::key_found: + break; + + case InsertionState::new_node: + ::new (static_cast(&mKeyVals[idxAndState.first])) + Node(*this, std::piecewise_construct, std::forward_as_tuple(key), + std::forward_as_tuple()); + break; + + case InsertionState::overwrite_node: + mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct, + std::forward_as_tuple(key), std::forward_as_tuple()); + break; + + case InsertionState::overflow_error: + throwOverflowError(); + } + + return mKeyVals[idxAndState.first].getSecond(); + } + + template + typename std::enable_if::value, Q&>::type operator[](key_type&& key) { + ROBIN_HOOD_TRACE(this) + auto idxAndState = insertKeyPrepareEmptySpot(key); + switch (idxAndState.second) { + case InsertionState::key_found: + break; + + case InsertionState::new_node: + ::new (static_cast(&mKeyVals[idxAndState.first])) + Node(*this, std::piecewise_construct, std::forward_as_tuple(std::move(key)), + std::forward_as_tuple()); + break; + + case InsertionState::overwrite_node: + mKeyVals[idxAndState.first] = + Node(*this, std::piecewise_construct, std::forward_as_tuple(std::move(key)), + std::forward_as_tuple()); + break; + + case InsertionState::overflow_error: + throwOverflowError(); + } + + return mKeyVals[idxAndState.first].getSecond(); + } + + template + void insert(Iter first, Iter last) { + for (; first != last; ++first) { + // value_type ctor needed because this might be called with std::pair's + insert(value_type(*first)); + } + } + + void insert(std::initializer_list ilist) { + for (auto&& vt : ilist) { + insert(std::move(vt)); + } + } + + template + std::pair emplace(Args&&... args) { + ROBIN_HOOD_TRACE(this) + Node n { + *this, std::forward(args)... + }; + auto idxAndState = insertKeyPrepareEmptySpot(getFirstConst(n)); + switch (idxAndState.second) { + case InsertionState::key_found: + n.destroy(*this); + break; + + case InsertionState::new_node: + ::new (static_cast(&mKeyVals[idxAndState.first])) Node(*this, std::move(n)); + break; + + case InsertionState::overwrite_node: + mKeyVals[idxAndState.first] = std::move(n); + break; + + case InsertionState::overflow_error: + n.destroy(*this); + throwOverflowError(); + break; + } + + return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first), + InsertionState::key_found != idxAndState.second); + } + + template + iterator emplace_hint(const_iterator position, Args&&... args) { + (void)position; + return emplace(std::forward(args)...).first; + } + + template + std::pair try_emplace(const key_type& key, Args&&... args) { + return try_emplace_impl(key, std::forward(args)...); + } + + template + std::pair try_emplace(key_type&& key, Args&&... args) { + return try_emplace_impl(std::move(key), std::forward(args)...); + } + + template + iterator try_emplace(const_iterator hint, const key_type& key, Args&&... 
args) { + (void)hint; + return try_emplace_impl(key, std::forward(args)...).first; + } + + template + iterator try_emplace(const_iterator hint, key_type&& key, Args&&... args) { + (void)hint; + return try_emplace_impl(std::move(key), std::forward(args)...).first; + } + + template + std::pair insert_or_assign(const key_type& key, Mapped&& obj) { + return insertOrAssignImpl(key, std::forward(obj)); + } + + template + std::pair insert_or_assign(key_type&& key, Mapped&& obj) { + return insertOrAssignImpl(std::move(key), std::forward(obj)); + } + + template + iterator insert_or_assign(const_iterator hint, const key_type& key, Mapped&& obj) { + (void)hint; + return insertOrAssignImpl(key, std::forward(obj)).first; + } + + template + iterator insert_or_assign(const_iterator hint, key_type&& key, Mapped&& obj) { + (void)hint; + return insertOrAssignImpl(std::move(key), std::forward(obj)).first; + } + + std::pair insert(const value_type& keyval) { + ROBIN_HOOD_TRACE(this) + return emplace(keyval); + } + + iterator insert(const_iterator hint, const value_type& keyval) { + (void)hint; + return emplace(keyval).first; + } + + std::pair insert(value_type&& keyval) { + return emplace(std::move(keyval)); + } + + iterator insert(const_iterator hint, value_type&& keyval) { + (void)hint; + return emplace(std::move(keyval)).first; + } + + // Returns 1 if key is found, 0 otherwise. + size_t count(const key_type& key) const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + auto kv = mKeyVals + findIdx(key); + if (kv != reinterpret_cast_no_cast_align_warning(mInfo)) { + return 1; + } + return 0; + } + + template + // NOLINTNEXTLINE(modernize-use-nodiscard) + typename std::enable_if::type count(const OtherKey& key) const { + ROBIN_HOOD_TRACE(this) + auto kv = mKeyVals + findIdx(key); + if (kv != reinterpret_cast_no_cast_align_warning(mInfo)) { + return 1; + } + return 0; + } + + bool contains(const key_type& key) const { // NOLINT(modernize-use-nodiscard) + return 1U == count(key); + } + + template + // NOLINTNEXTLINE(modernize-use-nodiscard) + typename std::enable_if::type contains(const OtherKey& key) const { + return 1U == count(key); + } + + // Returns a reference to the value found for key. + // Throws std::out_of_range if element cannot be found + template + // NOLINTNEXTLINE(modernize-use-nodiscard) + typename std::enable_if::value, Q&>::type at(key_type const& key) { + ROBIN_HOOD_TRACE(this) + auto kv = mKeyVals + findIdx(key); + if (kv == reinterpret_cast_no_cast_align_warning(mInfo)) { + doThrow("key not found"); + } + return kv->getSecond(); + } + + // Returns a reference to the value found for key. 
+ // Throws std::out_of_range if element cannot be found + template + // NOLINTNEXTLINE(modernize-use-nodiscard) + typename std::enable_if::value, Q const&>::type at(key_type const& key) const { + ROBIN_HOOD_TRACE(this) + auto kv = mKeyVals + findIdx(key); + if (kv == reinterpret_cast_no_cast_align_warning(mInfo)) { + doThrow("key not found"); + } + return kv->getSecond(); + } + + const_iterator find(const key_type& key) const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + const size_t idx = findIdx(key); + return const_iterator{ mKeyVals + idx, mInfo + idx }; + } + + template + const_iterator find(const OtherKey& key, is_transparent_tag /*unused*/) const { + ROBIN_HOOD_TRACE(this) + const size_t idx = findIdx(key); + return const_iterator{ mKeyVals + idx, mInfo + idx }; + } + + template + typename std::enable_if::type // NOLINT(modernize-use-nodiscard) + find(const OtherKey& key) const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + const size_t idx = findIdx(key); + return const_iterator{ mKeyVals + idx, mInfo + idx }; + } + + iterator find(const key_type& key) { + ROBIN_HOOD_TRACE(this) + const size_t idx = findIdx(key); + return iterator{ mKeyVals + idx, mInfo + idx }; + } + + template + iterator find(const OtherKey& key, is_transparent_tag /*unused*/) { + ROBIN_HOOD_TRACE(this) + const size_t idx = findIdx(key); + return iterator{ mKeyVals + idx, mInfo + idx }; + } + + template + typename std::enable_if::type find(const OtherKey& key) { + ROBIN_HOOD_TRACE(this) + const size_t idx = findIdx(key); + return iterator{ mKeyVals + idx, mInfo + idx }; + } + + iterator begin() { + ROBIN_HOOD_TRACE(this) + if (empty()) { + return end(); + } + return iterator(mKeyVals, mInfo, fast_forward_tag{}); + } + const_iterator begin() const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return cbegin(); + } + const_iterator cbegin() const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + if (empty()) { + return cend(); + } + return const_iterator(mKeyVals, mInfo, fast_forward_tag{}); + } + + iterator end() { + ROBIN_HOOD_TRACE(this) + // no need to supply valid info pointer: end() must not be dereferenced, and only node + // pointer is compared. + return iterator{ reinterpret_cast_no_cast_align_warning(mInfo), nullptr }; + } + const_iterator end() const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return cend(); + } + const_iterator cend() const { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return const_iterator{ reinterpret_cast_no_cast_align_warning(mInfo), nullptr }; + } + + iterator erase(const_iterator pos) { + ROBIN_HOOD_TRACE(this) + // its safe to perform const cast here + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast) + return erase(iterator{ const_cast(pos.mKeyVals), const_cast(pos.mInfo) }); + } + + // Erases element at pos, returns iterator to the next element. + iterator erase(iterator pos) { + ROBIN_HOOD_TRACE(this) + // we assume that pos always points to a valid entry, and not end(). 
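+ // Because the map uses backward shift deletion, the erased slot may be refilled by a later
+ // element of the same bucket chain, in which case `pos` itself is already the next valid
+ // position. The conventional erase-while-iterating loop (illustrative sketch, not part of
+ // the upstream header; shouldErase is a hypothetical predicate):
+ //
+ //   for (auto it = m.begin(); it != m.end();) {
+ //       if (shouldErase(*it)) { it = m.erase(it); } else { ++it; }
+ //   }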
+ auto const idx = static_cast(pos.mKeyVals - mKeyVals); + + shiftDown(idx); + --mNumElements; + + if (*pos.mInfo) { + // we've backward shifted, return this again + return pos; + } + + // no backward shift, return next element + return ++pos; + } + + size_t erase(const key_type& key) { + ROBIN_HOOD_TRACE(this) + size_t idx {}; + InfoType info{}; + keyToIdx(key, &idx, &info); + + // check while info matches with the source idx + do { + if (info == mInfo[idx] && WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) { + shiftDown(idx); + --mNumElements; + return 1; + } + next(&info, &idx); + } while (info <= mInfo[idx]); + + // nothing found to delete + return 0; + } + + // reserves space for the specified number of elements. Makes sure the old data fits. + // exactly the same as reserve(c). + void rehash(size_t c) { + // forces a reserve + reserve(c, true); + } + + // reserves space for the specified number of elements. Makes sure the old data fits. + // Exactly the same as rehash(c). Use rehash(0) to shrink to fit. + void reserve(size_t c) { + // reserve, but don't force rehash + reserve(c, false); + } + + // If possible reallocates the map to a smaller one. This frees the underlying table. + // Does not do anything if load_factor is too large for decreasing the table's size. + void compact() { + ROBIN_HOOD_TRACE(this) + auto newSize = InitialNumElements; + while (calcMaxNumElementsAllowed(newSize) < mNumElements && newSize != 0) { + newSize *= 2; + } + if (ROBIN_HOOD_UNLIKELY(newSize == 0)) { + throwOverflowError(); + } + + ROBIN_HOOD_LOG("newSize > mMask + 1: " << newSize << " > " << mMask << " + 1") + + // only actually do anything when the new size is bigger than the old one. This prevents to + // continuously allocate for each reserve() call. + if (newSize < mMask + 1) { + rehashPowerOfTwo(newSize, true); + } + } + + size_type size() const noexcept { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return mNumElements; + } + + size_type max_size() const noexcept { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return static_cast(-1); + } + + ROBIN_HOOD(NODISCARD) bool empty() const noexcept { + ROBIN_HOOD_TRACE(this) + return 0 == mNumElements; + } + + float max_load_factor() const noexcept { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return MaxLoadFactor100 / 100.0F; + } + + // Average number of elements per bucket. Since we allow only 1 per bucket + float load_factor() const noexcept { // NOLINT(modernize-use-nodiscard) + ROBIN_HOOD_TRACE(this) + return static_cast(size()) / static_cast(mMask + 1); + } + + ROBIN_HOOD(NODISCARD) size_t mask() const noexcept { + ROBIN_HOOD_TRACE(this) + return mMask; + } + + ROBIN_HOOD(NODISCARD) size_t calcMaxNumElementsAllowed(size_t maxElements) const noexcept { + if (ROBIN_HOOD_LIKELY(maxElements <= (std::numeric_limits::max)() / 100)) { + return maxElements * MaxLoadFactor100 / 100; + } + + // we might be a bit inprecise, but since maxElements is quite large that doesn't matter + return (maxElements / 100) * MaxLoadFactor100; + } + + ROBIN_HOOD(NODISCARD) size_t calcNumBytesInfo(size_t numElements) const noexcept { + // we add a uint64_t, which houses the sentinel (first byte) and padding so we can load + // 64bit types. 
+ return numElements + sizeof(uint64_t); + } + + ROBIN_HOOD(NODISCARD) + size_t calcNumElementsWithBuffer(size_t numElements) const noexcept { + auto maxNumElementsAllowed = calcMaxNumElementsAllowed(numElements); + return numElements + (std::min)(maxNumElementsAllowed, (static_cast(0xFF))); + } + + // calculation only allowed for 2^n values + ROBIN_HOOD(NODISCARD) size_t calcNumBytesTotal(size_t numElements) const { +#if ROBIN_HOOD(BITNESS) == 64 + return numElements * sizeof(Node) + calcNumBytesInfo(numElements); +#else + // make sure we're doing 64bit operations, so we are at least safe against 32bit overflows. + auto const ne = static_cast(numElements); + auto const s = static_cast(sizeof(Node)); + auto const infos = static_cast(calcNumBytesInfo(numElements)); + + auto const total64 = ne * s + infos; + auto const total = static_cast(total64); + + if (ROBIN_HOOD_UNLIKELY(static_cast(total) != total64)) { + throwOverflowError(); + } + return total; +#endif + } + + private: + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::value, bool>::type has(const value_type& e) const { + ROBIN_HOOD_TRACE(this) + auto it = find(e.first); + return it != end() && it->second == e.second; + } + + template + ROBIN_HOOD(NODISCARD) + typename std::enable_if::value, bool>::type has(const value_type& e) const { + ROBIN_HOOD_TRACE(this) + return find(e) != end(); + } + + void reserve(size_t c, bool forceRehash) { + ROBIN_HOOD_TRACE(this) + auto const minElementsAllowed = (std::max)(c, mNumElements); + auto newSize = InitialNumElements; + while (calcMaxNumElementsAllowed(newSize) < minElementsAllowed && newSize != 0) { + newSize *= 2; + } + if (ROBIN_HOOD_UNLIKELY(newSize == 0)) { + throwOverflowError(); + } + + ROBIN_HOOD_LOG("newSize > mMask + 1: " << newSize << " > " << mMask << " + 1") + + // only actually do anything when the new size is bigger than the old one. This prevents to + // continuously allocate for each reserve() call. + if (forceRehash || newSize > mMask + 1) { + rehashPowerOfTwo(newSize, false); + } + } + + // reserves space for at least the specified number of elements. + // only works if numBuckets if power of two + // True on success, false otherwise + void rehashPowerOfTwo(size_t numBuckets, bool forceFree) { + ROBIN_HOOD_TRACE(this) + + Node* const oldKeyVals = mKeyVals; + uint8_t const* const oldInfo = mInfo; + + const size_t oldMaxElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1); + + // resize operation: move stuff + initData(numBuckets); + if (oldMaxElementsWithBuffer > 1) { + for (size_t i = 0; i < oldMaxElementsWithBuffer; ++i) { + if (oldInfo[i] != 0) { + // might throw an exception, which is really bad since we are in the middle of + // moving stuff. + insert_move(std::move(oldKeyVals[i])); + // destroy the node but DON'T destroy the data. + oldKeyVals[i].~Node(); + } + } + + // this check is not necessary as it's guarded by the previous if, but it helps + // silence g++'s overeager "attempt to free a non-heap object 'map' + // [-Werror=free-nonheap-object]" warning. 
+ if (oldKeyVals != reinterpret_cast_no_cast_align_warning(&mMask)) { + // don't destroy old data: put it into the pool instead + if (forceFree) { + std::free(oldKeyVals); + } + else { + DataPool::addOrFree(oldKeyVals, calcNumBytesTotal(oldMaxElementsWithBuffer)); + } + } + } + } + + ROBIN_HOOD(NOINLINE) void throwOverflowError() const { +#if ROBIN_HOOD(HAS_EXCEPTIONS) + throw std::overflow_error("robin_hood::map overflow"); +#else + abort(); +#endif + } + + template + std::pair try_emplace_impl(OtherKey&& key, Args&&... args) { + ROBIN_HOOD_TRACE(this) + auto idxAndState = insertKeyPrepareEmptySpot(key); + switch (idxAndState.second) { + case InsertionState::key_found: + break; + + case InsertionState::new_node: + ::new (static_cast(&mKeyVals[idxAndState.first])) Node( + *this, std::piecewise_construct, std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(args)...)); + break; + + case InsertionState::overwrite_node: + mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct, + std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(args)...)); + break; + + case InsertionState::overflow_error: + throwOverflowError(); + break; + } + + return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first), + InsertionState::key_found != idxAndState.second); + } + + template + std::pair insertOrAssignImpl(OtherKey&& key, Mapped&& obj) { + ROBIN_HOOD_TRACE(this) + auto idxAndState = insertKeyPrepareEmptySpot(key); + switch (idxAndState.second) { + case InsertionState::key_found: + mKeyVals[idxAndState.first].getSecond() = std::forward(obj); + break; + + case InsertionState::new_node: + ::new (static_cast(&mKeyVals[idxAndState.first])) Node( + *this, std::piecewise_construct, std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(obj))); + break; + + case InsertionState::overwrite_node: + mKeyVals[idxAndState.first] = Node(*this, std::piecewise_construct, + std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(obj))); + break; + + case InsertionState::overflow_error: + throwOverflowError(); + break; + } + + return std::make_pair(iterator(mKeyVals + idxAndState.first, mInfo + idxAndState.first), + InsertionState::key_found != idxAndState.second); + } + + void initData(size_t max_elements) { + mNumElements = 0; + mMask = max_elements - 1; + mMaxNumElementsAllowed = calcMaxNumElementsAllowed(max_elements); + + auto const numElementsWithBuffer = calcNumElementsWithBuffer(max_elements); + + // malloc & zero mInfo. Faster than calloc everything. + auto const numBytesTotal = calcNumBytesTotal(numElementsWithBuffer); + ROBIN_HOOD_LOG("std::calloc " << numBytesTotal << " = calcNumBytesTotal(" + << numElementsWithBuffer << ")") + mKeyVals = reinterpret_cast( + detail::assertNotNull(std::malloc(numBytesTotal))); + mInfo = reinterpret_cast(mKeyVals + numElementsWithBuffer); + std::memset(mInfo, 0, numBytesTotal - numElementsWithBuffer * sizeof(Node)); + + // set sentinel + mInfo[numElementsWithBuffer] = 1; + + mInfoInc = InitialInfoInc; + mInfoHashShift = InitialInfoHashShift; + } + + enum class InsertionState { overflow_error, key_found, new_node, overwrite_node }; + + // Finds key, and if not already present prepares a spot where to pot the key & value. + // This potentially shifts nodes out of the way, updates mInfo and number of inserted + // elements, so the only operation left to do is create/assign a new node at that spot. 
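+ // Callers dispatch on the returned InsertionState; condensed sketch of the pattern used by
+ // operator[], emplace() and try_emplace() above (illustrative only):
+ //
+ //   auto idxAndState = insertKeyPrepareEmptySpot(key);
+ //   switch (idxAndState.second) {
+ //   case InsertionState::key_found:      /* node already exists, nothing to build */ break;
+ //   case InsertionState::new_node:       /* placement-new a Node at .first */        break;
+ //   case InsertionState::overwrite_node: /* move-assign over the shifted slot */     break;
+ //   case InsertionState::overflow_error: throwOverflowError();                       break;
+ //   }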
+ template + std::pair insertKeyPrepareEmptySpot(OtherKey&& key) { + for (int i = 0; i < 256; ++i) { + size_t idx{}; + InfoType info{}; + keyToIdx(key, &idx, &info); + nextWhileLess(&info, &idx); + + // while we potentially have a match + while (info == mInfo[idx]) { + if (WKeyEqual::operator()(key, mKeyVals[idx].getFirst())) { + // key already exists, do NOT insert. + // see http://en.cppreference.com/w/cpp/container/unordered_map/insert + return std::make_pair(idx, InsertionState::key_found); + } + next(&info, &idx); + } + + // unlikely that this evaluates to true + if (ROBIN_HOOD_UNLIKELY(mNumElements >= mMaxNumElementsAllowed)) { + if (!increase_size()) { + return std::make_pair(size_t(0), InsertionState::overflow_error); + } + continue; + } + + // key not found, so we are now exactly where we want to insert it. + auto const insertion_idx = idx; + auto const insertion_info = info; + if (ROBIN_HOOD_UNLIKELY(insertion_info + mInfoInc > 0xFF)) { + mMaxNumElementsAllowed = 0; + } + + // find an empty spot + while (0 != mInfo[idx]) { + next(&info, &idx); + } + + if (idx != insertion_idx) { + shiftUp(idx, insertion_idx); + } + // put at empty spot + mInfo[insertion_idx] = static_cast(insertion_info); + ++mNumElements; + return std::make_pair(insertion_idx, idx == insertion_idx + ? InsertionState::new_node + : InsertionState::overwrite_node); + } + + // enough attempts failed, so finally give up. + return std::make_pair(size_t(0), InsertionState::overflow_error); + } + + bool try_increase_info() { + ROBIN_HOOD_LOG("mInfoInc=" << mInfoInc << ", numElements=" << mNumElements + << ", maxNumElementsAllowed=" + << calcMaxNumElementsAllowed(mMask + 1)) + if (mInfoInc <= 2) { + // need to be > 2 so that shift works (otherwise undefined behavior!) + return false; + } + // we got space left, try to make info smaller + mInfoInc = static_cast(mInfoInc >> 1U); + + // remove one bit of the hash, leaving more space for the distance info. + // This is extremely fast because we can operate on 8 bytes at once. + ++mInfoHashShift; + auto const numElementsWithBuffer = calcNumElementsWithBuffer(mMask + 1); + + for (size_t i = 0; i < numElementsWithBuffer; i += 8) { + auto val = unaligned_load(mInfo + i); + val = (val >> 1U) & UINT64_C(0x7f7f7f7f7f7f7f7f); + std::memcpy(mInfo + i, &val, sizeof(val)); + } + // update sentinel, which might have been cleared out! + mInfo[numElementsWithBuffer] = 1; + + mMaxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1); + return true; + } + + // True if resize was possible, false otherwise + bool increase_size() { + // nothing allocated yet? just allocate InitialNumElements + if (0 == mMask) { + initData(InitialNumElements); + return true; + } + + auto const maxNumElementsAllowed = calcMaxNumElementsAllowed(mMask + 1); + if (mNumElements < maxNumElementsAllowed && try_increase_info()) { + return true; + } + + ROBIN_HOOD_LOG("mNumElements=" << mNumElements << ", maxNumElementsAllowed=" + << maxNumElementsAllowed << ", load=" + << (static_cast(mNumElements) * 100.0 / + (static_cast(mMask) + 1))) + + if (mNumElements * 2 < calcMaxNumElementsAllowed(mMask + 1)) { + // we have to resize, even though there would still be plenty of space left! + // Try to rehash instead. Delete freed memory so we don't steadyily increase mem in case + // we have to rehash a few times + nextHashMultiplier(); + rehashPowerOfTwo(mMask + 1, true); + } + else { + // we've reached the capacity of the map, so the hash seems to work nice. Keep using it. 
+ rehashPowerOfTwo((mMask + 1) * 2, false); + } + return true; + } + + void nextHashMultiplier() { + // adding an *even* number, so that the multiplier will always stay odd. This is necessary + // so that the hash stays a mixing function (and thus doesn't have any information loss). + mHashMultiplier += UINT64_C(0xc4ceb9fe1a85ec54); + } + + void destroy() { + if (0 == mMask) { + // don't deallocate! + return; + } + + Destroyer::value>{} + .nodesDoNotDeallocate(*this); + + // This protection against not deleting mMask shouldn't be needed as it's sufficiently + // protected with the 0==mMask check, but I have this anyways because g++ 7 otherwise + // reports a compile error: attempt to free a non-heap object 'fm' + // [-Werror=free-nonheap-object] + if (mKeyVals != reinterpret_cast_no_cast_align_warning(&mMask)) { + ROBIN_HOOD_LOG("std::free") + std::free(mKeyVals); + } + } + + void init() noexcept { + mKeyVals = reinterpret_cast_no_cast_align_warning(&mMask); + mInfo = reinterpret_cast(&mMask); + mNumElements = 0; + mMask = 0; + mMaxNumElementsAllowed = 0; + mInfoInc = InitialInfoInc; + mInfoHashShift = InitialInfoHashShift; + } + + // members are sorted so no padding occurs + uint64_t mHashMultiplier = UINT64_C(0xc4ceb9fe1a85ec53); // 8 byte 8 + Node* mKeyVals = reinterpret_cast_no_cast_align_warning(&mMask); // 8 byte 16 + uint8_t* mInfo = reinterpret_cast(&mMask); // 8 byte 24 + size_t mNumElements = 0; // 8 byte 32 + size_t mMask = 0; // 8 byte 40 + size_t mMaxNumElementsAllowed = 0; // 8 byte 48 + InfoType mInfoInc = InitialInfoInc; // 4 byte 52 + InfoType mInfoHashShift = InitialInfoHashShift; // 4 byte 56 + // 16 byte 56 if NodeAllocator + }; + + } // namespace detail + + // map + + template , + typename KeyEqual = std::equal_to, size_t MaxLoadFactor100 = 80> + using unordered_flat_map = detail::Table; + + template , + typename KeyEqual = std::equal_to, size_t MaxLoadFactor100 = 80> + using unordered_node_map = detail::Table; + + template , + typename KeyEqual = std::equal_to, size_t MaxLoadFactor100 = 80> + using unordered_map = + detail::Table) <= sizeof(size_t) * 6 && + std::is_nothrow_move_constructible>::value && + std::is_nothrow_move_assignable>::value, + MaxLoadFactor100, Key, T, Hash, KeyEqual>; + + // set + + template , typename KeyEqual = std::equal_to, + size_t MaxLoadFactor100 = 80> + using unordered_flat_set = detail::Table; + + template , typename KeyEqual = std::equal_to, + size_t MaxLoadFactor100 = 80> + using unordered_node_set = detail::Table; + + template , typename KeyEqual = std::equal_to, + size_t MaxLoadFactor100 = 80> + using unordered_set = detail::Table::value && + std::is_nothrow_move_assignable::value, + MaxLoadFactor100, Key, void, Hash, KeyEqual>; + +} // namespace robin_hood + +#endif diff --git a/3rdparty/unordered_dense/include/unordered_dense.h b/3rdparty/unordered_dense/include/unordered_dense.h deleted file mode 100644 index 13484a9817..0000000000 --- a/3rdparty/unordered_dense/include/unordered_dense.h +++ /dev/null @@ -1,2101 +0,0 @@ -///////////////////////// ankerl::unordered_dense::{map, set} ///////////////////////// - -// A fast & densely stored hashmap and hashset based on robin-hood backward shift deletion. -// Version 4.5.0 -// https://github.com/martinus/unordered_dense -// -// Licensed under the MIT License . 
-// SPDX-License-Identifier: MIT -// Copyright (c) 2022-2024 Martin Leitner-Ankerl -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -#ifndef ANKERL_UNORDERED_DENSE_H -#define ANKERL_UNORDERED_DENSE_H - -// see https://semver.org/spec/v2.0.0.html -#define ANKERL_UNORDERED_DENSE_VERSION_MAJOR 4 // NOLINT(cppcoreguidelines-macro-usage) incompatible API changes -#define ANKERL_UNORDERED_DENSE_VERSION_MINOR 5 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible functionality -#define ANKERL_UNORDERED_DENSE_VERSION_PATCH 0 // NOLINT(cppcoreguidelines-macro-usage) backwards compatible bug fixes - -// API versioning with inline namespace, see https://www.foonathan.net/2018/11/inline-namespaces/ - -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch) v##major##_##minor##_##patch -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -#define ANKERL_UNORDERED_DENSE_VERSION_CONCAT(major, minor, patch) ANKERL_UNORDERED_DENSE_VERSION_CONCAT1(major, minor, patch) -#define ANKERL_UNORDERED_DENSE_NAMESPACE \ - ANKERL_UNORDERED_DENSE_VERSION_CONCAT( \ - ANKERL_UNORDERED_DENSE_VERSION_MAJOR, ANKERL_UNORDERED_DENSE_VERSION_MINOR, ANKERL_UNORDERED_DENSE_VERSION_PATCH) - -#if defined(_MSVC_LANG) -# define ANKERL_UNORDERED_DENSE_CPP_VERSION _MSVC_LANG -#else -# define ANKERL_UNORDERED_DENSE_CPP_VERSION __cplusplus -#endif - -#if defined(__GNUC__) -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -# define ANKERL_UNORDERED_DENSE_PACK(decl) decl __attribute__((__packed__)) -#elif defined(_MSC_VER) -// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) -# define ANKERL_UNORDERED_DENSE_PACK(decl) __pragma(pack(push, 1)) decl __pragma(pack(pop)) -#endif - -// exceptions -#if defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND) -# define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 1 // NOLINT(cppcoreguidelines-macro-usage) -#else -# define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 0 // NOLINT(cppcoreguidelines-macro-usage) -#endif -#ifdef _MSC_VER -# define ANKERL_UNORDERED_DENSE_NOINLINE __declspec(noinline) -#else -# define ANKERL_UNORDERED_DENSE_NOINLINE __attribute__((noinline)) -#endif - -// defined in unordered_dense.cpp -#if !defined(ANKERL_UNORDERED_DENSE_EXPORT) -# define ANKERL_UNORDERED_DENSE_EXPORT -#endif - -#if ANKERL_UNORDERED_DENSE_CPP_VERSION < 201703L -# error ankerl::unordered_dense requires C++17 or higher -#else -# include // for array -# include // for uint64_t, uint32_t, uint8_t, UINT64_C -# 
-
-#if defined(_MSVC_LANG)
-#    define ANKERL_UNORDERED_DENSE_CPP_VERSION _MSVC_LANG
-#else
-#    define ANKERL_UNORDERED_DENSE_CPP_VERSION __cplusplus
-#endif
-
-#if defined(__GNUC__)
-// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
-#    define ANKERL_UNORDERED_DENSE_PACK(decl) decl __attribute__((__packed__))
-#elif defined(_MSC_VER)
-// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
-#    define ANKERL_UNORDERED_DENSE_PACK(decl) __pragma(pack(push, 1)) decl __pragma(pack(pop))
-#endif
-
-// exceptions
-#if defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)
-#    define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 1 // NOLINT(cppcoreguidelines-macro-usage)
-#else
-#    define ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() 0 // NOLINT(cppcoreguidelines-macro-usage)
-#endif
-#ifdef _MSC_VER
-#    define ANKERL_UNORDERED_DENSE_NOINLINE __declspec(noinline)
-#else
-#    define ANKERL_UNORDERED_DENSE_NOINLINE __attribute__((noinline))
-#endif
-
-// defined in unordered_dense.cpp
-#if !defined(ANKERL_UNORDERED_DENSE_EXPORT)
-#    define ANKERL_UNORDERED_DENSE_EXPORT
-#endif
-
-#if ANKERL_UNORDERED_DENSE_CPP_VERSION < 201703L
-#    error ankerl::unordered_dense requires C++17 or higher
-#else
-#    include <array>            // for array
-#    include <cstdint>          // for uint64_t, uint32_t, uint8_t, UINT64_C
-#    include <cstring>          // for size_t, memcpy, memset
-#    include <functional>       // for equal_to, hash
-#    include <initializer_list> // for initializer_list
-#    include <iterator>         // for pair, distance
-#    include <limits>           // for numeric_limits
-#    include <memory>           // for allocator, allocator_traits, shared_ptr
-#    include <optional>         // for optional
-#    include <stdexcept>        // for out_of_range
-#    include <string>           // for basic_string
-#    include <string_view>      // for basic_string_view, hash
-#    include <tuple>            // for forward_as_tuple
-#    include <type_traits>      // for enable_if_t, declval, conditional_t, ena...
-#    include <utility>          // for forward, exchange, pair, as_const, piece...
-#    include <vector>           // for vector
-#    if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS() == 0
-#        include <cstdlib> // for abort
-#    endif
-
-#    if defined(__has_include) && !defined(ANKERL_UNORDERED_DENSE_DISABLE_PMR)
-#        if __has_include(<memory_resource>)
-#            define ANKERL_UNORDERED_DENSE_PMR std::pmr // NOLINT(cppcoreguidelines-macro-usage)
-#            include <memory_resource>                  // for polymorphic_allocator
-#        elif __has_include(<experimental/memory_resource>)
-#            define ANKERL_UNORDERED_DENSE_PMR std::experimental::pmr // NOLINT(cppcoreguidelines-macro-usage)
-#            include <experimental/memory_resource>                   // for polymorphic_allocator
-#        endif
-#    endif
-
-#    if defined(_MSC_VER) && defined(_M_X64)
-#        include <intrin.h>
-#        pragma intrinsic(_umul128)
-#    endif
-
-#    if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__clang__)
-#        define ANKERL_UNORDERED_DENSE_LIKELY(x) __builtin_expect(x, 1)   // NOLINT(cppcoreguidelines-macro-usage)
-#        define ANKERL_UNORDERED_DENSE_UNLIKELY(x) __builtin_expect(x, 0) // NOLINT(cppcoreguidelines-macro-usage)
-#    else
-#        define ANKERL_UNORDERED_DENSE_LIKELY(x) (x)   // NOLINT(cppcoreguidelines-macro-usage)
-#        define ANKERL_UNORDERED_DENSE_UNLIKELY(x) (x) // NOLINT(cppcoreguidelines-macro-usage)
-#    endif
-
-namespace ankerl::unordered_dense {
-inline namespace ANKERL_UNORDERED_DENSE_NAMESPACE {
-
-namespace detail {
-
-#    if ANKERL_UNORDERED_DENSE_HAS_EXCEPTIONS()
-
-// make sure this is not inlined as it is slow and dramatically enlarges code, thus making other
-// inlinings more difficult. Throws are also generally the slow path.
-[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_key_not_found() {
-    throw std::out_of_range("ankerl::unordered_dense::map::at(): key not found");
-}
-[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_bucket_overflow() {
-    throw std::overflow_error("ankerl::unordered_dense: reached max bucket size, cannot increase size");
-}
-[[noreturn]] inline ANKERL_UNORDERED_DENSE_NOINLINE void on_error_too_many_elements() {
-    throw std::out_of_range("ankerl::unordered_dense::map::replace(): too many elements");
-}
-
-#    else
-
-[[noreturn]] inline void on_error_key_not_found() {
-    abort();
-}
-[[noreturn]] inline void on_error_bucket_overflow() {
-    abort();
-}
-[[noreturn]] inline void on_error_too_many_elements() {
-    abort();
-}
-
-#    endif
-
-} // namespace detail
-
-// hash ///////////////////////////////////////////////////////////////////////
-
-// This is a stripped-down implementation of wyhash: https://github.com/wangyi-fudan/wyhash
-// No big-endian support (because different values on different machines don't matter),
-// hardcodes seed and the secret, reformats the code, and clang-tidy fixes.
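For illustration (not part of the diff): the core of the wyhash code below is a single multiply-and-xor "MUM" step. A minimal standalone sketch of that step, assuming a compiler with __uint128_t (mum_mix is a made-up name; the real functions are mum/mix below):

#include <cstdint>

inline std::uint64_t mum_mix(std::uint64_t a, std::uint64_t b) {
#if defined(__SIZEOF_INT128__)
    // 64x64 -> 128-bit multiply, folded back to 64 bits by xoring the high and low halves.
    __uint128_t r = static_cast<__uint128_t>(a) * b;
    return static_cast<std::uint64_t>(r) ^ static_cast<std::uint64_t>(r >> 64U);
#else
#    error "sketch assumes __uint128_t; the real header below has MSVC and portable fallbacks"
#endif
}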
-namespace detail::wyhash {
-
-inline void mum(uint64_t* a, uint64_t* b) {
-#    if defined(__SIZEOF_INT128__)
-    __uint128_t r = *a;
-    r *= *b;
-    *a = static_cast<uint64_t>(r);
-    *b = static_cast<uint64_t>(r >> 64U);
-#    elif defined(_MSC_VER) && defined(_M_X64)
-    *a = _umul128(*a, *b, b);
-#    else
-    uint64_t ha = *a >> 32U;
-    uint64_t hb = *b >> 32U;
-    uint64_t la = static_cast<uint32_t>(*a);
-    uint64_t lb = static_cast<uint32_t>(*b);
-    uint64_t hi{};
-    uint64_t lo{};
-    uint64_t rh = ha * hb;
-    uint64_t rm0 = ha * lb;
-    uint64_t rm1 = hb * la;
-    uint64_t rl = la * lb;
-    uint64_t t = rl + (rm0 << 32U);
-    auto c = static_cast<uint64_t>(t < rl);
-    lo = t + (rm1 << 32U);
-    c += static_cast<uint64_t>(lo < t);
-    hi = rh + (rm0 >> 32U) + (rm1 >> 32U) + c;
-    *a = lo;
-    *b = hi;
-#    endif
-}
-
-// multiply and xor mix function, aka MUM
-[[nodiscard]] inline auto mix(uint64_t a, uint64_t b) -> uint64_t {
-    mum(&a, &b);
-    return a ^ b;
-}
-
-// read functions. WARNING: we don't care about endianness, so results are different on big endian!
-[[nodiscard]] inline auto r8(const uint8_t* p) -> uint64_t {
-    uint64_t v{};
-    std::memcpy(&v, p, 8U);
-    return v;
-}
-
-[[nodiscard]] inline auto r4(const uint8_t* p) -> uint64_t {
-    uint32_t v{};
-    std::memcpy(&v, p, 4);
-    return v;
-}
-
-// reads 1, 2, or 3 bytes
-[[nodiscard]] inline auto r3(const uint8_t* p, size_t k) -> uint64_t {
-    return (static_cast<uint64_t>(p[0]) << 16U) | (static_cast<uint64_t>(p[k >> 1U]) << 8U) | p[k - 1];
-}
-
-[[maybe_unused]] [[nodiscard]] inline auto hash(void const* key, size_t len) -> uint64_t {
-    static constexpr auto secret = std::array{UINT64_C(0xa0761d6478bd642f),
-                                              UINT64_C(0xe7037ed1a0b428db),
-                                              UINT64_C(0x8ebc6af09c88c6e3),
-                                              UINT64_C(0x589965cc75374cc3)};
-
-    auto const* p = static_cast<uint8_t const*>(key);
-    uint64_t seed = secret[0];
-    uint64_t a{};
-    uint64_t b{};
-    if (ANKERL_UNORDERED_DENSE_LIKELY(len <= 16)) {
-        if (ANKERL_UNORDERED_DENSE_LIKELY(len >= 4)) {
-            a = (r4(p) << 32U) | r4(p + ((len >> 3U) << 2U));
-            b = (r4(p + len - 4) << 32U) | r4(p + len - 4 - ((len >> 3U) << 2U));
-        } else if (ANKERL_UNORDERED_DENSE_LIKELY(len > 0)) {
-            a = r3(p, len);
-            b = 0;
-        } else {
-            a = 0;
-            b = 0;
-        }
-    } else {
-        size_t i = len;
-        if (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 48)) {
-            uint64_t see1 = seed;
-            uint64_t see2 = seed;
-            do {
-                seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
-                see1 = mix(r8(p + 16) ^ secret[2], r8(p + 24) ^ see1);
-                see2 = mix(r8(p + 32) ^ secret[3], r8(p + 40) ^ see2);
-                p += 48;
-                i -= 48;
-            } while (ANKERL_UNORDERED_DENSE_LIKELY(i > 48));
-            seed ^= see1 ^ see2;
-        }
-        while (ANKERL_UNORDERED_DENSE_UNLIKELY(i > 16)) {
-            seed = mix(r8(p) ^ secret[1], r8(p + 8) ^ seed);
-            i -= 16;
-            p += 16;
-        }
-        a = r8(p + i - 16);
-        b = r8(p + i - 8);
-    }
-
-    return mix(secret[1] ^ len, mix(a ^ secret[1], b ^ seed));
-}
-
-[[nodiscard]] inline auto hash(uint64_t x) -> uint64_t {
-    return detail::wyhash::mix(x, UINT64_C(0x9E3779B97F4A7C15));
-}
-
-} // namespace detail::wyhash
-
-ANKERL_UNORDERED_DENSE_EXPORT template <typename T, typename Enable = void>
-struct hash {
-    auto operator()(T const& obj) const noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>())))
-        -> uint64_t {
-        return std::hash<T>{}(obj);
-    }
-};
-
-template <typename T>
-struct hash<T, typename std::hash<T>::is_avalanching> {
-    using is_avalanching = void;
-    auto operator()(T const& obj) const noexcept(noexcept(std::declval<std::hash<T>>().operator()(std::declval<T const&>())))
-        -> uint64_t {
-        return std::hash<T>{}(obj);
-    }
-};
-
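For illustration (not part of the diff): the is_avalanching typedef used throughout these specializations is the opt-in tag the container checks before deciding whether to run an extra mixing pass over the hash value. A user-defined hasher can expose the same tag; the type my_id, its hasher, and the include path below are made up for the example.

#include <cstdint>
#include "unordered_dense.h" // assumed include path; use wherever the project vendors the header

struct my_id {
    std::uint64_t value;
    bool operator==(my_id const& other) const { return value == other.value; }
};

// Explicit specialization of the library's hash, mirroring the ones above.
template <>
struct ankerl::unordered_dense::hash<my_id> {
    using is_avalanching = void; // promise: the returned value is already well mixed
    auto operator()(my_id const& id) const noexcept -> std::uint64_t {
        return ankerl::unordered_dense::detail::wyhash::hash(id.value);
    }
};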
-template <typename CharT>
-struct hash<std::basic_string<CharT>> {
-    using is_avalanching = void;
-    auto operator()(std::basic_string<CharT> const& str) const noexcept -> uint64_t {
-        return detail::wyhash::hash(str.data(), sizeof(CharT) * str.size());
-    }
-};
-
-template <typename CharT>
-struct hash<std::basic_string_view<CharT>> {
-    using is_avalanching = void;
-    auto operator()(std::basic_string_view<CharT> const& sv) const noexcept -> uint64_t {
-        return detail::wyhash::hash(sv.data(), sizeof(CharT) * sv.size());
-    }
-};
-
-template <class T>
-struct hash<T*> {
-    using is_avalanching = void;
-    auto operator()(T* ptr) const noexcept -> uint64_t {
-        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
-        return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr));
-    }
-};
-
-template <class T>
-struct hash<std::unique_ptr<T>> {
-    using is_avalanching = void;
-    auto operator()(std::unique_ptr<T> const& ptr) const noexcept -> uint64_t {
-        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
-        return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
-    }
-};
-
-template <class T>
-struct hash<std::shared_ptr<T>> {
-    using is_avalanching = void;
-    auto operator()(std::shared_ptr<T> const& ptr) const noexcept -> uint64_t {
-        // NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
-        return detail::wyhash::hash(reinterpret_cast<uintptr_t>(ptr.get()));
-    }
-};
-
-template <typename Enum>
-struct hash<Enum, typename std::enable_if<std::is_enum<Enum>::value>::type> {
-    using is_avalanching = void;
-    auto operator()(Enum e) const noexcept -> uint64_t {
-        using underlying = typename std::underlying_type_t<Enum>;
-        return detail::wyhash::hash(static_cast<underlying>(e));
-    }
-};
-
-template <typename... Args>
-struct tuple_hash_helper {
-    // Converts the value into 64bit. If it is an integral type, just cast it. Mixing is doing the rest.
-    // If it isn't an integral we need to hash it.
-    template <typename Arg>
-    [[nodiscard]] constexpr static auto to64(Arg const& arg) -> uint64_t {
-        if constexpr (std::is_integral_v<Arg> || std::is_enum_v<Arg>) {
-            return static_cast<uint64_t>(arg);
-        } else {
-            return hash<Arg>{}(arg);
-        }
-    }
-
-    [[nodiscard]] static auto mix64(uint64_t state, uint64_t v) -> uint64_t {
-        return detail::wyhash::mix(state + v, uint64_t{0x9ddfea08eb382d69});
-    }
-
-    // Creates a buffer that holds all the data from each element of the tuple. If possible we memcpy the data directly. If
-    // not, we hash the object and use this for the array. Size of the array is known at compile time, and memcpy is optimized
-    // away, so filling the buffer is highly efficient. Finally, call wyhash with this buffer.
-    template <typename T, std::size_t... Idx>
-    [[nodiscard]] static auto calc_hash(T const& t, std::index_sequence<Idx...>) noexcept -> uint64_t {
-        auto h = uint64_t{};
-        ((h = mix64(h, to64(std::get<Idx>(t)))), ...);
-        return h;
-    }
-};
-
-template <typename... Args>
-struct hash<std::tuple<Args...>> : tuple_hash_helper<Args...> {
-    using is_avalanching = void;
-    auto operator()(std::tuple<Args...> const& t) const noexcept -> uint64_t {
-        return tuple_hash_helper<Args...>::calc_hash(t, std::index_sequence_for<Args...>{});
-    }
-};
-
-template <typename A, typename B>
-struct hash<std::pair<A, B>> : tuple_hash_helper<A, B> {
-    using is_avalanching = void;
-    auto operator()(std::pair<A, B> const& t) const noexcept -> uint64_t {
-        return tuple_hash_helper<A, B>::calc_hash(t, std::index_sequence_for<A, B>{});
-    }
-};
-
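For illustration (not part of the diff): because of the pair/tuple specializations above, composite keys hash out of the box, whereas std::unordered_map would need a hand-written hasher. The map type is the library's; the include path and the snippet itself are made up.

#include <cstdint>
#include <string>
#include <utility>
#include "unordered_dense.h" // assumed include path

int main() {
    // The pair key picks up hash<std::pair<A, B>> from above automatically.
    ankerl::unordered_dense::map<std::pair<std::uint32_t, std::string>, int> cache;
    cache[{7U, "rsx"}] = 1;
    return cache.size() == 1 ? 0 : 1;
}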
-// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
-#    define ANKERL_UNORDERED_DENSE_HASH_STATICCAST(T)                    \
-        template <>                                                      \
-        struct hash<T> {                                                 \
-            using is_avalanching = void;                                 \
-            auto operator()(T const& obj) const noexcept -> uint64_t {   \
-                return detail::wyhash::hash(static_cast<uint64_t>(obj)); \
-            }                                                            \
-        }
-
-#    if defined(__GNUC__) && !defined(__clang__)
-#        pragma GCC diagnostic push
-#        pragma GCC diagnostic ignored "-Wuseless-cast"
-#    endif
-// see https://en.cppreference.com/w/cpp/utility/hash
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(bool);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(signed char);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned char);
-#    if ANKERL_UNORDERED_DENSE_CPP_VERSION >= 202002L && defined(__cpp_char8_t)
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char8_t);
-#    endif
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char16_t);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(char32_t);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(wchar_t);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(short);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned short);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(int);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned int);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(long long);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long);
-ANKERL_UNORDERED_DENSE_HASH_STATICCAST(unsigned long long);
-
-#    if defined(__GNUC__) && !defined(__clang__)
-#        pragma GCC diagnostic pop
-#    endif
-
-// bucket_type //////////////////////////////////////////////////////////
-
-namespace bucket_type {
-
-struct standard {
-    static constexpr uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
-    static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
-
-    uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
-    uint32_t m_value_idx;            // index into the m_values vector.
-};
-
-ANKERL_UNORDERED_DENSE_PACK(struct big {
-    static constexpr uint32_t dist_inc = 1U << 8U;             // skip 1 byte fingerprint
-    static constexpr uint32_t fingerprint_mask = dist_inc - 1; // mask for 1 byte of fingerprint
-
-    uint32_t m_dist_and_fingerprint; // upper 3 byte: distance to original bucket. lower byte: fingerprint from hash
-    size_t m_value_idx;              // index into the m_values vector.
-});
-
-} // namespace bucket_type
-
-namespace detail {
-
-struct nonesuch {};
-struct default_container_t {};
-
-template <class Default, class AlwaysVoid, template <class...> class Op, class... Args>
-struct detector {
-    using value_t = std::false_type;
-    using type = Default;
-};
-
-template <class Default, template <class...> class Op, class... Args>
-struct detector<Default, std::void_t<Op<Args...>>, Op, Args...> {
-    using value_t = std::true_type;
-    using type = Op<Args...>;
-};
-
-template