diff --git a/.dockerignore b/.dockerignore index 64a58a78..4627a217 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,5 @@ build*/ +docs/ test/ .cache/ diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7f78c354..666887d9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -38,6 +38,10 @@ on: env: BRANCH_NAME: ${{ github.head_ref || github.ref_name }} +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} + cancel-in-progress: true + jobs: ubuntu-latest-cmake: runs-on: ubuntu-latest @@ -92,6 +96,123 @@ jobs: path: | sd-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-${{ steps.system-info.outputs.OS_TYPE }}-${{ steps.system-info.outputs.OS_NAME }}-${{ steps.system-info.outputs.OS_VERSION }}-${{ steps.system-info.outputs.CPU_ARCH }}.zip + ubuntu-latest-cmake-vulkan: + runs-on: ubuntu-latest + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v3 + with: + submodules: recursive + + - name: Dependencies + id: depends + run: | + sudo apt-get update + sudo apt-get install build-essential libvulkan-dev glslc + + - name: Build + id: cmake_build + run: | + mkdir build + cd build + cmake .. -DSD_BUILD_SHARED_LIBS=ON -DSD_VULKAN=ON + cmake --build . --config Release + + - name: Get commit hash + id: commit + if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} + uses: pr-mpt/actions-commit-hash@v2 + + - name: Fetch system info + id: system-info + run: | + echo "CPU_ARCH=`uname -m`" >> "$GITHUB_OUTPUT" + echo "OS_NAME=`lsb_release -s -i`" >> "$GITHUB_OUTPUT" + echo "OS_VERSION=`lsb_release -s -r`" >> "$GITHUB_OUTPUT" + echo "OS_TYPE=`uname -s`" >> "$GITHUB_OUTPUT" + + - name: Pack artifacts + id: pack_artifacts + if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} + run: | + cp ggml/LICENSE ./build/bin/ggml.txt + cp LICENSE ./build/bin/stable-diffusion.cpp.txt + zip -j sd-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-${{ steps.system-info.outputs.OS_TYPE }}-${{ steps.system-info.outputs.OS_NAME }}-${{ steps.system-info.outputs.OS_VERSION }}-${{ steps.system-info.outputs.CPU_ARCH }}-vulkan.zip ./build/bin/* + + - name: Upload artifacts + if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} + uses: actions/upload-artifact@v4 + with: + name: sd-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-${{ steps.system-info.outputs.OS_TYPE }}-${{ steps.system-info.outputs.OS_NAME }}-${{ steps.system-info.outputs.OS_VERSION }}-${{ steps.system-info.outputs.CPU_ARCH }}-vulkan.zip + path: | + sd-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-${{ steps.system-info.outputs.OS_TYPE }}-${{ steps.system-info.outputs.OS_NAME }}-${{ steps.system-info.outputs.OS_VERSION }}-${{ steps.system-info.outputs.CPU_ARCH }}-vulkan.zip + + build-and-push-docker-images: + name: Build and push container images + runs-on: ubuntu-latest + + permissions: + contents: read + packages: write + id-token: write + attestations: write + artifact-metadata: write + + strategy: + matrix: + variant: [musa, sycl, vulkan] + + env: + REGISTRY: ghcr.io + IMAGE_NAME: ${{ github.repository }} + + steps: + - name: Checkout + uses: actions/checkout@v6 + with: + submodules: recursive + + - name: Get commit hash + id: commit + if: ${{ ( github.event_name == 'push' && 
github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} + uses: pr-mpt/actions-commit-hash@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to the container registry + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@v1.3.1 + with: + # this might remove tools that are actually needed, + # if set to "true" but frees about 6 GB + tool-cache: false + + - name: Build and push Docker image + id: build-push + uses: docker/build-push-action@v6 + with: + platforms: linux/amd64 + push: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} + file: Dockerfile.${{ matrix.variant }} + tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.BRANCH_NAME }}-${{ matrix.variant }} + labels: ${{ steps.meta.outputs.labels }} + annotations: ${{ steps.meta.outputs.annotations }} + macOS-latest-cmake: runs-on: macos-latest @@ -146,7 +267,7 @@ jobs: sd-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-${{ steps.system-info.outputs.OS_TYPE }}-${{ steps.system-info.outputs.OS_NAME }}-${{ steps.system-info.outputs.OS_VERSION }}-${{ steps.system-info.outputs.CPU_ARCH }}.zip windows-latest-cmake: - runs-on: windows-2025 + runs-on: windows-2022 env: VULKAN_VERSION: 1.4.328.1 @@ -163,8 +284,8 @@ jobs: - build: "avx512" defines: "-DGGML_NATIVE=OFF -DGGML_AVX512=ON -DGGML_AVX=ON -DGGML_AVX2=ON -DSD_BUILD_SHARED_LIBS=ON" - build: "cuda12" - defines: "-DSD_CUDA=ON -DSD_BUILD_SHARED_LIBS=ON -DCMAKE_CUDA_ARCHITECTURES='61;70;75;80;86;89;90;100;120'" - - build: 'vulkan' + defines: "-DSD_CUDA=ON -DSD_BUILD_SHARED_LIBS=ON -DCMAKE_CUDA_ARCHITECTURES='61;70;75;80;86;89;90;100;120' -DCMAKE_CUDA_FLAGS='-Xcudafe \"--diag_suppress=177\" -Xcudafe \"--diag_suppress=550\"'" + - build: "vulkan" defines: "-DSD_VULKAN=ON -DSD_BUILD_SHARED_LIBS=ON" steps: - name: Clone @@ -191,13 +312,17 @@ jobs: Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}" Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin" + - name: Activate MSVC environment + id: msvc_dev_cmd + uses: ilammy/msvc-dev-cmd@v1 + - name: Build id: cmake_build run: | mkdir build cd build - cmake .. ${{ matrix.defines }} - cmake --build . --config Release + cmake .. -DCMAKE_CXX_FLAGS='/bigobj' -G Ninja -DCMAKE_C_COMPILER=cl.exe -DCMAKE_CXX_COMPILER=cl.exe -DCMAKE_BUILD_TYPE=Release ${{ matrix.defines }} + cmake --build . 
- name: Check AVX512F support id: check_avx512f @@ -360,6 +485,146 @@ jobs: path: | sd-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-win-rocm-x64.zip + ubuntu-latest-rocm: + runs-on: ubuntu-latest + container: rocm/dev-ubuntu-24.04:7.2 + + env: + ROCM_VERSION: "7.2" + UBUNTU_VERSION: "24.04" + GPU_TARGETS: "gfx1151;gfx1150;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201" + + steps: + - run: apt-get update && apt-get install -y git + - name: Clone + id: checkout + uses: actions/checkout@v6 + with: + submodules: recursive + + - name: Free disk space + run: | + # Remove preinstalled SDKs and caches not needed for this job + sudo rm -rf /usr/share/dotnet || true + sudo rm -rf /usr/local/lib/android || true + sudo rm -rf /opt/ghc || true + sudo rm -rf /usr/local/.ghcup || true + sudo rm -rf /opt/hostedtoolcache || true + + # Remove old package lists and caches + sudo rm -rf /var/lib/apt/lists/* || true + sudo apt clean + + - name: Dependencies + id: depends + run: | + sudo apt-get update + sudo apt install -y \ + cmake \ + hip-dev \ + hipblas-dev \ + ninja-build \ + rocm-dev \ + zip + # Clean apt caches to recover disk space + sudo apt clean + sudo rm -rf /var/lib/apt/lists/* || true + + - name: Setup ROCm Environment + run: | + # Add ROCm to PATH for current session + echo "/opt/rocm/bin" >> $GITHUB_PATH + + # Build regex pattern from ${{ env.GPU_TARGETS }} (match target as substring) + TARGET_REGEX="($(printf '%s' "${{ env.GPU_TARGETS }}" | sed 's/;/|/g'))" + + # Remove library files for architectures we're not building for to save disk space + echo "Cleaning up unneeded architecture files..." + cd /opt/rocm/lib/rocblas/library + # Keep only our target architectures + for file in *; do + if printf '%s' "$file" | grep -q 'gfx'; then + if ! printf '%s' "$file" | grep -Eq "$TARGET_REGEX"; then + echo "Removing $file" && + sudo rm -f "$file"; + fi + fi + done + + cd /opt/rocm/lib/hipblaslt/library + for file in *; do + if printf '%s' "$file" | grep -q 'gfx'; then + if ! printf '%s' "$file" | grep -Eq "$TARGET_REGEX"; then + echo "Removing $file" && + sudo rm -f "$file"; + fi + fi + done + + - name: Build + id: cmake_build + run: | + mkdir build + cd build + cmake .. -G Ninja \ + -DCMAKE_CXX_COMPILER=amdclang++ \ + -DCMAKE_C_COMPILER=amdclang \ + -DCMAKE_BUILD_TYPE=Release \ + -DSD_HIPBLAS=ON \ + -DGPU_TARGETS="${{ env.GPU_TARGETS }}" \ + -DAMDGPU_TARGETS="${{ env.GPU_TARGETS }}" \ + -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -DSD_BUILD_SHARED_LIBS=ON + cmake --build . 
--config Release + + - name: Get commit hash + id: commit + if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} + uses: pr-mpt/actions-commit-hash@v2 + + - name: Prepare artifacts + id: prepare_artifacts + if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} + run: | + # Copy licenses + cp ggml/LICENSE ./build/bin/ggml.txt + cp LICENSE ./build/bin/stable-diffusion.cpp.txt + + # Move ROCm runtime libraries (to avoid double space consumption) + sudo mv /opt/rocm/lib/librocsparse.so* ./build/bin/ + sudo mv /opt/rocm/lib/libhsa-runtime64.so* ./build/bin/ + sudo mv /opt/rocm/lib/libamdhip64.so* ./build/bin/ + sudo mv /opt/rocm/lib/libhipblas.so* ./build/bin/ + sudo mv /opt/rocm/lib/libhipblaslt.so* ./build/bin/ + sudo mv /opt/rocm/lib/librocblas.so* ./build/bin/ + sudo mv /opt/rocm/lib/rocblas/ ./build/bin/ + sudo mv /opt/rocm/lib/hipblaslt/ ./build/bin/ + + - name: Fetch system info + id: system-info + run: | + echo "CPU_ARCH=`uname -m`" >> "$GITHUB_OUTPUT" + echo "OS_NAME=`lsb_release -s -i`" >> "$GITHUB_OUTPUT" + echo "OS_VERSION=`lsb_release -s -r`" >> "$GITHUB_OUTPUT" + echo "OS_TYPE=`uname -s`" >> "$GITHUB_OUTPUT" + + - name: Pack artifacts + id: pack_artifacts + if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} + run: | + cp ggml/LICENSE ./build/bin/ggml.txt + cp LICENSE ./build/bin/stable-diffusion.cpp.txt + zip -y -r sd-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-${{ steps.system-info.outputs.OS_TYPE }}-Ubuntu-${{ env.UBUNTU_VERSION }}-${{ steps.system-info.outputs.CPU_ARCH }}-rocm.zip ./build/bin + + - name: Upload artifacts + if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} + uses: actions/upload-artifact@v4 + with: + name: sd-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-${{ steps.system-info.outputs.OS_TYPE }}-Ubuntu-${{ env.UBUNTU_VERSION }}-${{ steps.system-info.outputs.CPU_ARCH }}-rocm.zip + path: | + sd-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-${{ steps.system-info.outputs.OS_TYPE }}-Ubuntu-${{ env.UBUNTU_VERSION }}-${{ steps.system-info.outputs.CPU_ARCH }}-rocm.zip + release: if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} @@ -367,6 +632,9 @@ jobs: needs: - ubuntu-latest-cmake + - ubuntu-latest-cmake-vulkan + - ubuntu-latest-rocm + - build-and-push-docker-images - macOS-latest-cmake - windows-latest-cmake - windows-latest-cmake-hip diff --git a/CMakeLists.txt b/CMakeLists.txt index 8ea1c47b..b90086ea 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,6 +8,11 @@ if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE) set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") endif() +if (MSVC) + add_compile_definitions(_CRT_SECURE_NO_WARNINGS) + add_compile_definitions(_SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING) +endif() + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) @@ -82,9 +87,11 @@ endif() set(SD_LIB stable-diffusion) file(GLOB SD_LIB_SOURCES - "*.h" - "*.cpp" - "*.hpp" + "src/*.h" + "src/*.cpp" + "src/*.hpp" + "src/vocab/*.h" + "src/vocab/*.cpp" ) find_program(GIT_EXE NAMES git git.exe NO_CMAKE_FIND_ROOT_PATH) @@ 
-114,7 +121,7 @@ endif() message(STATUS "stable-diffusion.cpp commit ${SDCPP_BUILD_COMMIT}") set_property( - SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/version.cpp + SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/src/version.cpp APPEND PROPERTY COMPILE_DEFINITIONS SDCPP_BUILD_COMMIT=${SDCPP_BUILD_COMMIT} SDCPP_BUILD_VERSION=${SDCPP_BUILD_VERSION} ) @@ -177,6 +184,7 @@ endif() add_subdirectory(thirdparty) target_link_libraries(${SD_LIB} PUBLIC ggml zip) +target_include_directories(${SD_LIB} PUBLIC . include) target_include_directories(${SD_LIB} PUBLIC . thirdparty) target_compile_features(${SD_LIB} PUBLIC c_std_11 cxx_std_17) @@ -185,7 +193,7 @@ if (SD_BUILD_EXAMPLES) add_subdirectory(examples) endif() -set(SD_PUBLIC_HEADERS stable-diffusion.h) +set(SD_PUBLIC_HEADERS include/stable-diffusion.h) set_target_properties(${SD_LIB} PROPERTIES PUBLIC_HEADER "${SD_PUBLIC_HEADERS}") install(TARGETS ${SD_LIB} LIBRARY PUBLIC_HEADER) diff --git a/Dockerfile b/Dockerfile index da73021c..26a8f41c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG UBUNTU_VERSION=22.04 +ARG UBUNTU_VERSION=24.04 FROM ubuntu:$UBUNTU_VERSION AS build @@ -18,5 +18,6 @@ RUN apt-get update && \ apt-get clean COPY --from=build /sd.cpp/build/bin/sd-cli /sd-cli +COPY --from=build /sd.cpp/build/bin/sd-server /sd-server ENTRYPOINT [ "/sd-cli" ] \ No newline at end of file diff --git a/Dockerfile.musa b/Dockerfile.musa index 0eac3d7f..2d95f817 100644 --- a/Dockerfile.musa +++ b/Dockerfile.musa @@ -19,5 +19,6 @@ RUN mkdir build && cd build && \ FROM mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}-amd64 as runtime COPY --from=build /sd.cpp/build/bin/sd-cli /sd-cli +COPY --from=build /sd.cpp/build/bin/sd-server /sd-server ENTRYPOINT [ "/sd-cli" ] \ No newline at end of file diff --git a/Dockerfile.sycl b/Dockerfile.sycl index 6bcb91da..466d5517 100644 --- a/Dockerfile.sycl +++ b/Dockerfile.sycl @@ -15,5 +15,6 @@ RUN mkdir build && cd build && \ FROM intel/oneapi-basekit:${SYCL_VERSION}-devel-ubuntu24.04 AS runtime COPY --from=build /sd.cpp/build/bin/sd-cli /sd-cli +COPY --from=build /sd.cpp/build/bin/sd-server /sd-server ENTRYPOINT [ "/sd-cli" ] diff --git a/Dockerfile.vulkan b/Dockerfile.vulkan new file mode 100644 index 00000000..5ba6cb05 --- /dev/null +++ b/Dockerfile.vulkan @@ -0,0 +1,23 @@ +ARG UBUNTU_VERSION=24.04 + +FROM ubuntu:$UBUNTU_VERSION AS build + +RUN apt-get update && apt-get install -y --no-install-recommends build-essential git cmake libvulkan-dev glslc + +WORKDIR /sd.cpp + +COPY . . + +RUN cmake . 
-B ./build -DSD_VULKAN=ON +RUN cmake --build ./build --config Release --parallel + +FROM ubuntu:$UBUNTU_VERSION AS runtime + +RUN apt-get update && \ + apt-get install --yes --no-install-recommends libgomp1 libvulkan1 mesa-vulkan-drivers && \ + apt-get clean + +COPY --from=build /sd.cpp/build/bin/sd-cli /sd-cli +COPY --from=build /sd.cpp/build/bin/sd-server /sd-server + +ENTRYPOINT [ "/sd-cli" ] diff --git a/README.md b/README.md index aa29f849..b5bb4975 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,9 @@ API and command-line option may change frequently.*** ## 🔥Important News +* **2026/01/18** 🚀 stable-diffusion.cpp now supports **FLUX.2-klein** + 👉 Details: [PR #1193](https://github.com/leejet/stable-diffusion.cpp/pull/1193) + * **2025/12/01** 🚀 stable-diffusion.cpp now supports **Z-Image** 👉 Details: [PR #1020](https://github.com/leejet/stable-diffusion.cpp/pull/1020) @@ -43,16 +46,17 @@ API and command-line option may change frequently.*** - SDXL, [SDXL-Turbo](https://huggingface.co/stabilityai/sdxl-turbo) - [Some SD1.x and SDXL distilled models](./docs/distilled_sd.md) - [SD3/SD3.5](./docs/sd3.md) - - [FlUX.1-dev/FlUX.1-schnell](./docs/flux.md) - - [FLUX.2-dev](./docs/flux2.md) + - [FLUX.1-dev/FLUX.1-schnell](./docs/flux.md) + - [FLUX.2-dev/FLUX.2-klein](./docs/flux2.md) - [Chroma](./docs/chroma.md) - [Chroma1-Radiance](./docs/chroma_radiance.md) - [Qwen Image](./docs/qwen_image.md) - [Z-Image](./docs/z_image.md) - [Ovis-Image](./docs/ovis_image.md) + - [Anima](./docs/anima.md) - Image Edit Models - [FLUX.1-Kontext-dev](./docs/kontext.md) - - [Qwen Image Edit/Qwen Image Edit 2509](./docs/qwen_image_edit.md) + - [Qwen Image Edit series](./docs/qwen_image_edit.md) - Video Models - [Wan2.1/Wan2.2](./docs/wan.md) - [PhotoMaker](https://github.com/TencentARC/PhotoMaker) support. 
@@ -70,7 +74,7 @@ API and command-line option may change frequently.*** - SYCL - Supported weight formats - Pytorch checkpoint (`.ckpt` or `.pth`) - - Safetensors (`./safetensors`) + - Safetensors (`.safetensors`) - GGUF (`.gguf`) - Supported platforms - Linux @@ -127,15 +131,16 @@ If you want to improve performance or reduce VRAM/RAM usage, please refer to [pe - [SD1.x/SD2.x/SDXL](./docs/sd.md) - [SD3/SD3.5](./docs/sd3.md) -- [FlUX.1-dev/FlUX.1-schnell](./docs/flux.md) -- [FLUX.2-dev](./docs/flux2.md) +- [FLUX.1-dev/FLUX.1-schnell](./docs/flux.md) +- [FLUX.2-dev/FLUX.2-klein](./docs/flux2.md) - [FLUX.1-Kontext-dev](./docs/kontext.md) - [Chroma](./docs/chroma.md) - [🔥Qwen Image](./docs/qwen_image.md) -- [🔥Qwen Image Edit/Qwen Image Edit 2509](./docs/qwen_image_edit.md) +- [🔥Qwen Image Edit series](./docs/qwen_image_edit.md) - [🔥Wan2.1/Wan2.2](./docs/wan.md) - [🔥Z-Image](./docs/z_image.md) - [Ovis-Image](./docs/ovis_image.md) +- [Anima](./docs/anima.md) - [LoRA](./docs/lora.md) - [LCM/LCM-LoRA](./docs/lcm.md) - [Using PhotoMaker to personalize image generation](./docs/photo_maker.md) @@ -143,6 +148,7 @@ If you want to improve performance or reduce VRAM/RAM usage, please refer to [pe - [Using TAESD to faster decoding](./docs/taesd.md) - [Docker](./docs/docker.md) - [Quantization and GGUF](./docs/quantization_and_gguf.md) +- [Inference acceleration via caching](./docs/caching.md) ## Bindings diff --git a/assets/anima/example.png b/assets/anima/example.png new file mode 100644 index 00000000..ab91dbf2 Binary files /dev/null and b/assets/anima/example.png differ diff --git a/assets/flux2/flux2-klein-4b-edit.png b/assets/flux2/flux2-klein-4b-edit.png new file mode 100644 index 00000000..481a0a6f Binary files /dev/null and b/assets/flux2/flux2-klein-4b-edit.png differ diff --git a/assets/flux2/flux2-klein-4b.png b/assets/flux2/flux2-klein-4b.png new file mode 100644 index 00000000..2809752c Binary files /dev/null and b/assets/flux2/flux2-klein-4b.png differ diff --git a/assets/flux2/flux2-klein-9b-edit.png b/assets/flux2/flux2-klein-9b-edit.png new file mode 100644 index 00000000..41228f1d Binary files /dev/null and b/assets/flux2/flux2-klein-9b-edit.png differ diff --git a/assets/flux2/flux2-klein-9b.png b/assets/flux2/flux2-klein-9b.png new file mode 100644 index 00000000..48adea2a Binary files /dev/null and b/assets/flux2/flux2-klein-9b.png differ diff --git a/assets/flux2/flux2-klein-base-4b.png b/assets/flux2/flux2-klein-base-4b.png new file mode 100644 index 00000000..f29a123d Binary files /dev/null and b/assets/flux2/flux2-klein-base-4b.png differ diff --git a/assets/flux2/flux2-klein-base-9b.png b/assets/flux2/flux2-klein-base-9b.png new file mode 100644 index 00000000..6241f425 Binary files /dev/null and b/assets/flux2/flux2-klein-base-9b.png differ diff --git a/assets/qwen/qwen_image_edit_2511.png b/assets/qwen/qwen_image_edit_2511.png new file mode 100644 index 00000000..18a26dac Binary files /dev/null and b/assets/qwen/qwen_image_edit_2511.png differ diff --git a/assets/z_image/base_bf16.png b/assets/z_image/base_bf16.png new file mode 100644 index 00000000..f2b918c0 Binary files /dev/null and b/assets/z_image/base_bf16.png differ diff --git a/docs/anima.md b/docs/anima.md new file mode 100644 index 00000000..9c941785 --- /dev/null +++ b/docs/anima.md @@ -0,0 +1,20 @@ +# How to Use + +## Download weights + +- Download Anima + - safetensors: https://huggingface.co/circlestone-labs/Anima/tree/main/split_files/diffusion_models + - gguf: https://huggingface.co/Bedovyy/Anima-GGUF/tree/main 
+- Download vae + - safetensors: https://huggingface.co/circlestone-labs/Anima/tree/main/split_files/vae +- Download Qwen3-0.6B-Base + - safetensors: https://huggingface.co/circlestone-labs/Anima/tree/main/split_files/text_encoders + - gguf: https://huggingface.co/mradermacher/Qwen3-0.6B-Base-GGUF/tree/main + +## Examples + +```sh +.\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\anima-preview.safetensors --vae ..\..\ComfyUI\models\vae\qwen_image_vae.safetensors --llm ..\..\ComfyUI\models\text_encoders\qwen_3_06b_base.safetensors -p "a lovely cat holding a sign says 'anima.cpp'" --cfg-scale 6.0 --sampling-method euler -v --offload-to-cpu --diffusion-fa +``` + +anima image example \ No newline at end of file diff --git a/docs/caching.md b/docs/caching.md new file mode 100644 index 00000000..7b4be3ce --- /dev/null +++ b/docs/caching.md @@ -0,0 +1,126 @@ +## Caching + +Caching methods accelerate diffusion inference by reusing intermediate computations when changes between steps are small. + +### Cache Modes + +| Mode | Target | Description | +|------|--------|-------------| +| `ucache` | UNET models | Condition-level caching with error tracking | +| `easycache` | DiT models | Condition-level cache | +| `dbcache` | DiT models | Block-level L1 residual threshold | +| `taylorseer` | DiT models | Taylor series approximation | +| `cache-dit` | DiT models | Combined DBCache + TaylorSeer | + +### UCache (UNET Models) + +UCache caches the residual difference (output - input) and reuses it when input changes are below threshold. + +```bash +sd-cli -m model.safetensors -p "a cat" --cache-mode ucache --cache-option "threshold=1.5" +``` + +#### Parameters + +| Parameter | Description | Default | +|-----------|-------------|---------| +| `threshold` | Error threshold for reuse decision | 1.0 | +| `start` | Start caching at this percent of steps | 0.15 | +| `end` | Stop caching at this percent of steps | 0.95 | +| `decay` | Error decay rate (0-1) | 1.0 | +| `relative` | Scale threshold by output norm (0/1) | 1 | +| `reset` | Reset error after computing (0/1) | 1 | + +#### Reset Parameter + +The `reset` parameter controls error accumulation behavior: + +- `reset=1` (default): Resets accumulated error after each computed step. More aggressive caching, works well with most samplers. +- `reset=0`: Keeps error accumulated. More conservative, recommended for `euler_a` sampler. + +### EasyCache (DiT Models) + +Condition-level caching for DiT models. Caches and reuses outputs when input changes are below threshold. + +```bash +--cache-mode easycache --cache-option "threshold=0.3" +``` + +#### Parameters + +| Parameter | Description | Default | +|-----------|-------------|---------| +| `threshold` | Input change threshold for reuse | 0.2 | +| `start` | Start caching at this percent of steps | 0.15 | +| `end` | Stop caching at this percent of steps | 0.95 | + +### Cache-DIT (DiT Models) + +For DiT models like FLUX and QWEN, use block-level caching modes. 
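+Any of the block-level modes below can be appended to a regular DiT generation command, as shown in the sketch that follows. This is only an illustration: the model paths are placeholders borrowed from docs/flux2.md, and the settings are examples rather than tested values.
+
+```bash
+# Hypothetical full invocation; substitute your own model paths (see docs/flux2.md for the downloads).
+sd-cli --diffusion-model flux-2-klein-base-4b.safetensors --vae flux2_ae.safetensors \
+  --llm qwen_3_4b.safetensors -p "a lovely cat" --cfg-scale 4.0 --steps 20 \
+  --cache-mode dbcache --cache-option "threshold=0.25,warmup=4"
+```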
+ +#### DBCache + +Caches blocks based on L1 residual difference threshold: + +```bash +--cache-mode dbcache --cache-option "threshold=0.25,warmup=4" +``` + +#### TaylorSeer + +Uses Taylor series approximation to predict block outputs: + +```bash +--cache-mode taylorseer +``` + +#### Cache-DIT (Combined) + +Combines DBCache and TaylorSeer: + +```bash +--cache-mode cache-dit --cache-preset fast +``` + +#### Parameters + +| Parameter | Description | Default | +|-----------|-------------|---------| +| `Fn` | Front blocks to always compute | 8 | +| `Bn` | Back blocks to always compute | 0 | +| `threshold` | L1 residual difference threshold | 0.08 | +| `warmup` | Steps before caching starts | 8 | + +#### Presets + +Available presets: `slow`, `medium`, `fast`, `ultra` (or `s`, `m`, `f`, `u`). + +```bash +--cache-mode cache-dit --cache-preset fast +``` + +#### SCM Options + +Steps Computation Mask controls which steps can be cached: + +```bash +--scm-mask "1,1,1,1,0,0,1,0,0,0,1,0,0,0,1,0,0,0,1,1" +``` + +Mask values: `1` = compute, `0` = can cache. + +| Policy | Description | +|--------|-------------| +| `dynamic` | Check threshold before caching | +| `static` | Always cache on cacheable steps | + +```bash +--scm-policy dynamic +``` + +### Performance Tips + +- Start with default thresholds and adjust based on output quality +- Lower threshold = better quality, less speedup +- Higher threshold = more speedup, potential quality loss +- More steps generally means more caching opportunities diff --git a/docs/distilled_sd.md b/docs/distilled_sd.md index 478305f2..3174b18f 100644 --- a/docs/distilled_sd.md +++ b/docs/distilled_sd.md @@ -1,8 +1,8 @@ -# Running distilled models: SSD1B and SDx.x with tiny U-Nets +# Running distilled models: SSD1B, Vega and SDx.x with tiny U-Nets ## Preface -These models feature a reduced U-Net architecture. Unlike standard SDXL models, the SSD-1B U-Net contains only one middle block and fewer attention layers in its up- and down-blocks, resulting in significantly smaller file sizes. Using these models can reduce inference time by more than 33%. For more details, refer to Segmind's paper: https://arxiv.org/abs/2401.02677v1. +These models feature a reduced U-Net architecture. Unlike standard SDXL models, the SSD-1B and Vega U-Nets contain only one middle block and fewer attention layers in their up- and down-blocks, resulting in significantly smaller file sizes. Using these models can reduce inference time by more than 33%. For more details, refer to Segmind's paper: https://arxiv.org/abs/2401.02677v1. Similarly, SD1.x- and SD2.x-style models with a tiny U-Net consist of only 6 U-Net blocks, leading to very small files and time savings of up to 50%. For more information, see the paper: https://arxiv.org/pdf/2305.15798.pdf. ## SSD1B @@ -17,7 +17,17 @@ Useful LoRAs are also available: * https://huggingface.co/seungminh/lora-swarovski-SSD-1B/resolve/main/pytorch_lora_weights.safetensors * https://huggingface.co/kylielee505/mylcmlorassd/resolve/main/pytorch_lora_weights.safetensors -These files can be used out-of-the-box, unlike the models described in the next section. +## Vega + +Segmind's Vega model is available online here: + + * https://huggingface.co/segmind/Segmind-Vega/resolve/main/segmind-vega.safetensors + +VegaRT is an example of an LCM-LoRA: + + * https://huggingface.co/segmind/Segmind-VegaRT/resolve/main/pytorch_lora_weights.safetensors + +Both files can be used out-of-the-box, unlike the models described in the next sections. 
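+
+For illustration, Vega can be run with the same CLI flags as any other SDXL-class checkpoint. A minimal sketch (the path and sampling settings here are assumptions, not tested values):
+
+```bash
+# Hypothetical example; point -m at wherever you saved segmind-vega.safetensors.
+sd-cli -m ../models/segmind-vega.safetensors -p "portrait of a lovely cat" --cfg-scale 7 --steps 20 -H 1024 -W 1024
+```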
## SD1.x, SD2.x with tiny U-Nets @@ -83,7 +93,7 @@ python convert_diffusers_to_original_stable_diffusion.py \ The file segmind_tiny-sd.ckpt will be generated and is now ready for use with sd.cpp. You can follow a similar process for the other models mentioned above. -### Another available .ckpt file: +##### Another available .ckpt file: * https://huggingface.co/ClashSAN/small-sd/resolve/main/tinySDdistilled.ckpt @@ -97,3 +107,31 @@ for key, value in ckpt['state_dict'].items(): ckpt['state_dict'][key] = value.contiguous() torch.save(ckpt, "tinySDdistilled_fixed.ckpt") ``` + + +### SDXS-512 + +Another very tiny and **incredibly fast** model is SDXS by IDKiro et al. The authors refer to it as *"Real-Time One-Step Latent Diffusion Models with Image Conditions"*. For details read the paper: https://arxiv.org/pdf/2403.16627 . Once again the authors removed some more blocks of U-Net part and unlike other SD1 models they use an adjusted _AutoEncoderTiny_ instead of default _AutoEncoderKL_ for the VAE part. + +##### 1. Download the diffusers model from Hugging Face using Python: + +```python +from diffusers import StableDiffusionPipeline +pipe = StableDiffusionPipeline.from_pretrained("IDKiro/sdxs-512-dreamshaper") +pipe.save_pretrained(save_directory="sdxs") +``` +##### 2. Create a safetensors file + +```bash +python convert_diffusers_to_original_stable_diffusion.py \ + --model_path sdxs --checkpoint_path sdxs.safetensors --half --use_safetensors +``` + +##### 3. Run the model as follows: + +```bash +~/stable-diffusion.cpp/build/bin/sd-cli -m sdxs.safetensors -p "portrait of a lovely cat" \ + --cfg-scale 1 --steps 1 +``` + +Both options: ``` --cfg-scale 1 ``` and ``` --steps 1 ``` are mandatory here. diff --git a/docs/docker.md b/docs/docker.md index 26a5f714..660ed257 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -1,15 +1,39 @@ -## Docker +# Docker -### Building using Docker +## Run CLI + +```shell +docker run --rm -v /path/to/models:/models -v /path/to/output/:/output ghcr.io/leejet/stable-diffusion.cpp:master [args...] +# For example +# docker run --rm -v ./models:/models -v ./build:/output ghcr.io/leejet/stable-diffusion.cpp:master -m /models/sd-v1-4.ckpt -p "a lovely cat" -v -o /output/output.png +``` + +## Run server + +```shell +docker run --rm --init -v /path/to/models:/models -v /path/to/output/:/output -p "1234:1234" --entrypoint "/sd-server" ghcr.io/leejet/stable-diffusion.cpp:master [args...] +# For example +# docker run --rm --init -v ./models:/models -v ./build:/output -p "1234:1234" --entrypoint "/sd-server" ghcr.io/leejet/stable-diffusion.cpp:master -m /models/sd-v1-4.ckpt -p "a lovely cat" -v -o /output/output.png +``` + +## Building using Docker ```shell docker build -t sd . ``` -### Run +## Building variants using Docker + +Vulkan: ```shell -docker run -v /path/to/models:/models -v /path/to/output/:/output sd-cli [args...] +docker build -f Dockerfile.vulkan -t sd . +``` + +## Run locally built image's CLI + +```shell +docker run --rm -v /path/to/models:/models -v /path/to/output/:/output sd [args...] 
# For example -# docker run -v ./models:/models -v ./build:/output sd-cli -m /models/sd-v1-4.ckpt -p "a lovely cat" -v -o /output/output.png -``` \ No newline at end of file +# docker run --rm -v ./models:/models -v ./build:/output sd -m /models/sd-v1-4.ckpt -p "a lovely cat" -v -o /output/output.png +``` diff --git a/docs/esrgan.md b/docs/esrgan.md index 77231726..39a97605 100644 --- a/docs/esrgan.md +++ b/docs/esrgan.md @@ -1,6 +1,6 @@ ## Using ESRGAN to upscale results -You can use ESRGAN to upscale the generated images. At the moment, only the [RealESRGAN_x4plus_anime_6B.pth](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth) model is supported. Support for more models of this architecture will be added soon. +You can use ESRGAN—such as the model [RealESRGAN_x4plus_anime_6B.pth](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth)—to upscale the generated images and improve their overall resolution and clarity. - Specify the model path using the `--upscale-model PATH` parameter. example: diff --git a/docs/flux2.md b/docs/flux2.md index 0c2c6d2b..1524478c 100644 --- a/docs/flux2.md +++ b/docs/flux2.md @@ -1,6 +1,8 @@ # How to Use -## Download weights +## Flux.2-dev + +### Download weights - Download FLUX.2-dev - gguf: https://huggingface.co/city96/FLUX.2-dev-gguf/tree/main @@ -9,7 +11,7 @@ - Download Mistral-Small-3.2-24B-Instruct-2506-GGUF - gguf: https://huggingface.co/unsloth/Mistral-Small-3.2-24B-Instruct-2506-GGUF/tree/main -## Examples +### Examples ``` .\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\flux2-dev-Q4_K_S.gguf --vae ..\..\ComfyUI\models\vae\flux2_ae.safetensors --llm ..\..\ComfyUI\models\text_encoders\Mistral-Small-3.2-24B-Instruct-2506-Q4_K_M.gguf -r .\kontext_input.png -p "change 'flux.cpp' to 'flux2-dev.cpp'" --cfg-scale 1.0 --sampling-method euler -v --diffusion-fa --offload-to-cpu @@ -17,5 +19,74 @@ flux2 example +## Flux.2 klein 4B / Flux.2 klein base 4B +### Download weights +- Download FLUX.2-klein-4B + - safetensors: https://huggingface.co/black-forest-labs/FLUX.2-klein-4B + - gguf: https://huggingface.co/leejet/FLUX.2-klein-4B-GGUF/tree/main +- Download FLUX.2-klein-base-4B + - safetensors: https://huggingface.co/black-forest-labs/FLUX.2-klein-base-4B + - gguf: https://huggingface.co/leejet/FLUX.2-klein-base-4B-GGUF/tree/main +- Download vae + - safetensors: https://huggingface.co/black-forest-labs/FLUX.2-dev/tree/main +- Download Qwen3 4b + - safetensors: https://huggingface.co/Comfy-Org/flux2-klein-4B/tree/main/split_files/text_encoders + - gguf: https://huggingface.co/unsloth/Qwen3-4B-GGUF/tree/main + +### Examples + +``` +.\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\flux-2-klein-4b.safetensors --vae ..\..\ComfyUI\models\vae\flux2_ae.safetensors --llm ..\..\ComfyUI\models\text_encoders\qwen_3_4b.safetensors -p "a lovely cat" --cfg-scale 1.0 --steps 4 -v --offload-to-cpu --diffusion-fa +``` + +flux2-klein-4b + +``` +.\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\flux-2-klein-4b.safetensors --vae ..\..\ComfyUI\models\vae\flux2_ae.safetensors --llm ..\..\ComfyUI\models\text_encoders\qwen_3_4b.safetensors -r .\kontext_input.png -p "change 'flux.cpp' to 'klein.cpp'" --cfg-scale 1.0 --sampling-method euler -v --diffusion-fa --offload-to-cpu --steps 4 +``` + +flux2-klein-4b-edit + +``` +.\bin\Release\sd-cli.exe --diffusion-model 
..\..\ComfyUI\models\diffusion_models\flux-2-klein-base-4b.safetensors --vae ..\..\ComfyUI\models\vae\flux2_ae.safetensors --llm ..\..\ComfyUI\models\text_encoders\qwen_3_4b.safetensors -p "a lovely cat" --cfg-scale 4.0 --steps 20 -v --offload-to-cpu --diffusion-fa +``` + +flux2-klein-base-4b + +## Flux.2 klein 9B / Flux.2 klein base 9B + +### Download weights + +- Download FLUX.2-klein-9B + - safetensors: https://huggingface.co/black-forest-labs/FLUX.2-klein-9B + - gguf: https://huggingface.co/leejet/FLUX.2-klein-9B-GGUF/tree/main +- Download FLUX.2-klein-base-9B + - safetensors: https://huggingface.co/black-forest-labs/FLUX.2-klein-base-9B + - gguf: https://huggingface.co/leejet/FLUX.2-klein-base-9B-GGUF/tree/main +- Download vae + - safetensors: https://huggingface.co/black-forest-labs/FLUX.2-dev/tree/main +- Download Qwen3 8B + - safetensors: https://huggingface.co/Comfy-Org/flux2-klein-9B/tree/main/split_files/text_encoders + - gguf: https://huggingface.co/unsloth/Qwen3-8B-GGUF/tree/main + +### Examples + +``` +.\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\flux-2-klein-9b.safetensors --vae ..\..\ComfyUI\models\vae\flux2_ae.safetensors --llm ..\..\ComfyUI\models\text_encoders\qwen_3_8b.safetensors -p "a lovely cat" --cfg-scale 1.0 --steps 4 -v --offload-to-cpu --diffusion-fa +``` + +flux2-klein-9b + +``` +.\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\flux-2-klein-9b.safetensors --vae ..\..\ComfyUI\models\vae\flux2_ae.safetensors --llm ..\..\ComfyUI\models\text_encoders\qwen_3_8b.safetensors -r .\kontext_input.png -p "change 'flux.cpp' to 'klein.cpp'" --cfg-scale 1.0 --sampling-method euler -v --diffusion-fa --offload-to-cpu --steps 4 +``` + +flux2-klein-9b-edit + +``` +.\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\flux-2-klein-base-9b.safetensors --vae ..\..\ComfyUI\models\vae\flux2_ae.safetensors --llm ..\..\ComfyUI\models\text_encoders\qwen_3_8b.safetensors -p "a lovely cat" --cfg-scale 4.0 --steps 20 -v --offload-to-cpu --diffusion-fa +``` + +flux2-klein-base-9b \ No newline at end of file diff --git a/docs/qwen_image_edit.md b/docs/qwen_image_edit.md index d376a283..4a8b0172 100644 --- a/docs/qwen_image_edit.md +++ b/docs/qwen_image_edit.md @@ -9,6 +9,9 @@ - Qwen Image Edit 2509 - safetensors: https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI/tree/main/split_files/diffusion_models - gguf: https://huggingface.co/QuantStack/Qwen-Image-Edit-2509-GGUF/tree/main + - Qwen Image Edit 2511 + - safetensors: https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI/tree/main/split_files/diffusion_models + - gguf: https://huggingface.co/unsloth/Qwen-Image-Edit-2511-GGUF/tree/main - Download vae - safetensors: https://huggingface.co/Comfy-Org/Qwen-Image_ComfyUI/tree/main/split_files/vae - Download qwen_2.5_vl 7b @@ -32,4 +35,14 @@ .\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\Qwen-Image-Edit-2509-Q4_K_S.gguf --vae ..\..\ComfyUI\models\vae\qwen_image_vae.safetensors --llm ..\..\ComfyUI\models\text_encoders\Qwen2.5-VL-7B-Instruct-Q8_0.gguf --llm_vision ..\..\ComfyUI\models\text_encoders\Qwen2.5-VL-7B-Instruct.mmproj-Q8_0.gguf --cfg-scale 2.5 --sampling-method euler -v --offload-to-cpu --diffusion-fa --flow-shift 3 -r ..\assets\flux\flux1-dev-q8_0.png -p "change 'flux.cpp' to 'Qwen Image Edit 2509'" ``` -qwen_image_edit_2509 \ No newline at end of file +qwen_image_edit_2509 + +### Qwen Image Edit 2511 + +To use the new Qwen Image Edit 2511 mode, the 
`--qwen-image-zero-cond-t` flag must be enabled; otherwise, image editing quality will degrade significantly. + +``` +.\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\qwen-image-edit-2511-Q4_K_M.gguf --vae ..\..\ComfyUI\models\vae\qwen_image_vae.safetensors --llm ..\..\ComfyUI\models\text_encoders\qwen_2.5_vl_7b.safetensors --cfg-scale 2.5 --sampling-method euler -v --offload-to-cpu --diffusion-fa --flow-shift 3 -r ..\assets\flux\flux1-dev-q8_0.png -p "change 'flux.cpp' to 'edit.cpp'" --qwen-image-zero-cond-t +``` + +qwen_image_edit_2511 \ No newline at end of file diff --git a/docs/taesd.md b/docs/taesd.md index 5160b793..a41c64d4 100644 --- a/docs/taesd.md +++ b/docs/taesd.md @@ -14,4 +14,26 @@ curl -L -O https://huggingface.co/madebyollin/taesd/resolve/main/diffusion_pytor ```bash sd-cli -m ../models/v1-5-pruned-emaonly.safetensors -p "a lovely cat" --taesd ../models/diffusion_pytorch_model.safetensors -``` \ No newline at end of file +``` + +### Qwen-Image and wan (TAEHV) + +sd.cpp also supports [TAEHV](https://github.com/madebyollin/taehv) (#937), which can be used for Qwen-Image and wan. + +- For **Qwen-Image and wan2.1 and wan2.2-A14B**, download the wan2.1 tae [safetensors weights](https://github.com/madebyollin/taehv/blob/main/safetensors/taew2_1.safetensors) + + Or curl + + ```bash + curl -L -O https://github.com/madebyollin/taehv/raw/refs/heads/main/safetensors/taew2_1.safetensors + ``` + +- For **wan2.2-TI2V-5B**, use the wan2.2 tae [safetensors weights](https://github.com/madebyollin/taehv/blob/main/safetensors/taew2_2.safetensors) + + Or curl + + ```bash + curl -L -O https://github.com/madebyollin/taehv/raw/refs/heads/main/safetensors/taew2_2.safetensors + ``` + +Then simply replace `--vae xxx.safetensors` with `--tae xxx.safetensors` in the commands. If it still runs out of VRAM, add `--vae-conv-direct` to your command, though it might be slower. diff --git a/docs/wan.md b/docs/wan.md index ce15ba58..6f5749c8 100644 --- a/docs/wan.md +++ b/docs/wan.md @@ -39,6 +39,9 @@ - safetensors: https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/vae/wan_2.1_vae.safetensors - wan_2.2_vae (for Wan2.2 TI2V 5B only) - safetensors: https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/blob/main/split_files/vae/wan2.2_vae.safetensors + + > The Wan models' VAE requires a lot of VRAM! If you do not have enough VRAM, please try the TAE instead, though the results may be poorer. 
For tae usage, please refer to [taesd](taesd.md) + - Download umt5_xxl - safetensors: https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/text_encoders/umt5_xxl_fp16.safetensors - gguf: https://huggingface.co/city96/umt5-xxl-encoder-gguf/tree/main diff --git a/docs/z_image.md b/docs/z_image.md index 122f1f20..2ea66f9b 100644 --- a/docs/z_image.md +++ b/docs/z_image.md @@ -7,6 +7,9 @@ You can run Z-Image with stable-diffusion.cpp on GPUs with 4GB of VRAM — or ev - Download Z-Image-Turbo - safetensors: https://huggingface.co/Comfy-Org/z_image_turbo/tree/main/split_files/diffusion_models - gguf: https://huggingface.co/leejet/Z-Image-Turbo-GGUF/tree/main +- Download Z-Image + - safetensors: https://huggingface.co/Comfy-Org/z_image/tree/main/split_files/diffusion_models + - gguf: https://huggingface.co/unsloth/Z-Image-GGUF/tree/main - Download vae - safetensors: https://huggingface.co/black-forest-labs/FLUX.1-schnell/tree/main - Download Qwen3 4b @@ -15,12 +18,22 @@ You can run Z-Image with stable-diffusion.cpp on GPUs with 4GB of VRAM — or ev ## Examples +### Z-Image-Turbo + ``` .\bin\Release\sd-cli.exe --diffusion-model z_image_turbo-Q3_K.gguf --vae ..\..\ComfyUI\models\vae\ae.sft --llm ..\..\ComfyUI\models\text_encoders\Qwen3-4B-Instruct-2507-Q4_K_M.gguf -p "A cinematic, melancholic photograph of a solitary hooded figure walking through a sprawling, rain-slicked metropolis at night. The city lights are a chaotic blur of neon orange and cool blue, reflecting on the wet asphalt. The scene evokes a sense of being a single component in a vast machine. Superimposed over the image in a sleek, modern, slightly glitched font is the philosophical quote: 'THE CITY IS A CIRCUIT BOARD, AND I AM A BROKEN TRANSISTOR.' -- moody, atmospheric, profound, dark academic" --cfg-scale 1.0 -v --offload-to-cpu --diffusion-fa -H 1024 -W 512 ``` z-image example +### Z-Image-Base + +``` +.\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\z_image_bf16.safetensors --vae ..\..\ComfyUI\models\vae\ae.sft --llm ..\..\ComfyUI\models\text_encoders\qwen_3_4b.safetensors -p "A cinematic, melancholic photograph of a solitary hooded figure walking through a sprawling, rain-slicked metropolis at night. The city lights are a chaotic blur of neon orange and cool blue, reflecting on the wet asphalt. The scene evokes a sense of being a single component in a vast machine. Superimposed over the image in a sleek, modern, slightly glitched font is the philosophical quote: 'THE CITY IS A CIRCUIT BOARD, AND I AM A BROKEN TRANSISTOR.' -- moody, atmospheric, profound, dark academic" --cfg-scale 5.0 -v --offload-to-cpu --diffusion-fa -H 1024 -W 512 +``` + +z-image example + ## Comparison of Different Quantization Types | bf16 | q8_0 | q6_K | q5_0 | q4_K | q4_0 | q3_K | q2_K| diff --git a/examples/cli/README.md b/examples/cli/README.md index 8531b2ae..564e5ce0 100644 --- a/examples/cli/README.md +++ b/examples/cli/README.md @@ -4,11 +4,14 @@ usage: ./bin/sd-cli [options] CLI Options: - -o, --output path to write result image to (default: ./output.png) + -o, --output path to write result image to. you can use printf-style %d format specifiers for image sequences (default: + ./output.png) (eg. 
output_%03d.png) --preview-path path to write preview image to (default: ./preview.png) --preview-interval interval in denoising steps between consecutive updates of the image preview file (default is 1, meaning updating at every step) + --output-begin-idx starting index for output image sequence, must be non-negative (default 0 if specified %d in output path, 1 otherwise) --canny apply canny preprocessor (edge detection) + --convert-name convert tensor name (for convert mode) -v, --verbose print extra info --color colors the logging tags according to level --taesd-preview-only prevents usage of taesd for decoding the final image. (for use with --preview tae) @@ -42,17 +45,22 @@ Context Options: CPU physical cores --chroma-t5-mask-pad t5 mask pad size of chroma --vae-tile-overlap tile overlap for vae tiling, in fraction of tile size (default: 0.5) - --flow-shift shift value for Flow models like SD3.x or WAN (default: auto) --vae-tiling process vae in tiles to reduce memory usage --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae --offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed + --mmap whether to memory-map model --control-net-cpu keep controlnet in cpu (for low vram) --clip-on-cpu keep clip in cpu (for low vram) --vae-on-cpu keep vae in cpu (for low vram) - --diffusion-fa use flash attention in the diffusion model + --fa use flash attention + --diffusion-fa use flash attention in the diffusion model only --diffusion-conv-direct use ggml_conv2d_direct in the diffusion model --vae-conv-direct use ggml_conv2d_direct in the vae model + --circular enable circular padding for convolutions + --circularx enable circular RoPE wrapping on x-axis (width) only + --circulary enable circular RoPE wrapping on y-axis (height) only --chroma-disable-dit-mask disable dit mask for chroma + --qwen-image-zero-cond-t enable zero_cond_t for qwen image --chroma-enable-t5-mask enable t5 mask for chroma --type weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K). If not specified, the default is the type of the weight file @@ -93,6 +101,7 @@ Generation Options: --timestep-shift shift timestep for NitroFusion models (default: 0). 
recommended N for NitroSD-Realism around 250 and 500 for NitroSD-Vibrant --upscale-repeats Run the ESRGAN upscaler this many times (default: 1) + --upscale-tile-size tile size for ESRGAN upscaling (default: 128) --cfg-scale unconditional guidance scale: (default: 7.0) --img-cfg-scale image guidance scale for inpaint or instruct-pix2pix models: (default: same as --cfg-scale) --guidance distilled guidance scale for models with guidance input (default: 3.5) @@ -101,6 +110,7 @@ Generation Options: --skip-layer-start SLG enabling point (default: 0.01) --skip-layer-end SLG disabling point (default: 0.2) --eta eta in DDIM, only for DDIM and TCD (default: 0) + --flow-shift shift value for Flow models like SD3.x or WAN (default: auto) --high-noise-cfg-scale (high noise) unconditional guidance scale: (default: 7.0) --high-noise-img-cfg-scale (high noise) image guidance scale for inpaint or instruct-pix2pix models (default: same as --cfg-scale) --high-noise-guidance (high noise) distilled guidance scale for models with guidance input (default: 3.5) @@ -117,14 +127,22 @@ Generation Options: --disable-auto-resize-ref-image disable auto resize of ref images -s, --seed RNG seed (default: 42, use random seed for < 0) --sampling-method sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, - tcd] (default: euler for Flux/SD3/Wan, euler_a otherwise) + tcd, res_multistep, res_2s] (default: euler for Flux/SD3/Wan, euler_a + otherwise) --high-noise-sampling-method (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, - ddim_trailing, tcd] default: euler for Flux/SD3/Wan, euler_a otherwise - --scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple, lcm], - default: discrete + ddim_trailing, tcd, res_multistep, res_2s] default: euler for Flux/SD3/Wan, + euler_a otherwise + --scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple, + kl_optimal, lcm, bong_tangent], default: discrete --sigmas custom sigma values for the sampler, comma-separated (e.g., "14.61,7.8,3.5,0.0"). --skip-layers layers to skip for SLG steps (default: [7,8,9]) --high-noise-skip-layers (high noise) layers to skip for SLG steps (default: [7,8,9]) -r, --ref-image reference image for Flux Kontext models (can be used multiple times) - --easycache enable EasyCache for DiT models with optional "threshold,start_percent,end_percent" (default: 0.2,0.15,0.95) + --cache-mode caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level) + --cache-option named cache params (key=value format, comma-separated). easycache/ucache: + threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. 
Examples: + "threshold=0.25" or "threshold=1.5,reset=0" + --cache-preset cache-dit preset: 'slow'/'s', 'medium'/'m', 'fast'/'f', 'ultra'/'u' + --scm-mask SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache + --scm-policy SCM policy: 'dynamic' (default) or 'static' ``` diff --git a/examples/cli/avi_writer.h b/examples/cli/avi_writer.h index 84b204af..53b4749c 100644 --- a/examples/cli/avi_writer.h +++ b/examples/cli/avi_writer.h @@ -172,9 +172,9 @@ int create_mjpg_avi_from_sd_images(const char* filename, sd_image_t* images, int // Write '00dc' chunk (video frame) fwrite("00dc", 4, 1, f); - write_u32_le(f, jpeg_data.size); + write_u32_le(f, (uint32_t)jpeg_data.size); index[i].offset = ftell(f) - 8; - index[i].size = jpeg_data.size; + index[i].size = (uint32_t)jpeg_data.size; fwrite(jpeg_data.buf, 1, jpeg_data.size, f); // Align to even byte size diff --git a/examples/cli/main.cpp b/examples/cli/main.cpp index 42b909e4..f9e4928e 100644 --- a/examples/cli/main.cpp +++ b/examples/cli/main.cpp @@ -26,12 +26,16 @@ const char* previews_str[] = { "vae", }; +std::regex format_specifier_regex("(?:[^%]|^)(?:%%)*(%\\d{0,3}d)"); + struct SDCliParams { SDMode mode = IMG_GEN; std::string output_path = "output.png"; + int output_begin_idx = -1; bool verbose = false; bool canny_preprocess = false; + bool convert_name = false; preview_t preview_method = PREVIEW_NONE; int preview_interval = 1; @@ -49,7 +53,7 @@ struct SDCliParams { options.string_options = { {"-o", "--output", - "path to write result image to (default: ./output.png)", + "path to write result image to. you can use printf-style %d format specifiers for image sequences (default: ./output.png) (eg. output_%03d.png)", &output_path}, {"", "--preview-path", @@ -62,6 +66,10 @@ struct SDCliParams { "--preview-interval", "interval in denoising steps between consecutive updates of the image preview file (default is 1, meaning updating at every step)", &preview_interval}, + {"", + "--output-begin-idx", + "starting index for output image sequence, must be non-negative (default 0 if specified %d in output path, 1 otherwise)", + &output_begin_idx}, }; options.bool_options = { @@ -69,6 +77,10 @@ struct SDCliParams { "--canny", "apply canny preprocessor (edge detection)", true, &canny_preprocess}, + {"", + "--convert-name", + "convert tensor name (for convert mode)", + true, &convert_name}, {"-v", "--verbose", "print extra info", @@ -174,6 +186,7 @@ struct SDCliParams { << " verbose: " << (verbose ? "true" : "false") << ",\n" << " color: " << (color ? "true" : "false") << ",\n" << " canny_preprocess: " << (canny_preprocess ? "true" : "false") << ",\n" + << " convert_name: " << (convert_name ? 
"true" : "false") << ",\n" << " preview_method: " << previews_str[preview_method] << ",\n" << " preview_interval: " << preview_interval << ",\n" << " preview_path: \"" << preview_path << "\",\n" @@ -232,7 +245,7 @@ std::string get_image_params(const SDCliParams& cli_params, const SDContextParam parameter_string += "Guidance: " + std::to_string(gen_params.sample_params.guidance.distilled_guidance) + ", "; parameter_string += "Eta: " + std::to_string(gen_params.sample_params.eta) + ", "; parameter_string += "Seed: " + std::to_string(seed) + ", "; - parameter_string += "Size: " + std::to_string(gen_params.width) + "x" + std::to_string(gen_params.height) + ", "; + parameter_string += "Size: " + std::to_string(gen_params.get_resolved_width()) + "x" + std::to_string(gen_params.get_resolved_height()) + ", "; parameter_string += "Model: " + sd_basename(ctx_params.model_path) + ", "; parameter_string += "RNG: " + std::string(sd_rng_type_name(ctx_params.rng_type)) + ", "; if (ctx_params.sampler_rng_type != RNG_TYPE_COUNT) { @@ -338,6 +351,129 @@ void step_callback(int step, int frame_count, sd_image_t* image, bool is_noisy, } } +std::string format_frame_idx(std::string pattern, int frame_idx) { + std::smatch match; + std::string result = pattern; + while (std::regex_search(result, match, format_specifier_regex)) { + std::string specifier = match.str(1); + char buffer[32]; + snprintf(buffer, sizeof(buffer), specifier.c_str(), frame_idx); + result.replace(match.position(1), match.length(1), buffer); + } + + // Then replace all '%%' with '%' + size_t pos = 0; + while ((pos = result.find("%%", pos)) != std::string::npos) { + result.replace(pos, 2, "%"); + pos += 1; + } + return result; +} + +bool save_results(const SDCliParams& cli_params, + const SDContextParams& ctx_params, + const SDGenerationParams& gen_params, + sd_image_t* results, + int num_results) { + if (results == nullptr || num_results <= 0) { + return false; + } + + namespace fs = std::filesystem; + fs::path out_path = cli_params.output_path; + + if (!out_path.parent_path().empty()) { + std::error_code ec; + fs::create_directories(out_path.parent_path(), ec); + if (ec) { + LOG_ERROR("failed to create directory '%s': %s", + out_path.parent_path().string().c_str(), ec.message().c_str()); + return false; + } + } + + fs::path base_path = out_path; + fs::path ext = out_path.has_extension() ? out_path.extension() : fs::path{}; + + std::string ext_lower = ext.string(); + std::transform(ext_lower.begin(), ext_lower.end(), ext_lower.begin(), ::tolower); + bool is_jpg = (ext_lower == ".jpg" || ext_lower == ".jpeg" || ext_lower == ".jpe"); + if (!ext.empty()) { + if (is_jpg || ext_lower == ".png") { + base_path.replace_extension(); + } + } + + int output_begin_idx = cli_params.output_begin_idx; + if (output_begin_idx < 0) { + output_begin_idx = 0; + } + + auto write_image = [&](const fs::path& path, int idx) { + const sd_image_t& img = results[idx]; + if (!img.data) + return false; + + std::string params = get_image_params(cli_params, ctx_params, gen_params, gen_params.seed + idx); + int ok = 0; + if (is_jpg) { + ok = stbi_write_jpg(path.string().c_str(), img.width, img.height, img.channel, img.data, 90, params.c_str()); + } else { + ok = stbi_write_png(path.string().c_str(), img.width, img.height, img.channel, img.data, 0, params.c_str()); + } + LOG_INFO("save result image %d to '%s' (%s)", idx, path.string().c_str(), ok ? 
"success" : "failure"); + return ok != 0; + }; + + int sucessful_reults = 0; + + if (std::regex_search(cli_params.output_path, format_specifier_regex)) { + if (!is_jpg && ext_lower != ".png") + ext = ".png"; + fs::path pattern = base_path; + pattern += ext; + + for (int i = 0; i < num_results; ++i) { + fs::path img_path = format_frame_idx(pattern.string(), output_begin_idx + i); + if (write_image(img_path, i)) { + sucessful_reults++; + } + } + LOG_INFO("%d/%d images saved", sucessful_reults, num_results); + return sucessful_reults != 0; + } + + if (cli_params.mode == VID_GEN && num_results > 1) { + if (ext_lower != ".avi") + ext = ".avi"; + fs::path video_path = base_path; + video_path += ext; + if (create_mjpg_avi_from_sd_images(video_path.string().c_str(), results, num_results, gen_params.fps) == 0) { + LOG_INFO("save result MJPG AVI video to '%s'", video_path.string().c_str()); + return true; + } else { + LOG_ERROR("Failed to save result MPG AVI video to '%s'", video_path.string().c_str()); + return false; + } + } + + if (!is_jpg && ext_lower != ".png") + ext = ".png"; + + for (int i = 0; i < num_results; ++i) { + fs::path img_path = base_path; + if (num_results > 1) { + img_path += "_" + std::to_string(output_begin_idx + i); + } + img_path += ext; + if (write_image(img_path, i)) { + sucessful_reults++; + } + } + LOG_INFO("%d/%d images saved", sucessful_reults, num_results); + return sucessful_reults != 0; +} + int main(int argc, const char* argv[]) { if (argc > 1 && std::string(argv[1]) == "--version") { std::cout << version_string() << "\n"; @@ -387,7 +523,8 @@ int main(int argc, const char* argv[]) { ctx_params.vae_path.c_str(), cli_params.output_path.c_str(), ctx_params.wtype, - ctx_params.tensor_type_rules.c_str()); + ctx_params.tensor_type_rules.c_str(), + cli_params.convert_name); if (!success) { LOG_ERROR("convert '%s'/'%s' to '%s' failed", ctx_params.model_path.c_str(), @@ -404,10 +541,10 @@ int main(int argc, const char* argv[]) { } bool vae_decode_only = true; - sd_image_t init_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 3, nullptr}; - sd_image_t end_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 3, nullptr}; - sd_image_t control_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 3, nullptr}; - sd_image_t mask_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 1, nullptr}; + sd_image_t init_image = {0, 0, 3, nullptr}; + sd_image_t end_image = {0, 0, 3, nullptr}; + sd_image_t control_image = {0, 0, 3, nullptr}; + sd_image_t mask_image = {0, 0, 1, nullptr}; std::vector ref_images; std::vector pmid_images; std::vector control_frames; @@ -434,57 +571,79 @@ int main(int argc, const char* argv[]) { control_frames.clear(); }; + auto load_image_and_update_size = [&](const std::string& path, + sd_image_t& image, + bool resize_image = true, + int expected_channel = 3) -> bool { + int expected_width = 0; + int expected_height = 0; + if (resize_image && gen_params.width_and_height_are_set()) { + expected_width = gen_params.width; + expected_height = gen_params.height; + } + + if (!load_sd_image_from_file(&image, path.c_str(), expected_width, expected_height, expected_channel)) { + LOG_ERROR("load image from '%s' failed", path.c_str()); + release_all_resources(); + return false; + } + + gen_params.set_width_and_height_if_unset(image.width, image.height); + return true; + }; + if (gen_params.init_image_path.size() > 0) { vae_decode_only = false; - - int width = 0; - int height = 0; - init_image.data = 
load_image_from_file(gen_params.init_image_path.c_str(), width, height, gen_params.width, gen_params.height);
-        if (init_image.data == nullptr) {
-            LOG_ERROR("load image from '%s' failed", gen_params.init_image_path.c_str());
-            release_all_resources();
+        if (!load_image_and_update_size(gen_params.init_image_path, init_image)) {
             return 1;
         }
     }
 
     if (gen_params.end_image_path.size() > 0) {
         vae_decode_only = false;
-
-        int width  = 0;
-        int height = 0;
-        end_image.data = load_image_from_file(gen_params.end_image_path.c_str(), width, height, gen_params.width, gen_params.height);
-        if (end_image.data == nullptr) {
-            LOG_ERROR("load image from '%s' failed", gen_params.end_image_path.c_str());
-            release_all_resources();
+        if (!load_image_and_update_size(gen_params.end_image_path, end_image)) {
             return 1;
         }
     }
 
+    if (gen_params.ref_image_paths.size() > 0) {
+        vae_decode_only = false;
+        for (auto& path : gen_params.ref_image_paths) {
+            sd_image_t ref_image = {0, 0, 3, nullptr};
+            if (!load_image_and_update_size(path, ref_image, false)) {
+                return 1;
+            }
+            ref_images.push_back(ref_image);
+        }
+    }
+
     if (gen_params.mask_image_path.size() > 0) {
-        int c      = 0;
-        int width  = 0;
-        int height = 0;
-        mask_image.data = load_image_from_file(gen_params.mask_image_path.c_str(), width, height, gen_params.width, gen_params.height, 1);
-        if (mask_image.data == nullptr) {
+        if (!load_sd_image_from_file(&mask_image,
+                                     gen_params.mask_image_path.c_str(),
+                                     gen_params.get_resolved_width(),
+                                     gen_params.get_resolved_height(),
+                                     1)) {
             LOG_ERROR("load image from '%s' failed", gen_params.mask_image_path.c_str());
             release_all_resources();
             return 1;
         }
     } else {
-        mask_image.data = (uint8_t*)malloc(gen_params.width * gen_params.height);
-        memset(mask_image.data, 255, gen_params.width * gen_params.height);
+        mask_image.data = (uint8_t*)malloc(gen_params.get_resolved_width() * gen_params.get_resolved_height());
         if (mask_image.data == nullptr) {
             LOG_ERROR("malloc mask image failed");
             release_all_resources();
             return 1;
         }
+        mask_image.width  = gen_params.get_resolved_width();
+        mask_image.height = gen_params.get_resolved_height();
+        memset(mask_image.data, 255, gen_params.get_resolved_width() * gen_params.get_resolved_height());
     }
 
     if (gen_params.control_image_path.size() > 0) {
-        int width  = 0;
-        int height = 0;
-        control_image.data = load_image_from_file(gen_params.control_image_path.c_str(), width, height, gen_params.width, gen_params.height);
-        if (control_image.data == nullptr) {
+        if (!load_sd_image_from_file(&control_image,
+                                     gen_params.control_image_path.c_str(),
+                                     gen_params.get_resolved_width(),
+                                     gen_params.get_resolved_height())) {
             LOG_ERROR("load image from '%s' failed", gen_params.control_image_path.c_str());
             release_all_resources();
             return 1;
@@ -499,29 +658,11 @@ int main(int argc, const char* argv[]) {
         }
     }
 
-    if (gen_params.ref_image_paths.size() > 0) {
-        vae_decode_only = false;
-        for (auto& path : gen_params.ref_image_paths) {
-            int width  = 0;
-            int height = 0;
-            uint8_t* image_buffer = load_image_from_file(path.c_str(), width, height);
-            if (image_buffer == nullptr) {
-                LOG_ERROR("load image from '%s' failed", path.c_str());
-                release_all_resources();
-                return 1;
-            }
-            ref_images.push_back({(uint32_t)width,
-                                  (uint32_t)height,
-                                  3,
-                                  image_buffer});
-        }
-    }
-
     if (!gen_params.control_video_path.empty()) {
         if (!load_images_from_dir(gen_params.control_video_path,
                                   control_frames,
-                                  gen_params.width,
-                                  gen_params.height,
+                                  gen_params.get_resolved_width(),
+                                  gen_params.get_resolved_height(),
                                   gen_params.video_frames,
                                  
cli_params.verbose)) { release_all_resources(); @@ -579,7 +720,7 @@ int main(int argc, const char* argv[]) { } if (gen_params.sample_params.scheduler == SCHEDULER_COUNT) { - gen_params.sample_params.scheduler = sd_get_default_scheduler(sd_ctx); + gen_params.sample_params.scheduler = sd_get_default_scheduler(sd_ctx, gen_params.sample_params.sample_method); } if (cli_params.mode == IMG_GEN) { @@ -595,8 +736,8 @@ int main(int argc, const char* argv[]) { gen_params.auto_resize_ref_image, gen_params.increase_ref_index, mask_image, - gen_params.width, - gen_params.height, + gen_params.get_resolved_width(), + gen_params.get_resolved_height(), gen_params.sample_params, gen_params.strength, gen_params.seed, @@ -610,7 +751,7 @@ int main(int argc, const char* argv[]) { gen_params.pm_style_strength, }, // pm_params ctx_params.vae_tiling_params, - gen_params.easycache_params, + gen_params.cache_params, }; results = generate_image(sd_ctx, &img_gen_params); @@ -626,8 +767,8 @@ int main(int argc, const char* argv[]) { end_image, control_frames.data(), (int)control_frames.size(), - gen_params.width, - gen_params.height, + gen_params.get_resolved_width(), + gen_params.get_resolved_height(), gen_params.sample_params, gen_params.high_noise_sample_params, gen_params.moe_boundary, @@ -635,7 +776,8 @@ int main(int argc, const char* argv[]) { gen_params.seed, gen_params.video_frames, gen_params.vace_strength, - gen_params.easycache_params, + ctx_params.vae_tiling_params, + gen_params.cache_params, }; results = generate_video(sd_ctx, &vid_gen_params, &num_results); @@ -680,67 +822,8 @@ int main(int argc, const char* argv[]) { } } - // create directory if not exists - { - const fs::path out_path = cli_params.output_path; - if (const fs::path out_dir = out_path.parent_path(); !out_dir.empty()) { - std::error_code ec; - fs::create_directories(out_dir, ec); // OK if already exists - if (ec) { - LOG_ERROR("failed to create directory '%s': %s", - out_dir.string().c_str(), ec.message().c_str()); - return 1; - } - } - } - - std::string base_path; - std::string file_ext; - std::string file_ext_lower; - bool is_jpg; - size_t last_dot_pos = cli_params.output_path.find_last_of("."); - size_t last_slash_pos = std::min(cli_params.output_path.find_last_of("/"), - cli_params.output_path.find_last_of("\\")); - if (last_dot_pos != std::string::npos && (last_slash_pos == std::string::npos || last_dot_pos > last_slash_pos)) { // filename has extension - base_path = cli_params.output_path.substr(0, last_dot_pos); - file_ext = file_ext_lower = cli_params.output_path.substr(last_dot_pos); - std::transform(file_ext.begin(), file_ext.end(), file_ext_lower.begin(), ::tolower); - is_jpg = (file_ext_lower == ".jpg" || file_ext_lower == ".jpeg" || file_ext_lower == ".jpe"); - } else { - base_path = cli_params.output_path; - file_ext = file_ext_lower = ""; - is_jpg = false; - } - - if (cli_params.mode == VID_GEN && num_results > 1) { - std::string vid_output_path = cli_params.output_path; - if (file_ext_lower == ".png") { - vid_output_path = base_path + ".avi"; - } - create_mjpg_avi_from_sd_images(vid_output_path.c_str(), results, num_results, gen_params.fps); - LOG_INFO("save result MJPG AVI video to '%s'\n", vid_output_path.c_str()); - } else { - // appending ".png" to absent or unknown extension - if (!is_jpg && file_ext_lower != ".png") { - base_path += file_ext; - file_ext = ".png"; - } - for (int i = 0; i < num_results; i++) { - if (results[i].data == nullptr) { - continue; - } - int write_ok; - std::string final_image_path = i > 0 ? 
base_path + "_" + std::to_string(i + 1) + file_ext : base_path + file_ext; - if (is_jpg) { - write_ok = stbi_write_jpg(final_image_path.c_str(), results[i].width, results[i].height, results[i].channel, - results[i].data, 90, get_image_params(cli_params, ctx_params, gen_params, gen_params.seed + i).c_str()); - LOG_INFO("save result JPEG image to '%s' (%s)", final_image_path.c_str(), write_ok == 0 ? "failure" : "success"); - } else { - write_ok = stbi_write_png(final_image_path.c_str(), results[i].width, results[i].height, results[i].channel, - results[i].data, 0, get_image_params(cli_params, ctx_params, gen_params, gen_params.seed + i).c_str()); - LOG_INFO("save result PNG image to '%s' (%s)", final_image_path.c_str(), write_ok == 0 ? "failure" : "success"); - } - } + if (!save_results(cli_params, ctx_params, gen_params, results, num_results)) { + return 1; } for (int i = 0; i < num_results; i++) { @@ -752,4 +835,4 @@ int main(int argc, const char* argv[]) { release_all_resources(); return 0; -} \ No newline at end of file +} diff --git a/examples/common/common.hpp b/examples/common/common.hpp index f3a56136..369c1f07 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -95,17 +95,28 @@ static void print_utf8(FILE* stream, const char* utf8) { ? GetStdHandle(STD_ERROR_HANDLE) : GetStdHandle(STD_OUTPUT_HANDLE); - int wlen = MultiByteToWideChar(CP_UTF8, 0, utf8, -1, NULL, 0); - if (wlen <= 0) - return; + DWORD mode; + BOOL is_console = GetConsoleMode(h, &mode); - wchar_t* wbuf = (wchar_t*)malloc(wlen * sizeof(wchar_t)); - MultiByteToWideChar(CP_UTF8, 0, utf8, -1, wbuf, wlen); + if (is_console) { + int wlen = MultiByteToWideChar(CP_UTF8, 0, utf8, -1, NULL, 0); + if (wlen <= 0) + return; - DWORD written; - WriteConsoleW(h, wbuf, wlen - 1, &written, NULL); + wchar_t* wbuf = (wchar_t*)malloc(wlen * sizeof(wchar_t)); + if (!wbuf) + return; - free(wbuf); + MultiByteToWideChar(CP_UTF8, 0, utf8, -1, wbuf, wlen); + + DWORD written; + WriteConsoleW(h, wbuf, wlen - 1, &written, NULL); + + free(wbuf); + } else { + DWORD written; + WriteFile(h, utf8, (DWORD)strlen(utf8), &written, NULL); + } #else fputs(utf8, stream); #endif @@ -434,7 +445,7 @@ struct SDContextParams { std::string photo_maker_path; sd_type_t wtype = SD_TYPE_COUNT; std::string tensor_type_rules; - std::string lora_model_dir; + std::string lora_model_dir = "."; std::map embedding_map; std::vector embedding_vec; @@ -442,17 +453,25 @@ struct SDContextParams { rng_type_t rng_type = CUDA_RNG; rng_type_t sampler_rng_type = RNG_TYPE_COUNT; bool offload_params_to_cpu = false; + bool enable_mmap = false; bool control_net_cpu = false; bool clip_on_cpu = false; bool vae_on_cpu = false; + bool flash_attn = false; bool diffusion_flash_attn = false; bool diffusion_conv_direct = false; bool vae_conv_direct = false; + bool circular = false; + bool circular_x = false; + bool circular_y = false; + bool chroma_use_dit_mask = true; bool chroma_use_t5_mask = false; int chroma_t5_mask_pad = 1; + bool qwen_image_zero_cond_t = false; + prediction_t prediction = PREDICTION_COUNT; lora_apply_mode_t lora_apply_mode = LORA_APPLY_AUTO; @@ -562,10 +581,6 @@ struct SDContextParams { "--vae-tile-overlap", "tile overlap for vae tiling, in fraction of tile size (default: 0.5)", &vae_tiling_params.target_overlap}, - {"", - "--flow-shift", - "shift value for Flow models like SD3.x or WAN (default: auto)", - &flow_shift}, }; options.bool_options = { @@ -581,6 +596,10 @@ struct SDContextParams { "--offload-to-cpu", "place the weights in RAM to save VRAM, 
and automatically load them into VRAM when needed", true, &offload_params_to_cpu}, + {"", + "--mmap", + "whether to memory-map model", + true, &enable_mmap}, {"", "--control-net-cpu", "keep controlnet in cpu (for low vram)", @@ -593,9 +612,13 @@ struct SDContextParams { "--vae-on-cpu", "keep vae in cpu (for low vram)", true, &vae_on_cpu}, + {"", + "--fa", + "use flash attention", + true, &flash_attn}, {"", "--diffusion-fa", - "use flash attention in the diffusion model", + "use flash attention in the diffusion model only", true, &diffusion_flash_attn}, {"", "--diffusion-conv-direct", @@ -605,10 +628,26 @@ struct SDContextParams { "--vae-conv-direct", "use ggml_conv2d_direct in the vae model", true, &vae_conv_direct}, + {"", + "--circular", + "enable circular padding for convolutions", + true, &circular}, + {"", + "--circularx", + "enable circular RoPE wrapping on x-axis (width) only", + true, &circular_x}, + {"", + "--circulary", + "enable circular RoPE wrapping on y-axis (height) only", + true, &circular_y}, {"", "--chroma-disable-dit-mask", "disable dit mask for chroma", false, &chroma_use_dit_mask}, + {"", + "--qwen-image-zero-cond-t", + "enable zero_cond_t for qwen image", + true, &qwen_image_zero_cond_t}, {"", "--chroma-enable-t5-mask", "enable t5 mask for chroma", @@ -771,7 +810,7 @@ struct SDContextParams { } void build_embedding_map() { - static const std::vector valid_ext = {".pt", ".safetensors", ".gguf"}; + static const std::vector valid_ext = {".gguf", ".safetensors", ".pt"}; if (!fs::exists(embedding_dir) || !fs::is_directory(embedding_dir)) { return; @@ -860,15 +899,20 @@ struct SDContextParams { << " photo_maker_path: \"" << photo_maker_path << "\",\n" << " rng_type: " << sd_rng_type_name(rng_type) << ",\n" << " sampler_rng_type: " << sd_rng_type_name(sampler_rng_type) << ",\n" - << " flow_shift: " << (std::isinf(flow_shift) ? "INF" : std::to_string(flow_shift)) << "\n" << " offload_params_to_cpu: " << (offload_params_to_cpu ? "true" : "false") << ",\n" + << " enable_mmap: " << (enable_mmap ? "true" : "false") << ",\n" << " control_net_cpu: " << (control_net_cpu ? "true" : "false") << ",\n" << " clip_on_cpu: " << (clip_on_cpu ? "true" : "false") << ",\n" << " vae_on_cpu: " << (vae_on_cpu ? "true" : "false") << ",\n" + << " flash_attn: " << (flash_attn ? "true" : "false") << ",\n" << " diffusion_flash_attn: " << (diffusion_flash_attn ? "true" : "false") << ",\n" << " diffusion_conv_direct: " << (diffusion_conv_direct ? "true" : "false") << ",\n" << " vae_conv_direct: " << (vae_conv_direct ? "true" : "false") << ",\n" + << " circular: " << (circular ? "true" : "false") << ",\n" + << " circular_x: " << (circular_x ? "true" : "false") << ",\n" + << " circular_y: " << (circular_y ? "true" : "false") << ",\n" << " chroma_use_dit_mask: " << (chroma_use_dit_mask ? "true" : "false") << ",\n" + << " qwen_image_zero_cond_t: " << (qwen_image_zero_cond_t ? "true" : "false") << ",\n" << " chroma_use_t5_mask: " << (chroma_use_t5_mask ? 
"true" : "false") << ",\n" << " chroma_t5_mask_pad: " << chroma_t5_mask_pad << ",\n" << " prediction: " << sd_prediction_name(prediction) << ",\n" @@ -921,18 +965,22 @@ struct SDContextParams { prediction, lora_apply_mode, offload_params_to_cpu, + enable_mmap, clip_on_cpu, control_net_cpu, vae_on_cpu, + flash_attn, diffusion_flash_attn, taesd_preview, diffusion_conv_direct, vae_conv_direct, + circular || circular_x, + circular || circular_y, force_sdxl_vae_conv_scale, chroma_use_dit_mask, chroma_use_t5_mask, chroma_t5_mask_pad, - flow_shift, + qwen_image_zero_cond_t, }; return sd_ctx_params; } @@ -977,8 +1025,8 @@ struct SDGenerationParams { std::string prompt_with_lora; // for metadata record only std::string negative_prompt; int clip_skip = -1; // <= 0 represents unspecified - int width = 512; - int height = 512; + int width = -1; + int height = -1; int batch_count = 1; std::string init_image_path; std::string end_image_path; @@ -997,8 +1045,12 @@ struct SDGenerationParams { std::vector custom_sigmas; - std::string easycache_option; - sd_easycache_params_t easycache_params; + std::string cache_mode; + std::string cache_option; + std::string cache_preset; + std::string scm_mask; + bool scm_policy_dynamic = true; + sd_cache_params_t cache_params{}; float moe_boundary = 0.875f; int video_frames = 1; @@ -1148,6 +1200,10 @@ struct SDGenerationParams { "--eta", "eta in DDIM, only for DDIM and TCD (default: 0)", &sample_params.eta}, + {"", + "--flow-shift", + "shift value for Flow models like SD3.x or WAN (default: auto)", + &sample_params.flow_shift}, {"", "--high-noise-cfg-scale", "(high noise) unconditional guidance scale: (default: 7.0)", @@ -1335,10 +1391,10 @@ struct SDGenerationParams { if (!item.empty()) { try { custom_sigmas.push_back(std::stof(item)); - } catch (const std::invalid_argument& e) { + } catch (const std::invalid_argument&) { LOG_ERROR("error: invalid float value '%s' in --sigmas", item.c_str()); return -1; - } catch (const std::out_of_range& e) { + } catch (const std::out_of_range&) { LOG_ERROR("error: float value '%s' out of range in --sigmas", item.c_str()); return -1; } @@ -1360,36 +1416,64 @@ struct SDGenerationParams { return 1; }; - auto on_easycache_arg = [&](int argc, const char** argv, int index) { - const std::string default_values = "0.2,0.15,0.95"; - auto looks_like_value = [](const std::string& token) { - if (token.empty()) { - return false; - } - if (token[0] != '-') { - return true; - } - if (token.size() == 1) { - return false; - } - unsigned char next = static_cast(token[1]); - return std::isdigit(next) || token[1] == '.'; - }; + auto on_cache_mode_arg = [&](int argc, const char** argv, int index) { + if (++index >= argc) { + return -1; + } + cache_mode = argv_to_utf8(index, argv); + if (cache_mode != "easycache" && cache_mode != "ucache" && + cache_mode != "dbcache" && cache_mode != "taylorseer" && cache_mode != "cache-dit") { + fprintf(stderr, "error: invalid cache mode '%s', must be 'easycache', 'ucache', 'dbcache', 'taylorseer', or 'cache-dit'\n", cache_mode.c_str()); + return -1; + } + return 1; + }; - std::string option_value; - int consumed = 0; - if (index + 1 < argc) { - std::string next_arg = argv[index + 1]; - if (looks_like_value(next_arg)) { - option_value = argv_to_utf8(index + 1, argv); - consumed = 1; - } + auto on_cache_option_arg = [&](int argc, const char** argv, int index) { + if (++index >= argc) { + return -1; } - if (option_value.empty()) { - option_value = default_values; + cache_option = argv_to_utf8(index, argv); + return 1; + }; 
+ + auto on_scm_mask_arg = [&](int argc, const char** argv, int index) { + if (++index >= argc) { + return -1; } - easycache_option = option_value; - return consumed; + scm_mask = argv_to_utf8(index, argv); + return 1; + }; + + auto on_scm_policy_arg = [&](int argc, const char** argv, int index) { + if (++index >= argc) { + return -1; + } + std::string policy = argv_to_utf8(index, argv); + if (policy == "dynamic") { + scm_policy_dynamic = true; + } else if (policy == "static") { + scm_policy_dynamic = false; + } else { + fprintf(stderr, "error: invalid scm policy '%s', must be 'dynamic' or 'static'\n", policy.c_str()); + return -1; + } + return 1; + }; + + auto on_cache_preset_arg = [&](int argc, const char** argv, int index) { + if (++index >= argc) { + return -1; + } + cache_preset = argv_to_utf8(index, argv); + if (cache_preset != "slow" && cache_preset != "s" && cache_preset != "S" && + cache_preset != "medium" && cache_preset != "m" && cache_preset != "M" && + cache_preset != "fast" && cache_preset != "f" && cache_preset != "F" && + cache_preset != "ultra" && cache_preset != "u" && cache_preset != "U") { + fprintf(stderr, "error: invalid cache preset '%s', must be 'slow'/'s', 'medium'/'m', 'fast'/'f', or 'ultra'/'u'\n", cache_preset.c_str()); + return -1; + } + return 1; }; options.manual_options = { @@ -1399,17 +1483,17 @@ struct SDGenerationParams { on_seed_arg}, {"", "--sampling-method", - "sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd] " + "sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd, res_multistep, res_2s] " "(default: euler for Flux/SD3/Wan, euler_a otherwise)", on_sample_method_arg}, {"", "--high-noise-sampling-method", - "(high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd]" + "(high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd, res_multistep, res_2s]" " default: euler for Flux/SD3/Wan, euler_a otherwise", on_high_noise_sample_method_arg}, {"", "--scheduler", - "denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple, lcm], default: discrete", + "denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple, kl_optimal, lcm, bong_tangent], default: discrete", on_scheduler_arg}, {"", "--sigmas", @@ -1428,9 +1512,25 @@ struct SDGenerationParams { "reference image for Flux Kontext models (can be used multiple times)", on_ref_image_arg}, {"", - "--easycache", - "enable EasyCache for DiT models with optional \"threshold,start_percent,end_percent\" (default: 0.2,0.15,0.95)", - on_easycache_arg}, + "--cache-mode", + "caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level)", + on_cache_mode_arg}, + {"", + "--cache-option", + "named cache params (key=value format, comma-separated). easycache/ucache: threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. 
Examples: \"threshold=0.25\" or \"threshold=1.5,reset=0\"", + on_cache_option_arg}, + {"", + "--cache-preset", + "cache-dit preset: 'slow'/'s', 'medium'/'m', 'fast'/'f', 'ultra'/'u'", + on_cache_preset_arg}, + {"", + "--scm-mask", + "SCM steps mask for cache-dit: comma-separated 0/1 (e.g., \"1,1,1,0,0,1,0,0,1,0\") - 1=compute, 0=can cache", + on_scm_mask_arg}, + {"", + "--scm-policy", + "SCM policy: 'dynamic' (default) or 'static'", + on_scm_policy_arg}, }; @@ -1473,7 +1573,10 @@ struct SDGenerationParams { load_if_exists("prompt", prompt); load_if_exists("negative_prompt", negative_prompt); - load_if_exists("easycache_option", easycache_option); + load_if_exists("cache_mode", cache_mode); + load_if_exists("cache_option", cache_option); + load_if_exists("cache_preset", cache_preset); + load_if_exists("scm_mask", scm_mask); load_if_exists("clip_skip", clip_skip); load_if_exists("width", width); @@ -1496,9 +1599,30 @@ struct SDGenerationParams { load_if_exists("skip_layers", skip_layers); load_if_exists("high_noise_skip_layers", high_noise_skip_layers); + load_if_exists("steps", sample_params.sample_steps); + load_if_exists("high_noise_steps", high_noise_sample_params.sample_steps); load_if_exists("cfg_scale", sample_params.guidance.txt_cfg); load_if_exists("img_cfg_scale", sample_params.guidance.img_cfg); load_if_exists("guidance", sample_params.guidance.distilled_guidance); + load_if_exists("flow_shift", sample_params.flow_shift); + + auto load_sampler_if_exists = [&](const char* key, enum sample_method_t& out) { + if (j.contains(key) && j[key].is_string()) { + enum sample_method_t tmp = str_to_sample_method(j[key].get().c_str()); + if (tmp != SAMPLE_METHOD_COUNT) { + out = tmp; + } + } + }; + load_sampler_if_exists("sample_method", sample_params.sample_method); + load_sampler_if_exists("high_noise_sample_method", high_noise_sample_params.sample_method); + + if (j.contains("scheduler") && j["scheduler"].is_string()) { + enum scheduler_t tmp = str_to_scheduler(j["scheduler"].get().c_str()); + if (tmp != SCHEDULER_COUNT) { + sample_params.scheduler = tmp; + } + } return true; } @@ -1508,7 +1632,7 @@ struct SDGenerationParams { return; } static const std::regex re(R"(]+):([^>]+)>)"); - static const std::vector valid_ext = {".pt", ".safetensors", ".gguf"}; + static const std::vector valid_ext = {".gguf", ".safetensors", ".pt"}; std::smatch m; std::string tmp = prompt; @@ -1587,17 +1711,24 @@ struct SDGenerationParams { } } + bool width_and_height_are_set() const { + return width > 0 && height > 0; + } + + void set_width_and_height_if_unset(int w, int h) { + if (!width_and_height_are_set()) { + LOG_INFO("set width x height to %d x %d", w, h); + width = w; + height = h; + } + } + + int get_resolved_width() const { return (width > 0) ? width : 512; } + + int get_resolved_height() const { return (height > 0) ? 
height : 512; } + bool process_and_check(SDMode mode, const std::string& lora_model_dir) { prompt_with_lora = prompt; - if (width <= 0) { - LOG_ERROR("error: the width must be greater than 0\n"); - return false; - } - - if (height <= 0) { - LOG_ERROR("error: the height must be greater than 0\n"); - return false; - } if (sample_params.sample_steps <= 0) { LOG_ERROR("error: the sample_steps must be greater than 0\n"); @@ -1613,57 +1744,118 @@ struct SDGenerationParams { return false; } - if (!easycache_option.empty()) { - float values[3] = {0.0f, 0.0f, 0.0f}; - std::stringstream ss(easycache_option); + sd_cache_params_init(&cache_params); + + auto parse_named_params = [&](const std::string& opt_str) -> bool { + std::stringstream ss(opt_str); std::string token; - int idx = 0; while (std::getline(ss, token, ',')) { - auto trim = [](std::string& s) { - const char* whitespace = " \t\r\n"; - auto start = s.find_first_not_of(whitespace); - if (start == std::string::npos) { - s.clear(); - return; - } - auto end = s.find_last_not_of(whitespace); - s = s.substr(start, end - start + 1); - }; - trim(token); - if (token.empty()) { - LOG_ERROR("error: invalid easycache option '%s'", easycache_option.c_str()); - return false; - } - if (idx >= 3) { - LOG_ERROR("error: easycache expects exactly 3 comma-separated values (threshold,start,end)\n"); + size_t eq_pos = token.find('='); + if (eq_pos == std::string::npos) { + LOG_ERROR("error: cache option '%s' missing '=' separator", token.c_str()); return false; } + std::string key = token.substr(0, eq_pos); + std::string val = token.substr(eq_pos + 1); try { - values[idx] = std::stof(token); + if (key == "threshold") { + if (cache_mode == "easycache" || cache_mode == "ucache") { + cache_params.reuse_threshold = std::stof(val); + } else { + cache_params.residual_diff_threshold = std::stof(val); + } + } else if (key == "start") { + cache_params.start_percent = std::stof(val); + } else if (key == "end") { + cache_params.end_percent = std::stof(val); + } else if (key == "decay") { + cache_params.error_decay_rate = std::stof(val); + } else if (key == "relative") { + cache_params.use_relative_threshold = (std::stof(val) != 0.0f); + } else if (key == "reset") { + cache_params.reset_error_on_compute = (std::stof(val) != 0.0f); + } else if (key == "Fn" || key == "fn") { + cache_params.Fn_compute_blocks = std::stoi(val); + } else if (key == "Bn" || key == "bn") { + cache_params.Bn_compute_blocks = std::stoi(val); + } else if (key == "warmup") { + cache_params.max_warmup_steps = std::stoi(val); + } else { + LOG_ERROR("error: unknown cache parameter '%s'", key.c_str()); + return false; + } } catch (const std::exception&) { - LOG_ERROR("error: invalid easycache value '%s'", token.c_str()); + LOG_ERROR("error: invalid value '%s' for parameter '%s'", val.c_str(), key.c_str()); return false; } - idx++; } - if (idx != 3) { - LOG_ERROR("error: easycache expects exactly 3 comma-separated values (threshold,start,end)\n"); - return false; + return true; + }; + + if (!cache_mode.empty()) { + if (cache_mode == "easycache") { + cache_params.mode = SD_CACHE_EASYCACHE; + cache_params.reuse_threshold = 0.2f; + cache_params.start_percent = 0.15f; + cache_params.end_percent = 0.95f; + cache_params.error_decay_rate = 1.0f; + cache_params.use_relative_threshold = true; + cache_params.reset_error_on_compute = true; + } else if (cache_mode == "ucache") { + cache_params.mode = SD_CACHE_UCACHE; + cache_params.reuse_threshold = 1.0f; + cache_params.start_percent = 0.15f; + cache_params.end_percent 
= 0.95f; + cache_params.error_decay_rate = 1.0f; + cache_params.use_relative_threshold = true; + cache_params.reset_error_on_compute = true; + } else if (cache_mode == "dbcache") { + cache_params.mode = SD_CACHE_DBCACHE; + cache_params.Fn_compute_blocks = 8; + cache_params.Bn_compute_blocks = 0; + cache_params.residual_diff_threshold = 0.08f; + cache_params.max_warmup_steps = 8; + } else if (cache_mode == "taylorseer") { + cache_params.mode = SD_CACHE_TAYLORSEER; + cache_params.Fn_compute_blocks = 8; + cache_params.Bn_compute_blocks = 0; + cache_params.residual_diff_threshold = 0.08f; + cache_params.max_warmup_steps = 8; + } else if (cache_mode == "cache-dit") { + cache_params.mode = SD_CACHE_CACHE_DIT; + cache_params.Fn_compute_blocks = 8; + cache_params.Bn_compute_blocks = 0; + cache_params.residual_diff_threshold = 0.08f; + cache_params.max_warmup_steps = 8; } - if (values[0] < 0.0f) { - LOG_ERROR("error: easycache threshold must be non-negative\n"); - return false; + + if (!cache_option.empty()) { + if (!parse_named_params(cache_option)) { + return false; + } } - if (values[1] < 0.0f || values[1] >= 1.0f || values[2] <= 0.0f || values[2] > 1.0f || values[1] >= values[2]) { - LOG_ERROR("error: easycache start/end percents must satisfy 0.0 <= start < end <= 1.0\n"); - return false; + + if (cache_mode == "easycache" || cache_mode == "ucache") { + if (cache_params.reuse_threshold < 0.0f) { + LOG_ERROR("error: cache threshold must be non-negative"); + return false; + } + if (cache_params.start_percent < 0.0f || cache_params.start_percent >= 1.0f || + cache_params.end_percent <= 0.0f || cache_params.end_percent > 1.0f || + cache_params.start_percent >= cache_params.end_percent) { + LOG_ERROR("error: cache start/end percents must satisfy 0.0 <= start < end <= 1.0"); + return false; + } } - easycache_params.enabled = true; - easycache_params.reuse_threshold = values[0]; - easycache_params.start_percent = values[1]; - easycache_params.end_percent = values[2]; - } else { - easycache_params.enabled = false; + } + + if (cache_params.mode == SD_CACHE_DBCACHE || + cache_params.mode == SD_CACHE_TAYLORSEER || + cache_params.mode == SD_CACHE_CACHE_DIT) { + if (!scm_mask.empty()) { + cache_params.scm_mask = scm_mask.c_str(); + } + cache_params.scm_policy_dynamic = scm_policy_dynamic; } sample_params.guidance.slg.layers = skip_layers.data(); @@ -1765,12 +1957,13 @@ struct SDGenerationParams { << " high_noise_skip_layers: " << vec_to_string(high_noise_skip_layers) << ",\n" << " high_noise_sample_params: " << high_noise_sample_params_str << ",\n" << " custom_sigmas: " << vec_to_string(custom_sigmas) << ",\n" - << " easycache_option: \"" << easycache_option << "\",\n" - << " easycache: " - << (easycache_params.enabled ? "enabled" : "disabled") - << " (threshold=" << easycache_params.reuse_threshold - << ", start=" << easycache_params.start_percent - << ", end=" << easycache_params.end_percent << "),\n" + << " cache_mode: \"" << cache_mode << "\",\n" + << " cache_option: \"" << cache_option << "\",\n" + << " cache: " + << (cache_params.mode != SD_CACHE_DISABLED ? 
"enabled" : "disabled") + << " (threshold=" << cache_params.reuse_threshold + << ", start=" << cache_params.start_percent + << ", end=" << cache_params.end_percent << "),\n" << " moe_boundary: " << moe_boundary << ",\n" << " video_frames: " << video_frames << ",\n" << " fps: " << fps << ",\n" @@ -1903,6 +2096,22 @@ uint8_t* load_image_from_file(const char* image_path, return load_image_common(false, image_path, 0, width, height, expected_width, expected_height, expected_channel); } +bool load_sd_image_from_file(sd_image_t* image, + const char* image_path, + int expected_width = 0, + int expected_height = 0, + int expected_channel = 3) { + int width; + int height; + image->data = load_image_common(false, image_path, 0, width, height, expected_width, expected_height, expected_channel); + if (image->data == nullptr) { + return false; + } + image->width = width; + image->height = height; + return true; +} + uint8_t* load_image_from_memory(const char* image_bytes, int len, int& width, @@ -1911,4 +2120,4 @@ uint8_t* load_image_from_memory(const char* image_bytes, int expected_height = 0, int expected_channel = 3) { return load_image_common(true, image_bytes, len, width, height, expected_width, expected_height, expected_channel); -} \ No newline at end of file +} diff --git a/examples/server/README.md b/examples/server/README.md index a475856f..75544364 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -4,11 +4,12 @@ usage: ./bin/sd-server [options] Svr Options: - -l, --listen-ip server listen ip (default: 127.0.0.1) - --listen-port server listen port (default: 1234) - -v, --verbose print extra info - --color colors the logging tags according to level - -h, --help show this help message and exit + -l, --listen-ip server listen ip (default: 127.0.0.1) + --serve-html-path path to HTML file to serve at root (optional) + --listen-port server listen port (default: 1234) + -v, --verbose print extra info + --color colors the logging tags according to level + -h, --help show this help message and exit Context Options: -m, --model path to full model @@ -35,17 +36,22 @@ Context Options: CPU physical cores --chroma-t5-mask-pad t5 mask pad size of chroma --vae-tile-overlap tile overlap for vae tiling, in fraction of tile size (default: 0.5) - --flow-shift shift value for Flow models like SD3.x or WAN (default: auto) --vae-tiling process vae in tiles to reduce memory usage --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae --offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed + --mmap whether to memory-map model --control-net-cpu keep controlnet in cpu (for low vram) --clip-on-cpu keep clip in cpu (for low vram) --vae-on-cpu keep vae in cpu (for low vram) - --diffusion-fa use flash attention in the diffusion model + --fa use flash attention + --diffusion-fa use flash attention in the diffusion model only --diffusion-conv-direct use ggml_conv2d_direct in the diffusion model --vae-conv-direct use ggml_conv2d_direct in the vae model + --circular enable circular padding for convolutions + --circularx enable circular RoPE wrapping on x-axis (width) only + --circulary enable circular RoPE wrapping on y-axis (height) only --chroma-disable-dit-mask disable dit mask for chroma + --qwen-image-zero-cond-t enable zero_cond_t for qwen image --chroma-enable-t5-mask enable t5 mask for chroma --type weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K). 
If not specified, the default is the type of the weight file @@ -95,6 +101,7 @@ Default Generation Options: --skip-layer-start SLG enabling point (default: 0.01) --skip-layer-end SLG disabling point (default: 0.2) --eta eta in DDIM, only for DDIM and TCD (default: 0) + --flow-shift shift value for Flow models like SD3.x or WAN (default: auto) --high-noise-cfg-scale (high noise) unconditional guidance scale: (default: 7.0) --high-noise-img-cfg-scale (high noise) image guidance scale for inpaint or instruct-pix2pix models (default: same as --cfg-scale) --high-noise-guidance (high noise) distilled guidance scale for models with guidance input (default: 3.5) @@ -111,14 +118,22 @@ Default Generation Options: --disable-auto-resize-ref-image disable auto resize of ref images -s, --seed RNG seed (default: 42, use random seed for < 0) --sampling-method sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, - tcd] (default: euler for Flux/SD3/Wan, euler_a otherwise) + tcd, res_multistep, res_2s] (default: euler for Flux/SD3/Wan, euler_a + otherwise) --high-noise-sampling-method (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, - ddim_trailing, tcd] default: euler for Flux/SD3/Wan, euler_a otherwise - --scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple, lcm], - default: discrete + ddim_trailing, tcd, res_multistep, res_2s] default: euler for Flux/SD3/Wan, + euler_a otherwise + --scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple, + kl_optimal, lcm, bong_tangent], default: discrete --sigmas custom sigma values for the sampler, comma-separated (e.g., "14.61,7.8,3.5,0.0"). --skip-layers layers to skip for SLG steps (default: [7,8,9]) --high-noise-skip-layers (high noise) layers to skip for SLG steps (default: [7,8,9]) -r, --ref-image reference image for Flux Kontext models (can be used multiple times) - --easycache enable EasyCache for DiT models with optional "threshold,start_percent,end_percent" (default: 0.2,0.15,0.95) -``` \ No newline at end of file + --cache-mode caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level) + --cache-option named cache params (key=value format, comma-separated). easycache/ucache: + threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. 
Examples: + "threshold=0.25" or "threshold=1.5,reset=0" + --cache-preset cache-dit preset: 'slow'/'s', 'medium'/'m', 'fast'/'f', 'ultra'/'u' + --scm-mask SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache + --scm-policy SCM policy: 'dynamic' (default) or 'static' +``` diff --git a/examples/server/main.cpp b/examples/server/main.cpp index 39359fbb..cc9e66cc 100644 --- a/examples/server/main.cpp +++ b/examples/server/main.cpp @@ -44,7 +44,7 @@ inline bool is_base64(unsigned char c) { } std::vector base64_decode(const std::string& encoded_string) { - int in_len = encoded_string.size(); + int in_len = static_cast(encoded_string.size()); int i = 0; int j = 0; int in_ = 0; @@ -86,27 +86,13 @@ std::vector base64_decode(const std::string& encoded_string) { return ret; } -std::string iso_timestamp_now() { - using namespace std::chrono; - auto now = system_clock::now(); - std::time_t t = system_clock::to_time_t(now); - std::tm tm{}; -#ifdef _MSC_VER - gmtime_s(&tm, &t); -#else - gmtime_r(&t, &tm); -#endif - std::ostringstream oss; - oss << std::put_time(&tm, "%Y-%m-%dT%H:%M:%SZ"); - return oss.str(); -} - struct SDSvrParams { std::string listen_ip = "127.0.0.1"; int listen_port = 1234; - bool normal_exit = false; - bool verbose = false; - bool color = false; + std::string serve_html_path; + bool normal_exit = false; + bool verbose = false; + bool color = false; ArgOptions get_options() { ArgOptions options; @@ -115,7 +101,11 @@ struct SDSvrParams { {"-l", "--listen-ip", "server listen ip (default: 127.0.0.1)", - &listen_ip}}; + &listen_ip}, + {"", + "--serve-html-path", + "path to HTML file to serve at root (optional)", + &serve_html_path}}; options.int_options = { {"", @@ -159,6 +149,11 @@ struct SDSvrParams { LOG_ERROR("error: listen_port should be in the range [0, 65535]"); return false; } + + if (!serve_html_path.empty() && !fs::exists(serve_html_path)) { + LOG_ERROR("error: serve_html_path file does not exist: %s", serve_html_path.c_str()); + return false; + } return true; } @@ -167,6 +162,7 @@ struct SDSvrParams { oss << "SDSvrParams {\n" << " listen_ip: " << listen_ip << ",\n" << " listen_port: \"" << listen_port << "\",\n" + << " serve_html_path: \"" << serve_html_path << "\",\n" << "}"; return oss.str(); } @@ -191,12 +187,18 @@ void parse_args(int argc, const char** argv, SDSvrParams& svr_params, SDContextP exit(svr_params.normal_exit ? 
0 : 1); } + const bool random_seed_requested = default_gen_params.seed < 0; + if (!svr_params.process_and_check() || !ctx_params.process_and_check(IMG_GEN) || !default_gen_params.process_and_check(IMG_GEN, ctx_params.lora_model_dir)) { print_usage(argc, argv, options_vec); exit(1); } + + if (random_seed_requested) { + default_gen_params.seed = -1; + } } std::string extract_and_remove_sd_cpp_extra_args(std::string& text) { @@ -261,6 +263,24 @@ void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) { log_print(level, log, svr_params->verbose, svr_params->color); } +struct LoraEntry { + std::string name; + std::string path; + std::string fullpath; +}; + +void free_results(sd_image_t* result_images, int num_results) { + if (result_images) { + for (int i = 0; i < num_results; ++i) { + if (result_images[i].data) { + stbi_image_free(result_images[i].data); + result_images[i].data = nullptr; + } + } + } + free(result_images); +} + int main(int argc, const char** argv) { if (argc > 1 && std::string(argv[1]) == "--version") { std::cout << version_string() << "\n"; @@ -291,6 +311,56 @@ int main(int argc, const char** argv) { std::mutex sd_ctx_mutex; + std::vector lora_cache; + std::mutex lora_mutex; + + auto refresh_lora_cache = [&]() { + std::vector new_cache; + + fs::path lora_dir = ctx_params.lora_model_dir; + if (fs::exists(lora_dir) && fs::is_directory(lora_dir)) { + auto is_lora_ext = [](const fs::path& p) { + auto ext = p.extension().string(); + std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower); + return ext == ".gguf" || ext == ".pt" || ext == ".pth" || ext == ".safetensors"; + }; + + for (auto& entry : fs::recursive_directory_iterator(lora_dir)) { + if (!entry.is_regular_file()) + continue; + const fs::path& p = entry.path(); + if (!is_lora_ext(p)) + continue; + + LoraEntry e; + e.name = p.stem().u8string(); + e.fullpath = p.u8string(); + std::string rel = p.lexically_relative(lora_dir).u8string(); + std::replace(rel.begin(), rel.end(), '\\', '/'); + e.path = rel; + + new_cache.push_back(std::move(e)); + } + } + + std::sort(new_cache.begin(), new_cache.end(), + [](const LoraEntry& a, const LoraEntry& b) { + return a.path < b.path; + }); + + { + std::lock_guard lock(lora_mutex); + lora_cache = std::move(new_cache); + } + }; + + auto get_lora_full_path = [&](const std::string& path) -> std::string { + std::lock_guard lock(lora_mutex); + auto it = std::find_if(lora_cache.begin(), lora_cache.end(), + [&](const LoraEntry& e) { return e.path == path; }); + return (it != lora_cache.end()) ? 
it->fullpath : ""; + }; + httplib::Server svr; svr.set_pre_routing_handler([](const httplib::Request& req, httplib::Response& res) { @@ -310,9 +380,20 @@ int main(int argc, const char** argv) { return httplib::Server::HandlerResponse::Unhandled; }); - // health + // root svr.Get("/", [&](const httplib::Request&, httplib::Response& res) { - res.set_content(R"({"ok":true,"service":"sd-cpp-http"})", "application/json"); + if (!svr_params.serve_html_path.empty()) { + std::ifstream file(svr_params.serve_html_path); + if (file) { + std::string content((std::istreambuf_iterator(file)), std::istreambuf_iterator()); + res.set_content(content, "text/html"); + } else { + res.status = 500; + res.set_content("Error: Unable to read HTML file", "text/plain"); + } + } else { + res.set_content("Stable Diffusion Server is running", "text/plain"); + } }); // models endpoint (minimal) @@ -338,8 +419,8 @@ int main(int argc, const char** argv) { std::string size = j.value("size", ""); std::string output_format = j.value("output_format", "png"); int output_compression = j.value("output_compression", 100); - int width = 512; - int height = 512; + int width = default_gen_params.width > 0 ? default_gen_params.width : 512; + int height = default_gen_params.width > 0 ? default_gen_params.height : 512; if (!size.empty()) { auto pos = size.find('x'); if (pos != std::string::npos) { @@ -376,7 +457,7 @@ int main(int argc, const char** argv) { } json out; - out["created"] = iso_timestamp_now(); + out["created"] = static_cast(std::time(nullptr)); out["data"] = json::array(); out["output_format"] = output_format; @@ -392,6 +473,9 @@ int main(int argc, const char** argv) { return; } + if (gen_params.sample_params.sample_steps > 100) + gen_params.sample_params.sample_steps = 100; + if (!gen_params.process_and_check(IMG_GEN, "")) { res.status = 400; res.set_content(R"({"error":"invalid params"})", "application/json"); @@ -432,7 +516,7 @@ int main(int argc, const char** argv) { gen_params.pm_style_strength, }, // pm_params ctx_params.vae_tiling_params, - gen_params.easycache_params, + gen_params.cache_params, }; sd_image_t* results = nullptr; @@ -465,6 +549,7 @@ int main(int argc, const char** argv) { item["b64_json"] = b64; out["data"].push_back(item); } + free_results(results, num_results); res.set_content(out.dump(), "application/json"); res.status = 200; @@ -495,8 +580,9 @@ int main(int argc, const char** argv) { std::string sd_cpp_extra_args_str = extract_and_remove_sd_cpp_extra_args(prompt); - size_t image_count = req.form.get_file_count("image[]"); - if (image_count == 0) { + size_t image_count = req.form.get_file_count("image[]"); + bool has_legacy_image = req.form.has_file("image"); + if (image_count == 0 && !has_legacy_image) { res.status = 400; res.set_content(R"({"error":"at least one image[] required"})", "application/json"); return; @@ -507,9 +593,13 @@ int main(int argc, const char** argv) { auto file = req.form.get_file("image[]", i); images_bytes.emplace_back(file.content.begin(), file.content.end()); } + if (image_count == 0 && has_legacy_image) { + auto file = req.form.get_file("image"); + images_bytes.emplace_back(file.content.begin(), file.content.end()); + } std::vector mask_bytes; - if (req.form.has_field("mask")) { + if (req.form.has_file("mask")) { auto file = req.form.get_file("mask"); mask_bytes.assign(file.content.begin(), file.content.end()); } @@ -524,7 +614,7 @@ int main(int argc, const char** argv) { n = std::clamp(n, 1, 8); std::string size = req.form.get_field("size"); - int width = 512, height 
= 512; + int width = -1, height = -1; if (!size.empty()) { auto pos = size.find('x'); if (pos != std::string::npos) { @@ -570,6 +660,9 @@ int main(int argc, const char** argv) { return; } + if (gen_params.sample_params.sample_steps > 100) + gen_params.sample_params.sample_steps = 100; + if (!gen_params.process_and_check(IMG_GEN, "")) { res.status = 400; res.set_content(R"({"error":"invalid params"})", "application/json"); @@ -578,18 +671,34 @@ int main(int argc, const char** argv) { LOG_DEBUG("%s\n", gen_params.to_string().c_str()); - sd_image_t init_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 3, nullptr}; - sd_image_t control_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 3, nullptr}; + sd_image_t init_image = {0, 0, 3, nullptr}; + sd_image_t control_image = {0, 0, 3, nullptr}; std::vector pmid_images; + auto get_resolved_width = [&gen_params, &default_gen_params]() -> int { + if (gen_params.width > 0) + return gen_params.width; + if (default_gen_params.width > 0) + return default_gen_params.width; + return 512; + }; + auto get_resolved_height = [&gen_params, &default_gen_params]() -> int { + if (gen_params.height > 0) + return gen_params.height; + if (default_gen_params.height > 0) + return default_gen_params.height; + return 512; + }; + std::vector ref_images; ref_images.reserve(images_bytes.size()); for (auto& bytes : images_bytes) { - int img_w = width; - int img_h = height; + int img_w; + int img_h; + uint8_t* raw_pixels = load_image_from_memory( reinterpret_cast(bytes.data()), - bytes.size(), + static_cast(bytes.size()), img_w, img_h, width, height, 3); @@ -598,22 +707,31 @@ int main(int argc, const char** argv) { } sd_image_t img{(uint32_t)img_w, (uint32_t)img_h, 3, raw_pixels}; + gen_params.set_width_and_height_if_unset(img.width, img.height); ref_images.push_back(img); } sd_image_t mask_image = {0}; if (!mask_bytes.empty()) { - int mask_w = width; - int mask_h = height; + int expected_width = 0; + int expected_height = 0; + if (gen_params.width_and_height_are_set()) { + expected_width = gen_params.width; + expected_height = gen_params.height; + } + int mask_w; + int mask_h; + uint8_t* mask_raw = load_image_from_memory( reinterpret_cast(mask_bytes.data()), - mask_bytes.size(), + static_cast(mask_bytes.size()), mask_w, mask_h, - width, height, 1); + expected_width, expected_height, 1); mask_image = {(uint32_t)mask_w, (uint32_t)mask_h, 1, mask_raw}; + gen_params.set_width_and_height_if_unset(mask_image.width, mask_image.height); } else { - mask_image.width = width; - mask_image.height = height; + mask_image.width = get_resolved_width(); + mask_image.height = get_resolved_height(); mask_image.channel = 1; mask_image.data = nullptr; } @@ -630,8 +748,8 @@ int main(int argc, const char** argv) { gen_params.auto_resize_ref_image, gen_params.increase_ref_index, mask_image, - gen_params.width, - gen_params.height, + get_resolved_width(), + get_resolved_height(), gen_params.sample_params, gen_params.strength, gen_params.seed, @@ -645,7 +763,7 @@ int main(int argc, const char** argv) { gen_params.pm_style_strength, }, // pm_params ctx_params.vae_tiling_params, - gen_params.easycache_params, + gen_params.cache_params, }; sd_image_t* results = nullptr; @@ -658,7 +776,7 @@ int main(int argc, const char** argv) { } json out; - out["created"] = iso_timestamp_now(); + out["created"] = static_cast(std::time(nullptr)); out["data"] = json::array(); out["output_format"] = output_format; @@ -676,6 +794,7 @@ int main(int argc, const char** argv) { 
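+            // Each generated image is returned inline as a base64 "b64_json" entry;
+            // free_results() below releases the result buffers once they have been encoded.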
item["b64_json"] = b64; out["data"].push_back(item); } + free_results(results, num_results); res.set_content(out.dump(), "application/json"); res.status = 200; @@ -698,6 +817,408 @@ int main(int argc, const char** argv) { } }); + // sdapi endpoints (AUTOMATIC1111 / Forge) + + auto sdapi_any2img = [&](const httplib::Request& req, httplib::Response& res, bool img2img) { + try { + if (req.body.empty()) { + res.status = 400; + res.set_content(R"({"error":"empty body"})", "application/json"); + return; + } + + json j = json::parse(req.body); + + std::string prompt = j.value("prompt", ""); + std::string negative_prompt = j.value("negative_prompt", ""); + int width = j.value("width", 512); + int height = j.value("height", 512); + int steps = j.value("steps", default_gen_params.sample_params.sample_steps); + float cfg_scale = j.value("cfg_scale", default_gen_params.sample_params.guidance.txt_cfg); + int64_t seed = j.value("seed", -1); + int batch_size = j.value("batch_size", 1); + int clip_skip = j.value("clip_skip", -1); + std::string sampler_name = j.value("sampler_name", ""); + std::string scheduler_name = j.value("scheduler", ""); + + auto bad = [&](const std::string& msg) { + res.status = 400; + res.set_content("{\"error\":\"" + msg + "\"}", "application/json"); + return; + }; + + if (width <= 0 || height <= 0) { + return bad("width and height must be positive"); + } + + if (steps < 1 || steps > 150) { + return bad("steps must be in range [1, 150]"); + } + + if (batch_size < 1 || batch_size > 8) { + return bad("batch_size must be in range [1, 8]"); + } + + if (cfg_scale < 0.f) { + return bad("cfg_scale must be positive"); + } + + if (prompt.empty()) { + return bad("prompt required"); + } + + std::vector sd_loras; + std::vector lora_path_storage; + + if (j.contains("lora") && j["lora"].is_array()) { + for (const auto& item : j["lora"]) { + if (!item.is_object()) { + continue; + } + + std::string path = item.value("path", ""); + float multiplier = item.value("multiplier", 1.0f); + bool is_high_noise = item.value("is_high_noise", false); + + if (path.empty()) { + return bad("lora.path required"); + } + + std::string fullpath = get_lora_full_path(path); + if (fullpath.empty()) { + return bad("invalid lora path: " + path); + } + + lora_path_storage.push_back(fullpath); + sd_lora_t l; + l.is_high_noise = is_high_noise; + l.multiplier = multiplier; + l.path = lora_path_storage.back().c_str(); + + sd_loras.push_back(l); + } + } + + auto get_sample_method = [](std::string name) -> enum sample_method_t { + enum sample_method_t result = str_to_sample_method(name.c_str()); + if (result != SAMPLE_METHOD_COUNT) return result; + // some applications use a hardcoded sampler list + std::transform(name.begin(), name.end(), name.begin(), + [](unsigned char c) { return std::tolower(c); }); + static const std::unordered_map hardcoded{ + {"euler a", EULER_A_SAMPLE_METHOD}, + {"k_euler_a", EULER_A_SAMPLE_METHOD}, + {"euler", EULER_SAMPLE_METHOD}, + {"k_euler", EULER_SAMPLE_METHOD}, + {"heun", HEUN_SAMPLE_METHOD}, + {"k_heun", HEUN_SAMPLE_METHOD}, + {"dpm2", DPM2_SAMPLE_METHOD}, + {"k_dpm_2", DPM2_SAMPLE_METHOD}, + {"lcm", LCM_SAMPLE_METHOD}, + {"ddim", DDIM_TRAILING_SAMPLE_METHOD}, + {"dpm++ 2m", DPMPP2M_SAMPLE_METHOD}, + {"k_dpmpp_2m", DPMPP2M_SAMPLE_METHOD}, + {"res multistep", RES_MULTISTEP_SAMPLE_METHOD}, + {"k_res_multistep", RES_MULTISTEP_SAMPLE_METHOD}, + {"res 2s", RES_2S_SAMPLE_METHOD}, + {"k_res_2s", RES_2S_SAMPLE_METHOD}}; + auto it = hardcoded.find(name); + if (it != hardcoded.end()) return it->second; 
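+            // Illustrative request (values are only an example, not a recommendation):
+            //   POST /sdapi/v1/txt2img
+            //   {"prompt": "a photo of a cat", "sampler_name": "Euler a", "steps": 20}
+            // Both "Euler a" (UI label) and "k_euler_a" (k-diffusion name) resolve to
+            // EULER_A_SAMPLE_METHOD via the table above; unrecognized names fall through
+            // below, so the caller keeps the server's default sampler.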
+ return SAMPLE_METHOD_COUNT; + }; + + enum sample_method_t sample_method = get_sample_method(sampler_name); + + enum scheduler_t scheduler = str_to_scheduler(scheduler_name.c_str()); + + SDGenerationParams gen_params = default_gen_params; + gen_params.prompt = prompt; + gen_params.negative_prompt = negative_prompt; + gen_params.seed = seed; + gen_params.sample_params.sample_steps = steps; + gen_params.batch_count = batch_size; + gen_params.sample_params.guidance.txt_cfg = cfg_scale; + + if (clip_skip > 0) { + gen_params.clip_skip = clip_skip; + } + + if (sample_method != SAMPLE_METHOD_COUNT) { + gen_params.sample_params.sample_method = sample_method; + } + + if (scheduler != SCHEDULER_COUNT) { + gen_params.sample_params.scheduler = scheduler; + } + + // re-read to avoid applying 512 as default before the provided + // images and/or server command-line + gen_params.width = j.value("width", -1); + gen_params.height = j.value("height", -1); + + LOG_DEBUG("%s\n", gen_params.to_string().c_str()); + + sd_image_t init_image = {0, 0, 3, nullptr}; + sd_image_t control_image = {0, 0, 3, nullptr}; + sd_image_t mask_image = {0, 0, 1, nullptr}; + std::vector mask_data; + std::vector pmid_images; + std::vector ref_images; + + auto get_resolved_width = [&gen_params, &default_gen_params]() -> int { + if (gen_params.width > 0) + return gen_params.width; + if (default_gen_params.width > 0) + return default_gen_params.width; + return 512; + }; + auto get_resolved_height = [&gen_params, &default_gen_params]() -> int { + if (gen_params.height > 0) + return gen_params.height; + if (default_gen_params.height > 0) + return default_gen_params.height; + return 512; + }; + + auto decode_image = [&gen_params](sd_image_t& image, std::string encoded) -> bool { + // remove data URI prefix if present ("data:image/png;base64,") + auto comma_pos = encoded.find(','); + if (comma_pos != std::string::npos) { + encoded = encoded.substr(comma_pos + 1); + } + std::vector img_data = base64_decode(encoded); + if (!img_data.empty()) { + int expected_width = 0; + int expected_height = 0; + if (gen_params.width_and_height_are_set()) { + expected_width = gen_params.width; + expected_height = gen_params.height; + } + int img_w; + int img_h; + + uint8_t* raw_data = load_image_from_memory( + (const char*)img_data.data(), (int)img_data.size(), + img_w, img_h, + expected_width, expected_height, image.channel); + if (raw_data) { + image = {(uint32_t)img_w, (uint32_t)img_h, image.channel, raw_data}; + gen_params.set_width_and_height_if_unset(image.width, image.height); + return true; + } + } + return false; + }; + + if (img2img) { + if (j.contains("init_images") && j["init_images"].is_array() && !j["init_images"].empty()) { + std::string encoded = j["init_images"][0].get(); + decode_image(init_image, encoded); + } + + if (j.contains("mask") && j["mask"].is_string()) { + std::string encoded = j["mask"].get(); + decode_image(mask_image, encoded); + bool inpainting_mask_invert = j.value("inpainting_mask_invert", 0) != 0; + if (inpainting_mask_invert && mask_image.data != nullptr) { + for (uint32_t i = 0; i < mask_image.width * mask_image.height; i++) { + mask_image.data[i] = 255 - mask_image.data[i]; + } + } + } else { + int m_width = get_resolved_width(); + int m_height = get_resolved_height(); + mask_data = std::vector(m_width * m_height, 255); + mask_image.width = m_width; + mask_image.height = m_height; + mask_image.channel = 1; + mask_image.data = mask_data.data(); + } + + float denoising_strength = j.value("denoising_strength", -1.f); + 
if (denoising_strength >= 0.f) { + denoising_strength = std::min(denoising_strength, 1.0f); + gen_params.strength = denoising_strength; + } + } + + if (j.contains("extra_images") && j["extra_images"].is_array()) { + for (auto extra_image : j["extra_images"]) { + std::string encoded = extra_image.get(); + sd_image_t tmp_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 3, nullptr}; + if (decode_image(tmp_image, encoded)) { + ref_images.push_back(tmp_image); + } + } + } + + sd_img_gen_params_t img_gen_params = { + sd_loras.data(), + static_cast(sd_loras.size()), + gen_params.prompt.c_str(), + gen_params.negative_prompt.c_str(), + gen_params.clip_skip, + init_image, + ref_images.data(), + (int)ref_images.size(), + gen_params.auto_resize_ref_image, + gen_params.increase_ref_index, + mask_image, + get_resolved_width(), + get_resolved_height(), + gen_params.sample_params, + gen_params.strength, + gen_params.seed, + gen_params.batch_count, + control_image, + gen_params.control_strength, + { + pmid_images.data(), + (int)pmid_images.size(), + gen_params.pm_id_embed_path.c_str(), + gen_params.pm_style_strength, + }, // pm_params + ctx_params.vae_tiling_params, + gen_params.cache_params, + }; + + sd_image_t* results = nullptr; + int num_results = 0; + + { + std::lock_guard lock(sd_ctx_mutex); + results = generate_image(sd_ctx, &img_gen_params); + num_results = gen_params.batch_count; + } + + json out; + out["images"] = json::array(); + out["parameters"] = j; // TODO should return changed defaults + out["info"] = ""; + + for (int i = 0; i < num_results; i++) { + if (results[i].data == nullptr) { + continue; + } + + auto image_bytes = write_image_to_vector(ImageFormat::PNG, + results[i].data, + results[i].width, + results[i].height, + results[i].channel); + + if (image_bytes.empty()) { + LOG_ERROR("write image to mem failed"); + continue; + } + + std::string b64 = base64_encode(image_bytes); + out["images"].push_back(b64); + } + free_results(results, num_results); + + res.set_content(out.dump(), "application/json"); + res.status = 200; + + if (init_image.data) { + stbi_image_free(init_image.data); + } + if (mask_image.data && mask_data.empty()) { + stbi_image_free(mask_image.data); + } + for (auto ref_image : ref_images) { + stbi_image_free(ref_image.data); + } + + } catch (const std::exception& e) { + res.status = 500; + json err; + err["error"] = "server_error"; + err["message"] = e.what(); + res.set_content(err.dump(), "application/json"); + } + }; + + svr.Post("/sdapi/v1/txt2img", [&](const httplib::Request& req, httplib::Response& res) { + sdapi_any2img(req, res, false); + }); + + svr.Post("/sdapi/v1/img2img", [&](const httplib::Request& req, httplib::Response& res) { + sdapi_any2img(req, res, true); + }); + + svr.Get("/sdapi/v1/loras", [&](const httplib::Request&, httplib::Response& res) { + refresh_lora_cache(); + + json result = json::array(); + { + std::lock_guard lock(lora_mutex); + for (const auto& e : lora_cache) { + json item; + item["name"] = e.name; + item["path"] = e.path; + result.push_back(item); + } + } + + res.set_content(result.dump(), "application/json"); + }); + + svr.Get("/sdapi/v1/samplers", [&](const httplib::Request&, httplib::Response& res) { + std::vector sampler_names; + sampler_names.push_back("default"); + for (int i = 0; i < SAMPLE_METHOD_COUNT; i++) { + sampler_names.push_back(sd_sample_method_name((sample_method_t)i)); + } + json r = json::array(); + for (auto name : sampler_names) { + json entry; + entry["name"] = name; + entry["aliases"] = 
json::array({name}); + entry["options"] = json::object(); + r.push_back(entry); + } + res.set_content(r.dump(), "application/json"); + }); + + svr.Get("/sdapi/v1/schedulers", [&](const httplib::Request&, httplib::Response& res) { + std::vector scheduler_names; + scheduler_names.push_back("default"); + for (int i = 0; i < SCHEDULER_COUNT; i++) { + scheduler_names.push_back(sd_scheduler_name((scheduler_t)i)); + } + json r = json::array(); + for (auto name : scheduler_names) { + json entry; + entry["name"] = name; + entry["label"] = name; + r.push_back(entry); + } + res.set_content(r.dump(), "application/json"); + }); + + svr.Get("/sdapi/v1/sd-models", [&](const httplib::Request&, httplib::Response& res) { + fs::path model_path = ctx_params.model_path; + json entry; + entry["title"] = model_path.stem(); + entry["model_name"] = model_path.stem(); + entry["filename"] = model_path.filename(); + entry["hash"] = "8888888888"; + entry["sha256"] = "8888888888888888888888888888888888888888888888888888888888888888"; + entry["config"] = nullptr; + json r = json::array(); + r.push_back(entry); + res.set_content(r.dump(), "application/json"); + }); + + svr.Get("/sdapi/v1/options", [&](const httplib::Request&, httplib::Response& res) { + fs::path model_path = ctx_params.model_path; + json r; + r["samples_format"] = "png"; + r["sd_model_checkpoint"] = model_path.stem(); + res.set_content(r.dump(), "application/json"); + }); + LOG_INFO("listening on: %s:%d\n", svr_params.listen_ip.c_str(), svr_params.listen_port); svr.listen(svr_params.listen_ip, svr_params.listen_port); diff --git a/format-code.sh b/format-code.sh index d2a75bdc..ac5fd340 100644 --- a/format-code.sh +++ b/format-code.sh @@ -1,4 +1,4 @@ -for f in *.cpp *.h *.hpp examples/cli/*.cpp examples/common/*.hpp examples/cli/*.h examples/server/*.cpp; do +for f in src/*.cpp src/*.h src/*.hpp src/vocab/*.h src/vocab/*.cpp examples/cli/*.cpp examples/common/*.hpp examples/cli/*.h examples/server/*.cpp; do [[ "$f" == vocab* ]] && continue echo "formatting '$f'" # if [ "$f" != "stable-diffusion.h" ]; then diff --git a/ggml b/ggml index f5425c0e..a8db410a 160000 --- a/ggml +++ b/ggml @@ -1 +1 @@ -Subproject commit f5425c0ee5e582a7d64411f06139870bff3e52e0 +Subproject commit a8db410a252c8c8f2d120c6f2e7133ebe032f35d diff --git a/stable-diffusion.h b/include/stable-diffusion.h similarity index 89% rename from stable-diffusion.h rename to include/stable-diffusion.h index 9266ba43..51b2b329 100644 --- a/stable-diffusion.h +++ b/include/stable-diffusion.h @@ -48,6 +48,8 @@ enum sample_method_t { LCM_SAMPLE_METHOD, DDIM_TRAILING_SAMPLE_METHOD, TCD_SAMPLE_METHOD, + RES_MULTISTEP_SAMPLE_METHOD, + RES_2S_SAMPLE_METHOD, SAMPLE_METHOD_COUNT }; @@ -60,7 +62,9 @@ enum scheduler_t { SGM_UNIFORM_SCHEDULER, SIMPLE_SCHEDULER, SMOOTHSTEP_SCHEDULER, + KL_OPTIMAL_SCHEDULER, LCM_SCHEDULER, + BONG_TANGENT_SCHEDULER, SCHEDULER_COUNT }; @@ -181,18 +185,22 @@ typedef struct { enum prediction_t prediction; enum lora_apply_mode_t lora_apply_mode; bool offload_params_to_cpu; + bool enable_mmap; bool keep_clip_on_cpu; bool keep_control_net_on_cpu; bool keep_vae_on_cpu; + bool flash_attn; bool diffusion_flash_attn; bool tae_preview_only; bool diffusion_conv_direct; bool vae_conv_direct; + bool circular_x; + bool circular_y; bool force_sdxl_vae_conv_scale; bool chroma_use_dit_mask; bool chroma_use_t5_mask; int chroma_t5_mask_pad; - float flow_shift; + bool qwen_image_zero_cond_t; } sd_ctx_params_t; typedef struct { @@ -226,6 +234,7 @@ typedef struct { int shifted_timestep; float* 
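// A minimal caller-side sketch (not from the repository) of the sd_cache_params_t that replaces
// sd_easycache_params_t just below in this header. Only fields visible in the struct are touched;
// all other defaults are left to sd_cache_params_init(). The numeric values are illustrative.
#include "stable-diffusion.h"

static void enable_dbcache(sd_img_gen_params_t* p) {
    sd_cache_params_init(&p->cache);
    p->cache.mode                    = SD_CACHE_DBCACHE;
    p->cache.Fn_compute_blocks       = 8;     // always compute the first 8 transformer blocks
    p->cache.Bn_compute_blocks       = 0;     // no tail blocks forced to recompute
    p->cache.residual_diff_threshold = 0.08f; // reuse cached residuals below this relative diff
    p->cache.max_warmup_steps        = 8;     // never cache during the first sampling steps
    p->cache.max_cached_steps        = -1;    // -1 means unlimited
}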
custom_sigmas; int custom_sigmas_count; + float flow_shift; } sd_sample_params_t; typedef struct { @@ -235,12 +244,34 @@ typedef struct { float style_strength; } sd_pm_params_t; // photo maker +enum sd_cache_mode_t { + SD_CACHE_DISABLED = 0, + SD_CACHE_EASYCACHE, + SD_CACHE_UCACHE, + SD_CACHE_DBCACHE, + SD_CACHE_TAYLORSEER, + SD_CACHE_CACHE_DIT, +}; + typedef struct { - bool enabled; + enum sd_cache_mode_t mode; float reuse_threshold; float start_percent; float end_percent; -} sd_easycache_params_t; + float error_decay_rate; + bool use_relative_threshold; + bool reset_error_on_compute; + int Fn_compute_blocks; + int Bn_compute_blocks; + float residual_diff_threshold; + int max_warmup_steps; + int max_cached_steps; + int max_continuous_cached_steps; + int taylorseer_n_derivatives; + int taylorseer_skip_interval; + const char* scm_mask; + bool scm_policy_dynamic; +} sd_cache_params_t; typedef struct { bool is_high_noise; @@ -270,7 +301,7 @@ typedef struct { float control_strength; sd_pm_params_t pm_params; sd_tiling_params_t vae_tiling_params; - sd_easycache_params_t easycache; + sd_cache_params_t cache; } sd_img_gen_params_t; typedef struct { @@ -292,7 +323,8 @@ typedef struct { int64_t seed; int video_frames; float vace_strength; - sd_easycache_params_t easycache; + sd_tiling_params_t vae_tiling_params; + sd_cache_params_t cache; } sd_vid_gen_params_t; typedef struct sd_ctx_t sd_ctx_t; @@ -322,7 +354,7 @@ SD_API enum preview_t str_to_preview(const char* str); SD_API const char* sd_lora_apply_mode_name(enum lora_apply_mode_t mode); SD_API enum lora_apply_mode_t str_to_lora_apply_mode(const char* str); -SD_API void sd_easycache_params_init(sd_easycache_params_t* easycache_params); +SD_API void sd_cache_params_init(sd_cache_params_t* cache_params); SD_API void sd_ctx_params_init(sd_ctx_params_t* sd_ctx_params); SD_API char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params); @@ -334,7 +366,7 @@ SD_API void sd_sample_params_init(sd_sample_params_t* sample_params); SD_API char* sd_sample_params_to_str(const sd_sample_params_t* sample_params); SD_API enum sample_method_t sd_get_default_sample_method(const sd_ctx_t* sd_ctx); -SD_API enum scheduler_t sd_get_default_scheduler(const sd_ctx_t* sd_ctx); +SD_API enum scheduler_t sd_get_default_scheduler(const sd_ctx_t* sd_ctx, enum sample_method_t sample_method); SD_API void sd_img_gen_params_init(sd_img_gen_params_t* sd_img_gen_params); SD_API char* sd_img_gen_params_to_str(const sd_img_gen_params_t* sd_img_gen_params); @@ -362,7 +394,8 @@ SD_API bool convert(const char* input_path, const char* vae_path, const char* output_path, enum sd_type_t output_type, - const char* tensor_type_rules); + const char* tensor_type_rules, + bool convert_name); SD_API bool preprocess_canny(sd_image_t image, float high_threshold, diff --git a/face_detect.py b/script/face_detect.py similarity index 97% rename from face_detect.py rename to script/face_detect.py index 7131af31..e7a3eae1 100644 --- a/face_detect.py +++ b/script/face_detect.py @@ -1,88 +1,88 @@ -import os -import sys - -import numpy as np -import torch -from diffusers.utils import load_image -# pip install insightface==0.7.3 -from insightface.app import FaceAnalysis -from insightface.data import get_image as ins_get_image -from safetensors.torch import save_file - -### -# https://github.com/cubiq/ComfyUI_IPAdapter_plus/issues/165#issue-2055829543 -### -class FaceAnalysis2(FaceAnalysis): - # NOTE: allows setting det_size for each detection call. 
- # the model allows it but the wrapping code from insightface - # doesn't show it, and people end up loading duplicate models - # for different sizes where there is absolutely no need to - def get(self, img, max_num=0, det_size=(640, 640)): - if det_size is not None: - self.det_model.input_size = det_size - - return super().get(img, max_num) - -def analyze_faces(face_analysis: FaceAnalysis, img_data: np.ndarray, det_size=(640, 640)): - # NOTE: try detect faces, if no faces detected, lower det_size until it does - detection_sizes = [None] + [(size, size) for size in range(640, 256, -64)] + [(256, 256)] - - for size in detection_sizes: - faces = face_analysis.get(img_data, det_size=size) - if len(faces) > 0: - return faces - - return [] - -if __name__ == "__main__": - #face_detector = FaceAnalysis2(providers=['CUDAExecutionProvider'], allowed_modules=['detection', 'recognition']) - face_detector = FaceAnalysis2(providers=['CPUExecutionProvider'], allowed_modules=['detection', 'recognition']) - face_detector.prepare(ctx_id=0, det_size=(640, 640)) - #input_folder_name = './scarletthead_woman' - input_folder_name = sys.argv[1] - image_basename_list = os.listdir(input_folder_name) - image_path_list = sorted([os.path.join(input_folder_name, basename) for basename in image_basename_list]) - - input_id_images = [] - for image_path in image_path_list: - input_id_images.append(load_image(image_path)) - - id_embed_list = [] - - for img in input_id_images: - img = np.array(img) - img = img[:, :, ::-1] - faces = analyze_faces(face_detector, img) - if len(faces) > 0: - id_embed_list.append(torch.from_numpy((faces[0]['embedding']))) - - if len(id_embed_list) == 0: - raise ValueError(f"No face detected in input image pool") - - id_embeds = torch.stack(id_embed_list) - - # for r in id_embeds: - # print(r) - # #torch.save(id_embeds, input_folder_name+'/id_embeds.pt'); - # weights = dict() - # weights["id_embeds"] = id_embeds - # save_file(weights, input_folder_name+'/id_embeds.safetensors') - - binary_data = id_embeds.numpy().tobytes() - two = 4 - zero = 0 - one = 1 - tensor_name = "id_embeds" -# Write binary data to a file - with open(input_folder_name+'/id_embeds.bin', "wb") as f: - f.write(two.to_bytes(4, byteorder='little')) - f.write((len(tensor_name)).to_bytes(4, byteorder='little')) - f.write(zero.to_bytes(4, byteorder='little')) - f.write((id_embeds.shape[1]).to_bytes(4, byteorder='little')) - f.write((id_embeds.shape[0]).to_bytes(4, byteorder='little')) - f.write(one.to_bytes(4, byteorder='little')) - f.write(one.to_bytes(4, byteorder='little')) - f.write(tensor_name.encode('ascii')) - f.write(binary_data) - +import os +import sys + +import numpy as np +import torch +from diffusers.utils import load_image +# pip install insightface==0.7.3 +from insightface.app import FaceAnalysis +from insightface.data import get_image as ins_get_image +from safetensors.torch import save_file + +### +# https://github.com/cubiq/ComfyUI_IPAdapter_plus/issues/165#issue-2055829543 +### +class FaceAnalysis2(FaceAnalysis): + # NOTE: allows setting det_size for each detection call. 
+ # the model allows it but the wrapping code from insightface + # doesn't show it, and people end up loading duplicate models + # for different sizes where there is absolutely no need to + def get(self, img, max_num=0, det_size=(640, 640)): + if det_size is not None: + self.det_model.input_size = det_size + + return super().get(img, max_num) + +def analyze_faces(face_analysis: FaceAnalysis, img_data: np.ndarray, det_size=(640, 640)): + # NOTE: try detect faces, if no faces detected, lower det_size until it does + detection_sizes = [None] + [(size, size) for size in range(640, 256, -64)] + [(256, 256)] + + for size in detection_sizes: + faces = face_analysis.get(img_data, det_size=size) + if len(faces) > 0: + return faces + + return [] + +if __name__ == "__main__": + #face_detector = FaceAnalysis2(providers=['CUDAExecutionProvider'], allowed_modules=['detection', 'recognition']) + face_detector = FaceAnalysis2(providers=['CPUExecutionProvider'], allowed_modules=['detection', 'recognition']) + face_detector.prepare(ctx_id=0, det_size=(640, 640)) + #input_folder_name = './scarletthead_woman' + input_folder_name = sys.argv[1] + image_basename_list = os.listdir(input_folder_name) + image_path_list = sorted([os.path.join(input_folder_name, basename) for basename in image_basename_list]) + + input_id_images = [] + for image_path in image_path_list: + input_id_images.append(load_image(image_path)) + + id_embed_list = [] + + for img in input_id_images: + img = np.array(img) + img = img[:, :, ::-1] + faces = analyze_faces(face_detector, img) + if len(faces) > 0: + id_embed_list.append(torch.from_numpy((faces[0]['embedding']))) + + if len(id_embed_list) == 0: + raise ValueError(f"No face detected in input image pool") + + id_embeds = torch.stack(id_embed_list) + + # for r in id_embeds: + # print(r) + # #torch.save(id_embeds, input_folder_name+'/id_embeds.pt'); + # weights = dict() + # weights["id_embeds"] = id_embeds + # save_file(weights, input_folder_name+'/id_embeds.safetensors') + + binary_data = id_embeds.numpy().tobytes() + two = 4 + zero = 0 + one = 1 + tensor_name = "id_embeds" +# Write binary data to a file + with open(input_folder_name+'/id_embeds.bin', "wb") as f: + f.write(two.to_bytes(4, byteorder='little')) + f.write((len(tensor_name)).to_bytes(4, byteorder='little')) + f.write(zero.to_bytes(4, byteorder='little')) + f.write((id_embeds.shape[1]).to_bytes(4, byteorder='little')) + f.write((id_embeds.shape[0]).to_bytes(4, byteorder='little')) + f.write(one.to_bytes(4, byteorder='little')) + f.write(one.to_bytes(4, byteorder='little')) + f.write(tensor_name.encode('ascii')) + f.write(binary_data) + \ No newline at end of file diff --git a/src/anima.hpp b/src/anima.hpp new file mode 100644 index 00000000..191a096d --- /dev/null +++ b/src/anima.hpp @@ -0,0 +1,686 @@ +#ifndef __ANIMA_HPP__ +#define __ANIMA_HPP__ + +#include +#include +#include +#include + +#include "common_block.hpp" +#include "flux.hpp" +#include "rope.hpp" + +namespace Anima { + constexpr int ANIMA_GRAPH_SIZE = 65536; + + __STATIC_INLINE__ struct ggml_tensor* apply_gate(struct ggml_context* ctx, + struct ggml_tensor* x, + struct ggml_tensor* gate) { + gate = ggml_reshape_3d(ctx, gate, gate->ne[0], 1, gate->ne[1]); // [N, 1, C] + return ggml_mul(ctx, x, gate); + } + + struct XEmbedder : public GGMLBlock { + public: + XEmbedder(int64_t in_dim, int64_t out_dim) { + blocks["proj.1"] = std::make_shared(in_dim, out_dim, false); + } + + struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { + auto proj = 
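// For reference, the id_embeds.bin layout written by script/face_detect.py above: seven
// little-endian int32 values (4, name_length, 0, embedding_dim, embedding_count, 1, 1), the
// ASCII tensor name ("id_embeds"), then raw float32 data. The reader below only mirrors that
// write order; the meaning of the constant header fields is not documented here, and a
// little-endian host is assumed.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

int main(int argc, char** argv) {
    if (argc < 2) return 1;
    FILE* f = std::fopen(argv[1], "rb");
    if (!f) return 1;
    int32_t header[7];
    if (std::fread(header, sizeof(int32_t), 7, f) != 7) { std::fclose(f); return 1; }
    std::string name(header[1], '\0');
    std::vector<float> embeds((size_t)header[3] * (size_t)header[4]);
    bool ok = std::fread(&name[0], 1, name.size(), f) == name.size() &&
              std::fread(embeds.data(), sizeof(float), embeds.size(), f) == embeds.size();
    std::fclose(f);
    if (!ok) return 1;
    std::printf("%s: %d embeddings of dim %d\n", name.c_str(), header[4], header[3]);
    return 0;
}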
std::dynamic_pointer_cast(blocks["proj.1"]); + return proj->forward(ctx, x); + } + }; + + struct TimestepEmbedder : public GGMLBlock { + public: + TimestepEmbedder(int64_t in_dim, int64_t out_dim) { + blocks["1.linear_1"] = std::make_shared(in_dim, in_dim, false); + blocks["1.linear_2"] = std::make_shared(in_dim, out_dim, false); + } + + struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { + auto linear_1 = std::dynamic_pointer_cast(blocks["1.linear_1"]); + auto linear_2 = std::dynamic_pointer_cast(blocks["1.linear_2"]); + + x = linear_1->forward(ctx, x); + x = ggml_silu_inplace(ctx->ggml_ctx, x); + x = linear_2->forward(ctx, x); + return x; + } + }; + + struct AdaLayerNormZero : public GGMLBlock { + protected: + int64_t in_features; + + public: + AdaLayerNormZero(int64_t in_features, int64_t hidden_features = 256) + : in_features(in_features) { + blocks["norm"] = std::make_shared(in_features, 1e-6f, false, false); + blocks["1"] = std::make_shared(in_features, hidden_features, false); + blocks["2"] = std::make_shared(hidden_features, 3 * in_features, false); + } + + std::pair forward(GGMLRunnerContext* ctx, + struct ggml_tensor* hidden_states, + struct ggml_tensor* embedded_timestep, + struct ggml_tensor* temb = nullptr) { + auto norm = std::dynamic_pointer_cast(blocks["norm"]); + auto linear_1 = std::dynamic_pointer_cast(blocks["1"]); + auto linear_2 = std::dynamic_pointer_cast(blocks["2"]); + + auto emb = ggml_silu(ctx->ggml_ctx, embedded_timestep); + emb = linear_1->forward(ctx, emb); + emb = linear_2->forward(ctx, emb); // [N, 3*C] + + if (temb != nullptr) { + emb = ggml_add(ctx->ggml_ctx, emb, temb); + } + + auto emb_chunks = ggml_ext_chunk(ctx->ggml_ctx, emb, 3, 0); + auto shift = emb_chunks[0]; + auto scale = emb_chunks[1]; + auto gate = emb_chunks[2]; + + auto x = norm->forward(ctx, hidden_states); + x = Flux::modulate(ctx->ggml_ctx, x, shift, scale); + + return {x, gate}; + } + }; + + struct AdaLayerNorm : public GGMLBlock { + protected: + int64_t embedding_dim; + + public: + AdaLayerNorm(int64_t in_features, int64_t hidden_features = 256) + : embedding_dim(in_features) { + blocks["norm"] = std::make_shared(in_features, 1e-6f, false, false); + blocks["1"] = std::make_shared(in_features, hidden_features, false); + blocks["2"] = std::make_shared(hidden_features, 2 * in_features, false); + } + + struct ggml_tensor* forward(GGMLRunnerContext* ctx, + struct ggml_tensor* hidden_states, + struct ggml_tensor* embedded_timestep, + struct ggml_tensor* temb = nullptr) { + auto norm = std::dynamic_pointer_cast(blocks["norm"]); + auto linear_1 = std::dynamic_pointer_cast(blocks["1"]); + auto linear_2 = std::dynamic_pointer_cast(blocks["2"]); + + auto emb = ggml_silu(ctx->ggml_ctx, embedded_timestep); + emb = linear_1->forward(ctx, emb); + emb = linear_2->forward(ctx, emb); // [N, 2*C] + + if (temb != nullptr) { + auto temb_2c = ggml_view_2d(ctx->ggml_ctx, temb, 2 * embedding_dim, temb->ne[1], temb->nb[1], 0); + emb = ggml_add(ctx->ggml_ctx, emb, temb_2c); + } + + auto emb_chunks = ggml_ext_chunk(ctx->ggml_ctx, emb, 2, 0); + auto shift = emb_chunks[0]; + auto scale = emb_chunks[1]; + + auto x = norm->forward(ctx, hidden_states); + x = Flux::modulate(ctx->ggml_ctx, x, shift, scale); + return x; + } + }; + + struct AnimaAttention : public GGMLBlock { + protected: + int64_t num_heads; + int64_t head_dim; + std::string out_proj_name; + + public: + AnimaAttention(int64_t query_dim, + int64_t context_dim, + int64_t num_heads, + int64_t head_dim, + const std::string& 
out_proj_name = "output_proj") + : num_heads(num_heads), head_dim(head_dim), out_proj_name(out_proj_name) { + int64_t inner_dim = num_heads * head_dim; + + blocks["q_proj"] = std::make_shared(query_dim, inner_dim, false); + blocks["k_proj"] = std::make_shared(context_dim, inner_dim, false); + blocks["v_proj"] = std::make_shared(context_dim, inner_dim, false); + blocks["q_norm"] = std::make_shared(head_dim, 1e-6f); + blocks["k_norm"] = std::make_shared(head_dim, 1e-6f); + blocks[this->out_proj_name] = std::make_shared(inner_dim, query_dim, false); + } + + struct ggml_tensor* forward(GGMLRunnerContext* ctx, + struct ggml_tensor* hidden_states, + struct ggml_tensor* encoder_hidden_states = nullptr, + struct ggml_tensor* pe_q = nullptr, + struct ggml_tensor* pe_k = nullptr) { + if (encoder_hidden_states == nullptr) { + encoder_hidden_states = hidden_states; + } + + auto q_proj = std::dynamic_pointer_cast(blocks["q_proj"]); + auto k_proj = std::dynamic_pointer_cast(blocks["k_proj"]); + auto v_proj = std::dynamic_pointer_cast(blocks["v_proj"]); + auto q_norm = std::dynamic_pointer_cast(blocks["q_norm"]); + auto k_norm = std::dynamic_pointer_cast(blocks["k_norm"]); + auto out_proj = std::dynamic_pointer_cast(blocks[out_proj_name]); + + auto q = q_proj->forward(ctx, hidden_states); + auto k = k_proj->forward(ctx, encoder_hidden_states); + auto v = v_proj->forward(ctx, encoder_hidden_states); + + int64_t N = q->ne[2]; + int64_t L_q = q->ne[1]; + int64_t L_k = k->ne[1]; + + auto q4 = ggml_reshape_4d(ctx->ggml_ctx, q, head_dim, num_heads, L_q, N); // [N, L_q, H, D] + auto k4 = ggml_reshape_4d(ctx->ggml_ctx, k, head_dim, num_heads, L_k, N); // [N, L_k, H, D] + auto v4 = ggml_reshape_4d(ctx->ggml_ctx, v, head_dim, num_heads, L_k, N); // [N, L_k, H, D] + + q4 = q_norm->forward(ctx, q4); + k4 = k_norm->forward(ctx, k4); + + struct ggml_tensor* attn_out = nullptr; + if (pe_q != nullptr || pe_k != nullptr) { + if (pe_q == nullptr) { + pe_q = pe_k; + } + if (pe_k == nullptr) { + pe_k = pe_q; + } + auto q_rope = Rope::apply_rope(ctx->ggml_ctx, q4, pe_q, false); + auto k_rope = Rope::apply_rope(ctx->ggml_ctx, k4, pe_k, false); + attn_out = ggml_ext_attention_ext(ctx->ggml_ctx, + ctx->backend, + q_rope, + k_rope, + v4, + num_heads, + nullptr, + true, + ctx->flash_attn_enabled); + } else { + auto q_flat = ggml_reshape_3d(ctx->ggml_ctx, q4, head_dim * num_heads, L_q, N); + auto k_flat = ggml_reshape_3d(ctx->ggml_ctx, k4, head_dim * num_heads, L_k, N); + attn_out = ggml_ext_attention_ext(ctx->ggml_ctx, + ctx->backend, + q_flat, + k_flat, + v, + num_heads, + nullptr, + false, + ctx->flash_attn_enabled); + } + + return out_proj->forward(ctx, attn_out); + } + }; + + struct AnimaMLP : public GGMLBlock { + public: + AnimaMLP(int64_t dim, int64_t hidden_dim) { + blocks["layer1"] = std::make_shared(dim, hidden_dim, false); + blocks["layer2"] = std::make_shared(hidden_dim, dim, false); + } + + struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { + auto layer1 = std::dynamic_pointer_cast(blocks["layer1"]); + auto layer2 = std::dynamic_pointer_cast(blocks["layer2"]); + + x = layer1->forward(ctx, x); + x = ggml_ext_gelu(ctx->ggml_ctx, x, true); + x = layer2->forward(ctx, x); + return x; + } + }; + + struct AdapterMLP : public GGMLBlock { + public: + AdapterMLP(int64_t dim, int64_t hidden_dim) { + blocks["0"] = std::make_shared(dim, hidden_dim, true); + blocks["2"] = std::make_shared(hidden_dim, dim, true); + } + + struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { + auto 
layer0 = std::dynamic_pointer_cast(blocks["0"]); + auto layer2 = std::dynamic_pointer_cast(blocks["2"]); + + x = layer0->forward(ctx, x); + x = ggml_ext_gelu(ctx->ggml_ctx, x, true); + x = layer2->forward(ctx, x); + return x; + } + }; + + struct LLMAdapterBlock : public GGMLBlock { + public: + LLMAdapterBlock(int64_t model_dim = 1024, int64_t source_dim = 1024, int64_t num_heads = 16, int64_t head_dim = 64) { + blocks["norm_self_attn"] = std::make_shared(model_dim, 1e-6f); + blocks["self_attn"] = std::make_shared(model_dim, model_dim, num_heads, head_dim, "o_proj"); + blocks["norm_cross_attn"] = std::make_shared(model_dim, 1e-6f); + blocks["cross_attn"] = std::make_shared(model_dim, source_dim, num_heads, head_dim, "o_proj"); + blocks["norm_mlp"] = std::make_shared(model_dim, 1e-6f); + blocks["mlp"] = std::make_shared(model_dim, model_dim * 4); + } + + struct ggml_tensor* forward(GGMLRunnerContext* ctx, + struct ggml_tensor* x, + struct ggml_tensor* context, + struct ggml_tensor* target_pe, + struct ggml_tensor* context_pe) { + auto norm_self_attn = std::dynamic_pointer_cast(blocks["norm_self_attn"]); + auto self_attn = std::dynamic_pointer_cast(blocks["self_attn"]); + auto norm_cross_attn = std::dynamic_pointer_cast(blocks["norm_cross_attn"]); + auto cross_attn = std::dynamic_pointer_cast(blocks["cross_attn"]); + auto norm_mlp = std::dynamic_pointer_cast(blocks["norm_mlp"]); + auto mlp = std::dynamic_pointer_cast(blocks["mlp"]); + + auto h = norm_self_attn->forward(ctx, x); + h = self_attn->forward(ctx, h, nullptr, target_pe, target_pe); + x = ggml_add(ctx->ggml_ctx, x, h); + + h = norm_cross_attn->forward(ctx, x); + h = cross_attn->forward(ctx, h, context, target_pe, context_pe); + x = ggml_add(ctx->ggml_ctx, x, h); + + h = norm_mlp->forward(ctx, x); + h = mlp->forward(ctx, h); + x = ggml_add(ctx->ggml_ctx, x, h); + + return x; + } + }; + + struct LLMAdapter : public GGMLBlock { + protected: + int num_layers; + + public: + LLMAdapter(int64_t source_dim = 1024, + int64_t target_dim = 1024, + int64_t model_dim = 1024, + int num_layers = 6, + int num_heads = 16) + : num_layers(num_layers) { + int64_t head_dim = model_dim / num_heads; + + blocks["embed"] = std::make_shared(32128, target_dim); + for (int i = 0; i < num_layers; i++) { + blocks["blocks." + std::to_string(i)] = + std::make_shared(model_dim, source_dim, num_heads, head_dim); + } + blocks["out_proj"] = std::make_shared(model_dim, target_dim, true); + blocks["norm"] = std::make_shared(target_dim, 1e-6f); + } + + struct ggml_tensor* forward(GGMLRunnerContext* ctx, + struct ggml_tensor* source_hidden_states, + struct ggml_tensor* target_input_ids, + struct ggml_tensor* target_pe, + struct ggml_tensor* source_pe) { + GGML_ASSERT(target_input_ids != nullptr); + if (ggml_n_dims(target_input_ids) == 1) { + target_input_ids = ggml_reshape_2d(ctx->ggml_ctx, target_input_ids, target_input_ids->ne[0], 1); + } + + auto embed = std::dynamic_pointer_cast(blocks["embed"]); + auto out_proj = std::dynamic_pointer_cast(blocks["out_proj"]); + auto norm = std::dynamic_pointer_cast(blocks["norm"]); + + auto x = embed->forward(ctx, target_input_ids); // [N, target_len, target_dim] + + for (int i = 0; i < num_layers; i++) { + auto block = std::dynamic_pointer_cast(blocks["blocks." 
+ std::to_string(i)]); + x = block->forward(ctx, x, source_hidden_states, target_pe, source_pe); + } + + x = out_proj->forward(ctx, x); + x = norm->forward(ctx, x); + return x; + } + }; + + struct TransformerBlock : public GGMLBlock { + public: + TransformerBlock(int64_t hidden_size, + int64_t text_embed_dim, + int64_t num_heads, + int64_t head_dim, + int64_t mlp_ratio = 4, + int64_t adaln_lora_dim = 256) { + blocks["adaln_modulation_self_attn"] = std::make_shared(hidden_size, adaln_lora_dim); + blocks["self_attn"] = std::make_shared(hidden_size, hidden_size, num_heads, head_dim); + blocks["adaln_modulation_cross_attn"] = std::make_shared(hidden_size, adaln_lora_dim); + blocks["cross_attn"] = std::make_shared(hidden_size, text_embed_dim, num_heads, head_dim); + blocks["adaln_modulation_mlp"] = std::make_shared(hidden_size, adaln_lora_dim); + blocks["mlp"] = std::make_shared(hidden_size, hidden_size * mlp_ratio); + } + + struct ggml_tensor* forward(GGMLRunnerContext* ctx, + struct ggml_tensor* hidden_states, + struct ggml_tensor* encoder_hidden_states, + struct ggml_tensor* embedded_timestep, + struct ggml_tensor* temb, + struct ggml_tensor* image_pe) { + auto norm1 = std::dynamic_pointer_cast(blocks["adaln_modulation_self_attn"]); + auto attn1 = std::dynamic_pointer_cast(blocks["self_attn"]); + auto norm2 = std::dynamic_pointer_cast(blocks["adaln_modulation_cross_attn"]); + auto attn2 = std::dynamic_pointer_cast(blocks["cross_attn"]); + auto norm3 = std::dynamic_pointer_cast(blocks["adaln_modulation_mlp"]); + auto mlp = std::dynamic_pointer_cast(blocks["mlp"]); + + auto [normed1, gate1] = norm1->forward(ctx, hidden_states, embedded_timestep, temb); + auto h = attn1->forward(ctx, normed1, nullptr, image_pe, image_pe); + hidden_states = ggml_add(ctx->ggml_ctx, hidden_states, apply_gate(ctx->ggml_ctx, h, gate1)); + + auto [normed2, gate2] = norm2->forward(ctx, hidden_states, embedded_timestep, temb); + h = attn2->forward(ctx, normed2, encoder_hidden_states, nullptr, nullptr); + hidden_states = ggml_add(ctx->ggml_ctx, hidden_states, apply_gate(ctx->ggml_ctx, h, gate2)); + + auto [normed3, gate3] = norm3->forward(ctx, hidden_states, embedded_timestep, temb); + h = mlp->forward(ctx, normed3); + hidden_states = ggml_add(ctx->ggml_ctx, hidden_states, apply_gate(ctx->ggml_ctx, h, gate3)); + + return hidden_states; + } + }; + + struct FinalLayer : public GGMLBlock { + protected: + int64_t hidden_size; + int64_t patch_size; + int64_t out_channels; + + public: + FinalLayer(int64_t hidden_size, int64_t patch_size, int64_t out_channels) + : hidden_size(hidden_size), patch_size(patch_size), out_channels(out_channels) { + blocks["adaln_modulation"] = std::make_shared(hidden_size, 256); + blocks["linear"] = std::make_shared(hidden_size, patch_size * patch_size * out_channels, false); + } + + struct ggml_tensor* forward(GGMLRunnerContext* ctx, + struct ggml_tensor* hidden_states, + struct ggml_tensor* embedded_timestep, + struct ggml_tensor* temb) { + auto adaln = std::dynamic_pointer_cast(blocks["adaln_modulation"]); + auto linear = std::dynamic_pointer_cast(blocks["linear"]); + + hidden_states = adaln->forward(ctx, hidden_states, embedded_timestep, temb); + hidden_states = linear->forward(ctx, hidden_states); + return hidden_states; + } + }; + + struct AnimaNet : public GGMLBlock { + public: + int64_t in_channels = 16; + int64_t out_channels = 16; + int64_t hidden_size = 2048; + int64_t text_embed_dim = 1024; + int64_t num_heads = 16; + int64_t head_dim = 128; + int patch_size = 2; + int64_t num_layers = 
28; + std::vector axes_dim = {44, 42, 42}; + int theta = 10000; + + public: + AnimaNet() = default; + explicit AnimaNet(int64_t num_layers) + : num_layers(num_layers) { + blocks["x_embedder"] = std::make_shared((in_channels + 1) * patch_size * patch_size, hidden_size); + blocks["t_embedder"] = std::make_shared(hidden_size, hidden_size * 3); + blocks["t_embedding_norm"] = std::make_shared(hidden_size, 1e-6f); + for (int i = 0; i < num_layers; i++) { + blocks["blocks." + std::to_string(i)] = std::make_shared(hidden_size, + text_embed_dim, + num_heads, + head_dim); + } + blocks["final_layer"] = std::make_shared(hidden_size, patch_size, out_channels); + blocks["llm_adapter"] = std::make_shared(1024, 1024, 1024, 6, 16); + } + + struct ggml_tensor* forward(GGMLRunnerContext* ctx, + struct ggml_tensor* x, + struct ggml_tensor* timestep, + struct ggml_tensor* encoder_hidden_states, + struct ggml_tensor* image_pe, + struct ggml_tensor* t5_ids = nullptr, + struct ggml_tensor* t5_weights = nullptr, + struct ggml_tensor* adapter_q_pe = nullptr, + struct ggml_tensor* adapter_k_pe = nullptr) { + GGML_ASSERT(x->ne[3] == 1); + + auto x_embedder = std::dynamic_pointer_cast(blocks["x_embedder"]); + auto t_embedder = std::dynamic_pointer_cast(blocks["t_embedder"]); + auto t_embedding_norm = std::dynamic_pointer_cast(blocks["t_embedding_norm"]); + auto final_layer = std::dynamic_pointer_cast(blocks["final_layer"]); + auto llm_adapter = std::dynamic_pointer_cast(blocks["llm_adapter"]); + + int64_t W = x->ne[0]; + int64_t H = x->ne[1]; + + auto padding_mask = ggml_ext_zeros(ctx->ggml_ctx, x->ne[0], x->ne[1], 1, x->ne[3]); + x = ggml_concat(ctx->ggml_ctx, x, padding_mask, 2); // [N, C + 1, H, W] + + x = DiT::pad_and_patchify(ctx, x, patch_size, patch_size); // [N, h*w, (C+1)*ph*pw] + + x = x_embedder->forward(ctx, x); + + auto timestep_proj = ggml_ext_timestep_embedding(ctx->ggml_ctx, timestep, static_cast(hidden_size)); + auto temb = t_embedder->forward(ctx, timestep_proj); + auto embedded_timestep = t_embedding_norm->forward(ctx, timestep_proj); + + if (t5_ids != nullptr) { + auto adapted_context = llm_adapter->forward(ctx, encoder_hidden_states, t5_ids, adapter_q_pe, adapter_k_pe); + if (t5_weights != nullptr) { + auto w = t5_weights; + if (ggml_n_dims(w) == 1) { + w = ggml_reshape_3d(ctx->ggml_ctx, w, 1, w->ne[0], 1); + } + w = ggml_repeat_4d(ctx->ggml_ctx, w, adapted_context->ne[0], adapted_context->ne[1], adapted_context->ne[2], 1); + adapted_context = ggml_mul(ctx->ggml_ctx, adapted_context, w); + } + if (adapted_context->ne[1] < 512) { + auto pad_ctx = ggml_ext_zeros(ctx->ggml_ctx, + adapted_context->ne[0], + 512 - adapted_context->ne[1], + adapted_context->ne[2], + 1); + adapted_context = ggml_concat(ctx->ggml_ctx, adapted_context, pad_ctx, 1); + } else if (adapted_context->ne[1] > 512) { + adapted_context = ggml_ext_slice(ctx->ggml_ctx, adapted_context, 1, 0, 512); + } + encoder_hidden_states = adapted_context; + } + + for (int i = 0; i < num_layers; i++) { + auto block = std::dynamic_pointer_cast(blocks["blocks." 
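// Why the x_embedder input width above is (in_channels + 1) * patch_size * patch_size: the
// forward pass concatenates a one-channel padding mask onto the 16 latent channels before
// patchifying, so each 2x2 patch token carries 17 * 4 = 68 features projected to hidden_size.
// Trivial standalone check of that arithmetic:
#include <cstdio>

int main() {
    const int in_channels = 16, patch_size = 2, hidden_size = 2048;
    const int token_features = (in_channels + 1) * patch_size * patch_size;  // +1 for the padding mask
    std::printf("x_embedder projects %d -> %d\n", token_features, hidden_size);  // 68 -> 2048
    return 0;
}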
+ std::to_string(i)]); + x = block->forward(ctx, x, encoder_hidden_states, embedded_timestep, temb, image_pe); + } + + x = final_layer->forward(ctx, x, embedded_timestep, temb); // [N, h*w, ph*pw*C] + + x = DiT::unpatchify_and_crop(ctx->ggml_ctx, x, H, W, patch_size, patch_size, false); // [N, C, H, W] + + return x; + } + }; + + struct AnimaRunner : public GGMLRunner { + public: + std::vector image_pe_vec; + std::vector adapter_q_pe_vec; + std::vector adapter_k_pe_vec; + AnimaNet net; + + AnimaRunner(ggml_backend_t backend, + bool offload_params_to_cpu, + const String2TensorStorage& tensor_storage_map = {}, + const std::string prefix = "model.diffusion_model") + : GGMLRunner(backend, offload_params_to_cpu) { + int64_t num_layers = 0; + std::string layer_tag = prefix + ".net.blocks."; + for (const auto& kv : tensor_storage_map) { + const std::string& tensor_name = kv.first; + size_t pos = tensor_name.find(layer_tag); + if (pos == std::string::npos) { + continue; + } + size_t start = pos + layer_tag.size(); + size_t end = tensor_name.find('.', start); + if (end == std::string::npos) { + continue; + } + int64_t layer_id = atoll(tensor_name.substr(start, end - start).c_str()); + num_layers = std::max(num_layers, layer_id + 1); + } + if (num_layers <= 0) { + num_layers = 28; + } + LOG_INFO("anima net layers: %" PRId64, num_layers); + + net = AnimaNet(num_layers); + net.init(params_ctx, tensor_storage_map, prefix + ".net"); + } + + std::string get_desc() override { + return "anima"; + } + + void get_param_tensors(std::map& tensors, const std::string prefix) { + net.get_param_tensors(tensors, prefix + ".net"); + } + + static std::vector gen_1d_rope_pe_vec(int64_t seq_len, int dim, float theta = 10000.f) { + std::vector pos(seq_len); + for (int64_t i = 0; i < seq_len; i++) { + pos[i] = static_cast(i); + } + auto rope_emb = Rope::rope(pos, dim, theta); + return Rope::flatten(rope_emb); + } + + static float calc_ntk_factor(float extrapolation_ratio, int axis_dim) { + if (extrapolation_ratio == 1.0f || axis_dim <= 2) { + return 1.0f; + } + return std::pow(extrapolation_ratio, static_cast(axis_dim) / static_cast(axis_dim - 2)); + } + + static std::vector gen_anima_image_pe_vec(int bs, + int h, + int w, + int patch_size, + int theta, + const std::vector& axes_dim, + float h_extrapolation_ratio, + float w_extrapolation_ratio, + float t_extrapolation_ratio) { + static const std::vector empty_ref_latents; + auto ids = Rope::gen_flux_ids(h, + w, + patch_size, + bs, + static_cast(axes_dim.size()), + 0, + {}, + empty_ref_latents, + false, + 1.0f); + + std::vector axis_thetas = { + static_cast(theta) * calc_ntk_factor(t_extrapolation_ratio, axes_dim[0]), + static_cast(theta) * calc_ntk_factor(h_extrapolation_ratio, axes_dim[1]), + static_cast(theta) * calc_ntk_factor(w_extrapolation_ratio, axes_dim[2]), + }; + return Rope::embed_nd(ids, bs, axis_thetas, axes_dim); + } + + struct ggml_cgraph* build_graph(struct ggml_tensor* x, + struct ggml_tensor* timesteps, + struct ggml_tensor* context, + struct ggml_tensor* t5_ids = nullptr, + struct ggml_tensor* t5_weights = nullptr) { + GGML_ASSERT(x->ne[3] == 1); + struct ggml_cgraph* gf = new_graph_custom(ANIMA_GRAPH_SIZE); + + x = to_backend(x); + timesteps = to_backend(timesteps); + context = to_backend(context); + t5_ids = to_backend(t5_ids); + t5_weights = to_backend(t5_weights); + + int64_t pad_h = (net.patch_size - x->ne[1] % net.patch_size) % net.patch_size; + int64_t pad_w = (net.patch_size - x->ne[0] % net.patch_size) % net.patch_size; + int64_t h_pad = 
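// The NTK-aware RoPE stretch used by gen_anima_image_pe_vec above scales each axis theta by
//   factor = extrapolation_ratio ^ (axis_dim / (axis_dim - 2))
// and leaves an axis untouched when its ratio is 1.0 (the temporal axis here). Standalone check
// with the values build_graph passes (spatial ratio 4.0, axes_dim {44, 42, 42}):
#include <cmath>
#include <cstdio>

int main() {
    const float ratio = 4.0f;  // h/w extrapolation ratio passed by build_graph
    const int   dim   = 42;    // spatial entries of AnimaNet::axes_dim
    const float factor = std::pow(ratio, (float)dim / (float)(dim - 2));
    std::printf("theta scale for ratio=%.1f, dim=%d: %.3f\n", ratio, dim, factor);  // ~4.287
    return 0;
}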
x->ne[1] + pad_h; + int64_t w_pad = x->ne[0] + pad_w; + + image_pe_vec = gen_anima_image_pe_vec(1, + static_cast(h_pad), + static_cast(w_pad), + static_cast(net.patch_size), + net.theta, + net.axes_dim, + 4.0f, + 4.0f, + 1.0f); + int64_t image_pos_len = static_cast(image_pe_vec.size()) / (2 * 2 * (net.head_dim / 2)); + auto image_pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, net.head_dim / 2, image_pos_len); + set_backend_tensor_data(image_pe, image_pe_vec.data()); + + ggml_tensor* adapter_q_pe = nullptr; + ggml_tensor* adapter_k_pe = nullptr; + if (t5_ids != nullptr) { + int64_t target_len = t5_ids->ne[0]; + int64_t source_len = context->ne[1]; + + adapter_q_pe_vec = gen_1d_rope_pe_vec(target_len, 64, 10000.f); + adapter_k_pe_vec = gen_1d_rope_pe_vec(source_len, 64, 10000.f); + + int64_t target_pos_len = static_cast(adapter_q_pe_vec.size()) / (2 * 2 * 32); + int64_t source_pos_len = static_cast(adapter_k_pe_vec.size()) / (2 * 2 * 32); + + adapter_q_pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, 32, target_pos_len); + adapter_k_pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, 32, source_pos_len); + set_backend_tensor_data(adapter_q_pe, adapter_q_pe_vec.data()); + set_backend_tensor_data(adapter_k_pe, adapter_k_pe_vec.data()); + } + + auto runner_ctx = get_context(); + auto out = net.forward(&runner_ctx, + x, + timesteps, + context, + image_pe, + t5_ids, + t5_weights, + adapter_q_pe, + adapter_k_pe); + + ggml_build_forward_expand(gf, out); + return gf; + } + + bool compute(int n_threads, + struct ggml_tensor* x, + struct ggml_tensor* timesteps, + struct ggml_tensor* context, + struct ggml_tensor* t5_ids = nullptr, + struct ggml_tensor* t5_weights = nullptr, + struct ggml_tensor** output = nullptr, + struct ggml_context* output_ctx = nullptr) { + auto get_graph = [&]() -> struct ggml_cgraph* { + return build_graph(x, timesteps, context, t5_ids, t5_weights); + }; + return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx); + } + }; +} // namespace Anima + +#endif // __ANIMA_HPP__ diff --git a/src/cache_dit.hpp b/src/cache_dit.hpp new file mode 100644 index 00000000..6fe104da --- /dev/null +++ b/src/cache_dit.hpp @@ -0,0 +1,975 @@ +#ifndef __CACHE_DIT_HPP__ +#define __CACHE_DIT_HPP__ + +#include +#include +#include +#include +#include +#include + +#include "ggml_extend.hpp" + +struct DBCacheConfig { + bool enabled = false; + int Fn_compute_blocks = 8; + int Bn_compute_blocks = 0; + float residual_diff_threshold = 0.08f; + int max_warmup_steps = 8; + int max_cached_steps = -1; + int max_continuous_cached_steps = -1; + float max_accumulated_residual_diff = -1.0f; + std::vector steps_computation_mask; + bool scm_policy_dynamic = true; +}; + +struct TaylorSeerConfig { + bool enabled = false; + int n_derivatives = 1; + int max_warmup_steps = 2; + int skip_interval_steps = 1; +}; + +struct CacheDitConfig { + DBCacheConfig dbcache; + TaylorSeerConfig taylorseer; + int double_Fn_blocks = -1; + int double_Bn_blocks = -1; + int single_Fn_blocks = -1; + int single_Bn_blocks = -1; +}; + +struct TaylorSeerState { + int n_derivatives = 1; + int current_step = -1; + int last_computed_step = -1; + std::vector> dY_prev; + std::vector> dY_current; + + void init(int n_deriv, size_t hidden_size) { + n_derivatives = n_deriv; + int order = n_derivatives + 1; + dY_prev.resize(order); + dY_current.resize(order); + for (int i = 0; i < order; i++) { + dY_prev[i].clear(); + dY_current[i].clear(); + } + current_step = -1; + last_computed_step = -1; + } + + void reset() { 
+ for (auto& v : dY_prev) + v.clear(); + for (auto& v : dY_current) + v.clear(); + current_step = -1; + last_computed_step = -1; + } + + bool can_approximate() const { + return last_computed_step >= n_derivatives && !dY_prev.empty() && !dY_prev[0].empty(); + } + + void update_derivatives(const float* Y, size_t size, int step) { + int order = n_derivatives + 1; + dY_prev = dY_current; + dY_current[0].resize(size); + for (size_t i = 0; i < size; i++) { + dY_current[0][i] = Y[i]; + } + + int window = step - last_computed_step; + if (window <= 0) + window = 1; + + for (int d = 0; d < n_derivatives; d++) { + if (!dY_prev[d].empty() && dY_prev[d].size() == size) { + dY_current[d + 1].resize(size); + for (size_t i = 0; i < size; i++) { + dY_current[d + 1][i] = (dY_current[d][i] - dY_prev[d][i]) / static_cast(window); + } + } else { + dY_current[d + 1].clear(); + } + } + + current_step = step; + last_computed_step = step; + } + + void approximate(float* output, size_t size, int target_step) const { + if (!can_approximate() || dY_prev[0].size() != size) { + return; + } + + int elapsed = target_step - last_computed_step; + if (elapsed <= 0) + elapsed = 1; + + std::fill(output, output + size, 0.0f); + float factorial = 1.0f; + int order = static_cast(dY_prev.size()); + + for (int o = 0; o < order; o++) { + if (dY_prev[o].empty() || dY_prev[o].size() != size) + continue; + if (o > 0) + factorial *= static_cast(o); + float coeff = ::powf(static_cast(elapsed), static_cast(o)) / factorial; + for (size_t i = 0; i < size; i++) { + output[i] += coeff * dY_prev[o][i]; + } + } + } +}; + +struct BlockCacheEntry { + std::vector residual_img; + std::vector residual_txt; + std::vector residual; + std::vector prev_img; + std::vector prev_txt; + std::vector prev_output; + bool has_prev = false; +}; + +struct CacheDitState { + CacheDitConfig config; + bool initialized = false; + + int total_double_blocks = 0; + int total_single_blocks = 0; + size_t hidden_size = 0; + + int current_step = -1; + int total_steps = 0; + int warmup_remaining = 0; + std::vector cached_steps; + int continuous_cached_steps = 0; + float accumulated_residual_diff = 0.0f; + + std::vector double_block_cache; + std::vector single_block_cache; + + std::vector Fn_residual_img; + std::vector Fn_residual_txt; + std::vector prev_Fn_residual_img; + std::vector prev_Fn_residual_txt; + bool has_prev_Fn_residual = false; + + std::vector Bn_buffer_img; + std::vector Bn_buffer_txt; + std::vector Bn_buffer; + bool has_Bn_buffer = false; + + TaylorSeerState taylor_state; + + bool can_cache_this_step = false; + bool is_caching_this_step = false; + + int total_blocks_computed = 0; + int total_blocks_cached = 0; + + void init(const CacheDitConfig& cfg, int num_double_blocks, int num_single_blocks, size_t h_size) { + config = cfg; + total_double_blocks = num_double_blocks; + total_single_blocks = num_single_blocks; + hidden_size = h_size; + + initialized = cfg.dbcache.enabled || cfg.taylorseer.enabled; + + if (!initialized) + return; + + warmup_remaining = cfg.dbcache.max_warmup_steps; + double_block_cache.resize(total_double_blocks); + single_block_cache.resize(total_single_blocks); + + if (cfg.taylorseer.enabled) { + taylor_state.init(cfg.taylorseer.n_derivatives, h_size); + } + + reset_runtime(); + } + + void reset_runtime() { + current_step = -1; + total_steps = 0; + warmup_remaining = config.dbcache.max_warmup_steps; + cached_steps.clear(); + continuous_cached_steps = 0; + accumulated_residual_diff = 0.0f; + + for (auto& entry : double_block_cache) { + 
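// TaylorSeerState above caches the last computed output together with finite-difference
// derivative estimates and predicts skipped steps with a truncated Taylor expansion,
//   Y(t + k) ~= sum_o dY_o(t) * k^o / o!
// A standalone first-order illustration of that idea (made-up numbers, not the exact state
// machine implemented above):
#include <cstdio>

int main() {
    float y_prev = 10.0f, y_curr = 14.0f;  // outputs actually computed at steps 2 and 4
    int   step_prev = 2, step_curr = 4;
    float dy = (y_curr - y_prev) / (float)(step_curr - step_prev);  // ~2 per step
    int   target = 5;                                               // a step we want to skip
    float y_pred = y_curr + dy * (float)(target - step_curr);       // 14 + 2 * 1 = 16
    std::printf("predicted output at step %d: %.1f\n", target, y_pred);
    return 0;
}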
entry.residual_img.clear(); + entry.residual_txt.clear(); + entry.prev_img.clear(); + entry.prev_txt.clear(); + entry.has_prev = false; + } + + for (auto& entry : single_block_cache) { + entry.residual.clear(); + entry.prev_output.clear(); + entry.has_prev = false; + } + + Fn_residual_img.clear(); + Fn_residual_txt.clear(); + prev_Fn_residual_img.clear(); + prev_Fn_residual_txt.clear(); + has_prev_Fn_residual = false; + + Bn_buffer_img.clear(); + Bn_buffer_txt.clear(); + Bn_buffer.clear(); + has_Bn_buffer = false; + + taylor_state.reset(); + + can_cache_this_step = false; + is_caching_this_step = false; + + total_blocks_computed = 0; + total_blocks_cached = 0; + } + + bool enabled() const { + return initialized && (config.dbcache.enabled || config.taylorseer.enabled); + } + + void begin_step(int step_index, float sigma = 0.0f) { + if (!enabled()) + return; + if (step_index == current_step) + return; + + current_step = step_index; + total_steps++; + + bool in_warmup = warmup_remaining > 0; + if (in_warmup) { + warmup_remaining--; + } + + bool scm_allows_cache = true; + if (!config.dbcache.steps_computation_mask.empty()) { + if (step_index < static_cast(config.dbcache.steps_computation_mask.size())) { + scm_allows_cache = (config.dbcache.steps_computation_mask[step_index] == 0); + if (!config.dbcache.scm_policy_dynamic && scm_allows_cache) { + can_cache_this_step = true; + is_caching_this_step = false; + return; + } + } + } + + bool max_cached_ok = (config.dbcache.max_cached_steps < 0) || + (static_cast(cached_steps.size()) < config.dbcache.max_cached_steps); + + bool max_cont_ok = (config.dbcache.max_continuous_cached_steps < 0) || + (continuous_cached_steps < config.dbcache.max_continuous_cached_steps); + + bool accum_ok = (config.dbcache.max_accumulated_residual_diff < 0.0f) || + (accumulated_residual_diff < config.dbcache.max_accumulated_residual_diff); + + can_cache_this_step = !in_warmup && scm_allows_cache && max_cached_ok && max_cont_ok && accum_ok && has_prev_Fn_residual; + is_caching_this_step = false; + } + + void end_step(bool was_cached) { + if (was_cached) { + cached_steps.push_back(current_step); + continuous_cached_steps++; + } else { + continuous_cached_steps = 0; + } + } + + static float calculate_residual_diff(const float* prev, const float* curr, size_t size) { + if (size == 0) + return 0.0f; + + float sum_diff = 0.0f; + float sum_abs = 0.0f; + + for (size_t i = 0; i < size; i++) { + sum_diff += std::fabs(prev[i] - curr[i]); + sum_abs += std::fabs(prev[i]); + } + + return sum_diff / (sum_abs + 1e-6f); + } + + static float calculate_residual_diff(const std::vector& prev, const std::vector& curr) { + if (prev.size() != curr.size() || prev.empty()) + return 1.0f; + return calculate_residual_diff(prev.data(), curr.data(), prev.size()); + } + + int get_double_Fn_blocks() const { + return (config.double_Fn_blocks >= 0) ? config.double_Fn_blocks : config.dbcache.Fn_compute_blocks; + } + + int get_double_Bn_blocks() const { + return (config.double_Bn_blocks >= 0) ? config.double_Bn_blocks : config.dbcache.Bn_compute_blocks; + } + + int get_single_Fn_blocks() const { + return (config.single_Fn_blocks >= 0) ? config.single_Fn_blocks : config.dbcache.Fn_compute_blocks; + } + + int get_single_Bn_blocks() const { + return (config.single_Bn_blocks >= 0) ? 
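// The cache decision above keys off the relative L1 difference computed by
// calculate_residual_diff:
//   diff = sum_i |prev_i - curr_i| / (sum_i |prev_i| + 1e-6)
// Standalone illustration with made-up numbers; a step becomes a cache candidate when diff
// falls below residual_diff_threshold (0.08 by default above).
#include <cmath>
#include <cstdio>

int main() {
    const float prev[4] = {1.0f, -2.0f, 0.5f, 4.0f};
    const float curr[4] = {1.1f, -2.0f, 0.4f, 4.1f};
    float sum_diff = 0.0f, sum_abs = 0.0f;
    for (int i = 0; i < 4; i++) {
        sum_diff += std::fabs(prev[i] - curr[i]);
        sum_abs  += std::fabs(prev[i]);
    }
    float diff = sum_diff / (sum_abs + 1e-6f);  // 0.3 / 7.5 = 0.04
    std::printf("relative diff = %.4f -> %s\n", diff,
                diff < 0.08f ? "reuse cached residuals" : "recompute");
    return 0;
}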
config.single_Bn_blocks : config.dbcache.Bn_compute_blocks; + } + + bool is_Fn_double_block(int block_idx) const { + return block_idx < get_double_Fn_blocks(); + } + + bool is_Bn_double_block(int block_idx) const { + int Bn = get_double_Bn_blocks(); + return Bn > 0 && block_idx >= (total_double_blocks - Bn); + } + + bool is_Mn_double_block(int block_idx) const { + return !is_Fn_double_block(block_idx) && !is_Bn_double_block(block_idx); + } + + bool is_Fn_single_block(int block_idx) const { + return block_idx < get_single_Fn_blocks(); + } + + bool is_Bn_single_block(int block_idx) const { + int Bn = get_single_Bn_blocks(); + return Bn > 0 && block_idx >= (total_single_blocks - Bn); + } + + bool is_Mn_single_block(int block_idx) const { + return !is_Fn_single_block(block_idx) && !is_Bn_single_block(block_idx); + } + + void store_Fn_residual(const float* img, const float* txt, size_t img_size, size_t txt_size, const float* input_img, const float* input_txt) { + Fn_residual_img.resize(img_size); + Fn_residual_txt.resize(txt_size); + + for (size_t i = 0; i < img_size; i++) { + Fn_residual_img[i] = img[i] - input_img[i]; + } + for (size_t i = 0; i < txt_size; i++) { + Fn_residual_txt[i] = txt[i] - input_txt[i]; + } + } + + bool check_cache_decision() { + if (!can_cache_this_step) { + is_caching_this_step = false; + return false; + } + + if (!has_prev_Fn_residual || prev_Fn_residual_img.empty()) { + is_caching_this_step = false; + return false; + } + + float diff_img = calculate_residual_diff(prev_Fn_residual_img, Fn_residual_img); + float diff_txt = calculate_residual_diff(prev_Fn_residual_txt, Fn_residual_txt); + float diff = (diff_img + diff_txt) / 2.0f; + + if (diff < config.dbcache.residual_diff_threshold) { + is_caching_this_step = true; + accumulated_residual_diff += diff; + return true; + } + + is_caching_this_step = false; + return false; + } + + void update_prev_Fn_residual() { + prev_Fn_residual_img = Fn_residual_img; + prev_Fn_residual_txt = Fn_residual_txt; + has_prev_Fn_residual = !prev_Fn_residual_img.empty(); + } + + void store_double_block_residual(int block_idx, const float* img, const float* txt, size_t img_size, size_t txt_size, const float* prev_img, const float* prev_txt) { + if (block_idx < 0 || block_idx >= static_cast(double_block_cache.size())) + return; + + BlockCacheEntry& entry = double_block_cache[block_idx]; + + entry.residual_img.resize(img_size); + entry.residual_txt.resize(txt_size); + for (size_t i = 0; i < img_size; i++) { + entry.residual_img[i] = img[i] - prev_img[i]; + } + for (size_t i = 0; i < txt_size; i++) { + entry.residual_txt[i] = txt[i] - prev_txt[i]; + } + + entry.prev_img.resize(img_size); + entry.prev_txt.resize(txt_size); + for (size_t i = 0; i < img_size; i++) { + entry.prev_img[i] = img[i]; + } + for (size_t i = 0; i < txt_size; i++) { + entry.prev_txt[i] = txt[i]; + } + entry.has_prev = true; + } + + void apply_double_block_cache(int block_idx, float* img, float* txt, size_t img_size, size_t txt_size) { + if (block_idx < 0 || block_idx >= static_cast(double_block_cache.size())) + return; + + const BlockCacheEntry& entry = double_block_cache[block_idx]; + if (entry.residual_img.size() != img_size || entry.residual_txt.size() != txt_size) + return; + + for (size_t i = 0; i < img_size; i++) { + img[i] += entry.residual_img[i]; + } + for (size_t i = 0; i < txt_size; i++) { + txt[i] += entry.residual_txt[i]; + } + + total_blocks_cached++; + } + + void store_single_block_residual(int block_idx, const float* output, size_t size, const float* input) { 
+ if (block_idx < 0 || block_idx >= static_cast(single_block_cache.size())) + return; + + BlockCacheEntry& entry = single_block_cache[block_idx]; + + entry.residual.resize(size); + for (size_t i = 0; i < size; i++) { + entry.residual[i] = output[i] - input[i]; + } + + entry.prev_output.resize(size); + for (size_t i = 0; i < size; i++) { + entry.prev_output[i] = output[i]; + } + entry.has_prev = true; + } + + void apply_single_block_cache(int block_idx, float* output, size_t size) { + if (block_idx < 0 || block_idx >= static_cast(single_block_cache.size())) + return; + + const BlockCacheEntry& entry = single_block_cache[block_idx]; + if (entry.residual.size() != size) + return; + + for (size_t i = 0; i < size; i++) { + output[i] += entry.residual[i]; + } + + total_blocks_cached++; + } + + void store_Bn_buffer(const float* img, const float* txt, size_t img_size, size_t txt_size, const float* Bn_start_img, const float* Bn_start_txt) { + Bn_buffer_img.resize(img_size); + Bn_buffer_txt.resize(txt_size); + + for (size_t i = 0; i < img_size; i++) { + Bn_buffer_img[i] = img[i] - Bn_start_img[i]; + } + for (size_t i = 0; i < txt_size; i++) { + Bn_buffer_txt[i] = txt[i] - Bn_start_txt[i]; + } + has_Bn_buffer = true; + } + + void apply_Bn_buffer(float* img, float* txt, size_t img_size, size_t txt_size) { + if (!has_Bn_buffer) + return; + if (Bn_buffer_img.size() != img_size || Bn_buffer_txt.size() != txt_size) + return; + + for (size_t i = 0; i < img_size; i++) { + img[i] += Bn_buffer_img[i]; + } + for (size_t i = 0; i < txt_size; i++) { + txt[i] += Bn_buffer_txt[i]; + } + } + + void taylor_update(const float* hidden_state, size_t size) { + if (!config.taylorseer.enabled) + return; + taylor_state.update_derivatives(hidden_state, size, current_step); + } + + bool taylor_can_approximate() const { + return config.taylorseer.enabled && taylor_state.can_approximate(); + } + + void taylor_approximate(float* output, size_t size) { + if (!config.taylorseer.enabled) + return; + taylor_state.approximate(output, size, current_step); + } + + bool should_use_taylor_this_step() const { + if (!config.taylorseer.enabled) + return false; + if (current_step < config.taylorseer.max_warmup_steps) + return false; + + int interval = config.taylorseer.skip_interval_steps; + if (interval <= 0) + interval = 1; + + return (current_step % (interval + 1)) != 0; + } + + void log_metrics() const { + if (!enabled()) + return; + + int total_blocks = total_blocks_computed + total_blocks_cached; + float cache_ratio = (total_blocks > 0) ? (static_cast(total_blocks_cached) / total_blocks * 100.0f) : 0.0f; + + float step_cache_ratio = (total_steps > 0) ? 
(static_cast(cached_steps.size()) / total_steps * 100.0f) : 0.0f; + + LOG_INFO("CacheDIT: steps_cached=%zu/%d (%.1f%%), blocks_cached=%d/%d (%.1f%%), accum_diff=%.4f", + cached_steps.size(), total_steps, step_cache_ratio, + total_blocks_cached, total_blocks, cache_ratio, + accumulated_residual_diff); + } + + std::string get_summary() const { + char buf[256]; + snprintf(buf, sizeof(buf), + "CacheDIT[thresh=%.2f]: cached %zu/%d steps, %d/%d blocks", + config.dbcache.residual_diff_threshold, + cached_steps.size(), total_steps, + total_blocks_cached, total_blocks_computed + total_blocks_cached); + return std::string(buf); + } +}; + +inline std::vector parse_scm_mask(const std::string& mask_str) { + std::vector mask; + if (mask_str.empty()) + return mask; + + size_t pos = 0; + size_t start = 0; + while ((pos = mask_str.find(',', start)) != std::string::npos) { + std::string token = mask_str.substr(start, pos - start); + mask.push_back(std::stoi(token)); + start = pos + 1; + } + if (start < mask_str.length()) { + mask.push_back(std::stoi(mask_str.substr(start))); + } + + return mask; +} + +inline std::vector generate_scm_mask( + const std::vector& compute_bins, + const std::vector& cache_bins, + int total_steps) { + std::vector mask; + size_t c_idx = 0, cache_idx = 0; + + while (static_cast(mask.size()) < total_steps) { + if (c_idx < compute_bins.size()) { + for (int i = 0; i < compute_bins[c_idx] && static_cast(mask.size()) < total_steps; i++) { + mask.push_back(1); + } + c_idx++; + } + if (cache_idx < cache_bins.size()) { + for (int i = 0; i < cache_bins[cache_idx] && static_cast(mask.size()) < total_steps; i++) { + mask.push_back(0); + } + cache_idx++; + } + if (c_idx >= compute_bins.size() && cache_idx >= cache_bins.size()) + break; + } + + if (!mask.empty()) { + mask.back() = 1; + } + + return mask; +} + +inline std::vector get_scm_preset(const std::string& preset, int total_steps) { + struct Preset { + std::vector compute_bins; + std::vector cache_bins; + }; + + Preset slow = {{8, 3, 3, 2, 1, 1}, {1, 2, 2, 2, 3}}; + Preset medium = {{6, 2, 2, 2, 2, 1}, {1, 3, 3, 3, 3}}; + Preset fast = {{6, 1, 1, 1, 1, 1}, {1, 3, 4, 5, 4}}; + Preset ultra = {{4, 1, 1, 1, 1}, {2, 5, 6, 7}}; + + Preset* p = nullptr; + if (preset == "slow" || preset == "s" || preset == "S") + p = &slow; + else if (preset == "medium" || preset == "m" || preset == "M") + p = &medium; + else if (preset == "fast" || preset == "f" || preset == "F") + p = &fast; + else if (preset == "ultra" || preset == "u" || preset == "U") + p = &ultra; + else + return {}; + + if (total_steps != 28 && total_steps > 0) { + float scale = static_cast(total_steps) / 28.0f; + std::vector scaled_compute, scaled_cache; + + for (int v : p->compute_bins) { + scaled_compute.push_back(std::max(1, static_cast(v * scale + 0.5f))); + } + for (int v : p->cache_bins) { + scaled_cache.push_back(std::max(1, static_cast(v * scale + 0.5f))); + } + + return generate_scm_mask(scaled_compute, scaled_cache, total_steps); + } + + return generate_scm_mask(p->compute_bins, p->cache_bins, total_steps); +} + +inline float get_preset_threshold(const std::string& preset) { + if (preset == "slow" || preset == "s" || preset == "S") + return 0.20f; + if (preset == "medium" || preset == "m" || preset == "M") + return 0.25f; + if (preset == "fast" || preset == "f" || preset == "F") + return 0.30f; + if (preset == "ultra" || preset == "u" || preset == "U") + return 0.34f; + return 0.08f; +} + +inline int get_preset_warmup(const std::string& preset) { + if (preset == "slow" || preset 
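// Usage sketch for the step-computation-mask helpers above (assumes the project's include paths
// so this header is reachable as "cache_dit.hpp"). The "slow" preset at 28 steps interleaves its
// compute bins {8,3,3,2,1,1} with its cache bins {1,2,2,2,3} and forces the final step to
// compute, giving 18 computed and 10 cached steps:
//   11111111 0 111 00 111 00 11 00 1 000 1
#include <cstdio>
#include "cache_dit.hpp"

int main() {
    std::vector<int> mask = get_scm_preset("slow", 28);
    int computed = 0, cached = 0;
    for (int v : mask) {
        if (v) computed++; else cached++;
    }
    std::printf("steps=%zu computed=%d cached=%d\n", mask.size(), computed, cached);
    return 0;
}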
== "s" || preset == "S") + return 8; + if (preset == "medium" || preset == "m" || preset == "M") + return 6; + if (preset == "fast" || preset == "f" || preset == "F") + return 6; + if (preset == "ultra" || preset == "u" || preset == "U") + return 4; + return 8; +} + +inline int get_preset_Fn(const std::string& preset) { + if (preset == "slow" || preset == "s" || preset == "S") + return 8; + if (preset == "medium" || preset == "m" || preset == "M") + return 8; + if (preset == "fast" || preset == "f" || preset == "F") + return 6; + if (preset == "ultra" || preset == "u" || preset == "U") + return 4; + return 8; +} + +inline int get_preset_Bn(const std::string& preset) { + (void)preset; + return 0; +} + +inline void parse_dbcache_options(const std::string& opts, DBCacheConfig& cfg) { + if (opts.empty()) + return; + + int Fn = 8, Bn = 0, warmup = 8, max_cached = -1, max_cont = -1; + float thresh = 0.08f; + + sscanf(opts.c_str(), "%d,%d,%f,%d,%d,%d", + &Fn, &Bn, &thresh, &warmup, &max_cached, &max_cont); + + cfg.Fn_compute_blocks = Fn; + cfg.Bn_compute_blocks = Bn; + cfg.residual_diff_threshold = thresh; + cfg.max_warmup_steps = warmup; + cfg.max_cached_steps = max_cached; + cfg.max_continuous_cached_steps = max_cont; +} + +inline void parse_taylorseer_options(const std::string& opts, TaylorSeerConfig& cfg) { + if (opts.empty()) + return; + + int n_deriv = 1, warmup = 2, interval = 1; + sscanf(opts.c_str(), "%d,%d,%d", &n_deriv, &warmup, &interval); + + cfg.n_derivatives = n_deriv; + cfg.max_warmup_steps = warmup; + cfg.skip_interval_steps = interval; +} + +struct CacheDitConditionState { + DBCacheConfig config; + TaylorSeerConfig taylor_config; + bool initialized = false; + + int current_step_index = -1; + bool step_active = false; + bool skip_current_step = false; + bool initial_step = true; + int warmup_remaining = 0; + std::vector cached_steps; + int continuous_cached_steps = 0; + float accumulated_residual_diff = 0.0f; + int total_steps_skipped = 0; + + const void* anchor_condition = nullptr; + + struct CacheEntry { + std::vector diff; + std::vector prev_input; + std::vector prev_output; + bool has_prev = false; + }; + std::unordered_map cache_diffs; + + TaylorSeerState taylor_state; + + float start_sigma = std::numeric_limits::max(); + float end_sigma = 0.0f; + + void reset_runtime() { + current_step_index = -1; + step_active = false; + skip_current_step = false; + initial_step = true; + warmup_remaining = config.max_warmup_steps; + cached_steps.clear(); + continuous_cached_steps = 0; + accumulated_residual_diff = 0.0f; + total_steps_skipped = 0; + anchor_condition = nullptr; + cache_diffs.clear(); + taylor_state.reset(); + } + + void init(const DBCacheConfig& dbcfg, const TaylorSeerConfig& tcfg) { + config = dbcfg; + taylor_config = tcfg; + initialized = dbcfg.enabled || tcfg.enabled; + reset_runtime(); + + if (taylor_config.enabled) { + taylor_state.init(taylor_config.n_derivatives, 0); + } + } + + void set_sigmas(const std::vector& sigmas) { + if (!initialized || sigmas.size() < 2) + return; + + float start_percent = 0.15f; + float end_percent = 0.95f; + + size_t n_steps = sigmas.size() - 1; + size_t start_step = static_cast(start_percent * n_steps); + size_t end_step = static_cast(end_percent * n_steps); + + if (start_step >= n_steps) + start_step = n_steps - 1; + if (end_step >= n_steps) + end_step = n_steps - 1; + + start_sigma = sigmas[start_step]; + end_sigma = sigmas[end_step]; + + if (start_sigma < end_sigma) { + std::swap(start_sigma, end_sigma); + } + } + + bool enabled() 
const { + return initialized && (config.enabled || taylor_config.enabled); + } + + void begin_step(int step_index, float sigma) { + if (!enabled()) + return; + if (step_index == current_step_index) + return; + + current_step_index = step_index; + skip_current_step = false; + step_active = false; + + if (sigma > start_sigma) + return; + if (!(sigma > end_sigma)) + return; + + step_active = true; + + if (warmup_remaining > 0) { + warmup_remaining--; + return; + } + + if (!config.steps_computation_mask.empty()) { + if (step_index < static_cast(config.steps_computation_mask.size())) { + if (config.steps_computation_mask[step_index] == 1) { + return; + } + } + } + + if (config.max_cached_steps >= 0 && + static_cast(cached_steps.size()) >= config.max_cached_steps) { + return; + } + + if (config.max_continuous_cached_steps >= 0 && + continuous_cached_steps >= config.max_continuous_cached_steps) { + return; + } + } + + bool step_is_active() const { + return enabled() && step_active; + } + + bool is_step_skipped() const { + return enabled() && step_active && skip_current_step; + } + + bool has_cache(const void* cond) const { + auto it = cache_diffs.find(cond); + return it != cache_diffs.end() && !it->second.diff.empty(); + } + + void update_cache(const void* cond, const float* input, const float* output, size_t size) { + CacheEntry& entry = cache_diffs[cond]; + entry.diff.resize(size); + for (size_t i = 0; i < size; i++) { + entry.diff[i] = output[i] - input[i]; + } + + entry.prev_input.resize(size); + entry.prev_output.resize(size); + for (size_t i = 0; i < size; i++) { + entry.prev_input[i] = input[i]; + entry.prev_output[i] = output[i]; + } + entry.has_prev = true; + } + + void apply_cache(const void* cond, const float* input, float* output, size_t size) { + auto it = cache_diffs.find(cond); + if (it == cache_diffs.end() || it->second.diff.empty()) + return; + if (it->second.diff.size() != size) + return; + + for (size_t i = 0; i < size; i++) { + output[i] = input[i] + it->second.diff[i]; + } + } + + bool before_condition(const void* cond, struct ggml_tensor* input, struct ggml_tensor* output, float sigma, int step_index) { + if (!enabled() || step_index < 0) + return false; + + if (step_index != current_step_index) { + begin_step(step_index, sigma); + } + + if (!step_active) + return false; + + if (initial_step) { + anchor_condition = cond; + initial_step = false; + } + + bool is_anchor = (cond == anchor_condition); + + if (skip_current_step) { + if (has_cache(cond)) { + apply_cache(cond, (float*)input->data, (float*)output->data, + static_cast(ggml_nelements(output))); + return true; + } + return false; + } + + if (!is_anchor) + return false; + + auto it = cache_diffs.find(cond); + if (it == cache_diffs.end() || !it->second.has_prev) + return false; + + size_t ne = static_cast(ggml_nelements(input)); + if (it->second.prev_input.size() != ne) + return false; + + float* input_data = (float*)input->data; + float diff = CacheDitState::calculate_residual_diff( + it->second.prev_input.data(), input_data, ne); + + float effective_threshold = config.residual_diff_threshold; + if (config.Fn_compute_blocks > 0) { + float fn_confidence = 1.0f + 0.02f * (config.Fn_compute_blocks - 8); + fn_confidence = std::max(0.5f, std::min(2.0f, fn_confidence)); + effective_threshold *= fn_confidence; + } + if (config.Bn_compute_blocks > 0) { + float bn_quality = 1.0f - 0.03f * config.Bn_compute_blocks; + bn_quality = std::max(0.5f, std::min(1.0f, bn_quality)); + effective_threshold *= bn_quality; + } + + if (diff < 
effective_threshold) { + skip_current_step = true; + total_steps_skipped++; + cached_steps.push_back(current_step_index); + continuous_cached_steps++; + accumulated_residual_diff += diff; + apply_cache(cond, input_data, (float*)output->data, ne); + return true; + } + + continuous_cached_steps = 0; + return false; + } + + void after_condition(const void* cond, struct ggml_tensor* input, struct ggml_tensor* output) { + if (!step_is_active()) + return; + + size_t ne = static_cast(ggml_nelements(output)); + update_cache(cond, (float*)input->data, (float*)output->data, ne); + + if (cond == anchor_condition && taylor_config.enabled) { + taylor_state.update_derivatives((float*)output->data, ne, current_step_index); + } + } + + void log_metrics() const { + if (!enabled()) + return; + + LOG_INFO("CacheDIT: steps_skipped=%d/%d (%.1f%%), accum_residual_diff=%.4f", + total_steps_skipped, + current_step_index + 1, + (current_step_index > 0) ? (100.0f * total_steps_skipped / (current_step_index + 1)) : 0.0f, + accumulated_residual_diff); + } +}; + +#endif diff --git a/clip.hpp b/src/clip.hpp similarity index 94% rename from clip.hpp rename to src/clip.hpp index 24c94f1b..adecd4d2 100644 --- a/clip.hpp +++ b/src/clip.hpp @@ -4,6 +4,7 @@ #include "ggml_extend.hpp" #include "model.h" #include "tokenize_util.h" +#include "vocab/vocab.h" /*================================================== CLIPTokenizer ===================================================*/ @@ -110,7 +111,7 @@ public: if (merges_utf8_str.size() > 0) { load_from_merges(merges_utf8_str); } else { - load_from_merges(ModelLoader::load_merges()); + load_from_merges(load_clip_merges()); } add_special_token("<|startoftext|>"); add_special_token("<|endoftext|>"); @@ -296,7 +297,7 @@ public: size_t max_length = 0, bool padding = false) { if (max_length > 0 && padding) { - size_t n = std::ceil(tokens.size() * 1.0 / (max_length - 2)); + size_t n = static_cast(std::ceil(tokens.size() * 1.0 / (max_length - 2))); if (n == 0) { n = 1; } @@ -479,9 +480,9 @@ public: x = fc1->forward(ctx, x); if (use_gelu) { - x = ggml_gelu_inplace(ctx->ggml_ctx, x); + x = ggml_ext_gelu(ctx->ggml_ctx, x, true); } else { - x = ggml_gelu_quick_inplace(ctx->ggml_ctx, x); + x = ggml_ext_gelu_quick(ctx->ggml_ctx, x, true); } x = fc2->forward(ctx, x); return x; @@ -510,7 +511,7 @@ public: blocks["mlp"] = std::shared_ptr(new CLIPMLP(d_model, intermediate_size)); } - struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, bool mask = true) { + struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* mask = nullptr) { // x: [N, n_token, d_model] auto self_attn = std::dynamic_pointer_cast(blocks["self_attn"]); auto layer_norm1 = std::dynamic_pointer_cast(blocks["layer_norm1"]); @@ -525,10 +526,10 @@ public: struct CLIPEncoder : public GGMLBlock { protected: - int64_t n_layer; + int n_layer; public: - CLIPEncoder(int64_t n_layer, + CLIPEncoder(int n_layer, int64_t d_model, int64_t n_head, int64_t intermediate_size, @@ -542,8 +543,8 @@ public: struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, - int clip_skip = -1, - bool mask = true) { + struct ggml_tensor* mask = nullptr, + int clip_skip = -1) { // x: [N, n_token, d_model] int layer_idx = n_layer - 1; // LOG_DEBUG("clip_skip %d", clip_skip); @@ -623,10 +624,10 @@ public: class CLIPVisionEmbeddings : public GGMLBlock { protected: int64_t embed_dim; - int64_t num_channels; - int64_t patch_size; - int64_t image_size; - int64_t num_patches; + int num_channels; 
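// Illustrative sketch, not part of the diff: the DBCache skip decision from cache_dit.hpp
// above, restated in isolation. A step is served from cache when the change of the anchor
// input relative to the previous step stays under the threshold (further scaled by the
// Fn/Bn heuristics shown above), and the cached residual (prev_output - prev_input) is
// replayed onto the new input. The relative-L1 metric below is an assumption standing in
// for calculate_residual_diff(), whose body is not part of this hunk.
#include <cmath>
#include <cstdio>
#include <vector>

static float relative_l1_diff(const std::vector<float>& prev, const std::vector<float>& cur) {
    float num = 0.f, den = 0.f;
    for (size_t i = 0; i < prev.size(); i++) {
        num += std::fabs(cur[i] - prev[i]);
        den += std::fabs(prev[i]);
    }
    return den > 0.f ? num / den : 0.f;
}

int main() {
    std::vector<float> prev_in  = {1.0f, 2.0f, 3.0f};
    std::vector<float> prev_out = {1.5f, 2.5f, 3.5f};     // cached residual is +0.5 per element
    std::vector<float> cur_in   = {1.02f, 2.01f, 3.03f};

    float diff      = relative_l1_diff(prev_in, cur_in);  // 0.06 / 6.0 = 0.01
    float threshold = 0.08f;                               // default residual_diff_threshold in this patch

    if (diff < threshold) {
        // skip the diffusion step: output = input + cached residual
        for (size_t i = 0; i < cur_in.size(); i++) {
            std::printf("%.2f ", cur_in[i] + (prev_out[i] - prev_in[i]));
        }
        std::printf("(served from cache, diff=%.3f)\n", diff);
    }
}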
+ int patch_size; + int image_size; + int num_patches; int64_t num_positions; void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { @@ -641,9 +642,9 @@ protected: public: CLIPVisionEmbeddings(int64_t embed_dim, - int64_t num_channels = 3, - int64_t patch_size = 14, - int64_t image_size = 224) + int num_channels = 3, + int patch_size = 14, + int image_size = 224) : embed_dim(embed_dim), num_channels(num_channels), patch_size(patch_size), @@ -741,16 +742,17 @@ public: struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* input_ids, struct ggml_tensor* tkn_embeddings, - size_t max_token_idx = 0, - bool return_pooled = false, - int clip_skip = -1) { + struct ggml_tensor* mask = nullptr, + size_t max_token_idx = 0, + bool return_pooled = false, + int clip_skip = -1) { // input_ids: [N, n_token] auto embeddings = std::dynamic_pointer_cast(blocks["embeddings"]); auto encoder = std::dynamic_pointer_cast(blocks["encoder"]); auto final_layer_norm = std::dynamic_pointer_cast(blocks["final_layer_norm"]); auto x = embeddings->forward(ctx, input_ids, tkn_embeddings); // [N, n_token, hidden_size] - x = encoder->forward(ctx, x, return_pooled ? -1 : clip_skip, true); + x = encoder->forward(ctx, x, mask, return_pooled ? -1 : clip_skip); if (return_pooled || with_final_ln) { x = final_layer_norm->forward(ctx, x); } @@ -814,10 +816,11 @@ public: auto x = embeddings->forward(ctx, pixel_values); // [N, num_positions, embed_dim] x = pre_layernorm->forward(ctx, x); - x = encoder->forward(ctx, x, clip_skip, false); - // print_ggml_tensor(x, true, "ClipVisionModel x: "); + x = encoder->forward(ctx, x, nullptr, clip_skip); + auto last_hidden_state = x; - x = post_layernorm->forward(ctx, x); // [N, n_token, hidden_size] + + x = post_layernorm->forward(ctx, x); // [N, n_token, hidden_size] GGML_ASSERT(x->ne[3] == 1); if (return_pooled) { @@ -905,6 +908,8 @@ public: struct CLIPTextModelRunner : public GGMLRunner { CLIPTextModel model; + std::vector attention_mask_vec; + CLIPTextModelRunner(ggml_backend_t backend, bool offload_params_to_cpu, const String2TensorStorage& tensor_storage_map, @@ -938,6 +943,7 @@ struct CLIPTextModelRunner : public GGMLRunner { struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* input_ids, struct ggml_tensor* embeddings, + struct ggml_tensor* mask, size_t max_token_idx = 0, bool return_pooled = false, int clip_skip = -1) { @@ -948,7 +954,7 @@ struct CLIPTextModelRunner : public GGMLRunner { input_ids = ggml_reshape_2d(ctx->ggml_ctx, input_ids, model.n_token, input_ids->ne[0] / model.n_token); } - return model.forward(ctx, input_ids, embeddings, max_token_idx, return_pooled, clip_skip); + return model.forward(ctx, input_ids, embeddings, mask, max_token_idx, return_pooled, clip_skip); } struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids, @@ -975,9 +981,23 @@ struct CLIPTextModelRunner : public GGMLRunner { embeddings = ggml_concat(compute_ctx, token_embed_weight, custom_embeddings, 1); } + int n_tokens = static_cast(input_ids->ne[0]); + attention_mask_vec.resize(n_tokens * n_tokens); + for (int i0 = 0; i0 < n_tokens; i0++) { + for (int i1 = 0; i1 < n_tokens; i1++) { + float value = 0.f; + if (i0 > i1) { + value = -INFINITY; + } + attention_mask_vec[i1 * n_tokens + i0] = value; + } + } + auto attention_mask = ggml_new_tensor_2d(compute_ctx, GGML_TYPE_F32, n_tokens, n_tokens); + set_backend_tensor_data(attention_mask, attention_mask_vec.data()); + auto runner_ctx = 
get_context(); - struct ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, embeddings, max_token_idx, return_pooled, clip_skip); + struct ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, embeddings, attention_mask, max_token_idx, return_pooled, clip_skip); ggml_build_forward_expand(gf, hidden_states); diff --git a/common.hpp b/src/common_block.hpp similarity index 96% rename from common.hpp rename to src/common_block.hpp index 74b218ab..435afa4f 100644 --- a/common.hpp +++ b/src/common_block.hpp @@ -1,5 +1,5 @@ -#ifndef __COMMON_HPP__ -#define __COMMON_HPP__ +#ifndef __COMMON_BLOCK_HPP__ +#define __COMMON_BLOCK_HPP__ #include "ggml_extend.hpp" @@ -28,7 +28,7 @@ public: if (vae_downsample) { auto conv = std::dynamic_pointer_cast(blocks["conv"]); - x = ggml_pad(ctx->ggml_ctx, x, 1, 1, 0, 0); + x = ggml_ext_pad(ctx->ggml_ctx, x, 1, 1, 0, 0, ctx->circular_x_enabled, ctx->circular_y_enabled); x = conv->forward(ctx, x); } else { auto conv = std::dynamic_pointer_cast(blocks["op"]); @@ -80,7 +80,7 @@ protected: std::pair padding) { GGML_ASSERT(dims == 2 || dims == 3); if (dims == 3) { - return std::shared_ptr(new Conv3dnx1x1(in_channels, out_channels, kernel_size.first, 1, padding.first)); + return std::shared_ptr(new Conv3d(in_channels, out_channels, {kernel_size.first, 1, 1}, {1, 1, 1}, {padding.first, 0, 0})); } else { return std::shared_ptr(new Conv2d(in_channels, out_channels, kernel_size, {1, 1}, padding)); } @@ -200,7 +200,7 @@ public: gate = ggml_cont(ctx->ggml_ctx, gate); - gate = ggml_gelu_inplace(ctx->ggml_ctx, gate); + gate = ggml_ext_gelu(ctx->ggml_ctx, gate, true); x = ggml_mul(ctx->ggml_ctx, x, gate); // [ne3, ne2, ne1, dim_out] @@ -220,7 +220,7 @@ public: auto proj = std::dynamic_pointer_cast(blocks["proj"]); x = proj->forward(ctx, x); - x = ggml_gelu_inplace(ctx->ggml_ctx, x); + x = ggml_ext_gelu(ctx->ggml_ctx, x, true); return x; } }; @@ -317,7 +317,7 @@ public: auto k = to_k->forward(ctx, context); // [N, n_context, inner_dim] auto v = to_v->forward(ctx, context); // [N, n_context, inner_dim] - x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, n_head, nullptr, false, false, ctx->flash_attn_enabled); // [N, n_token, inner_dim] + x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, n_head, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, inner_dim] x = to_out_0->forward(ctx, x); // [N, n_token, query_dim] return x; @@ -536,17 +536,17 @@ public: // image_only_indicator is always tensor([0.]) float alpha = get_alpha(); auto x = ggml_add(ctx->ggml_ctx, - ggml_scale(ctx->ggml_ctx, x_spatial, alpha), - ggml_scale(ctx->ggml_ctx, x_temporal, 1.0f - alpha)); + ggml_ext_scale(ctx->ggml_ctx, x_spatial, alpha), + ggml_ext_scale(ctx->ggml_ctx, x_temporal, 1.0f - alpha)); return x; } }; class VideoResBlock : public ResBlock { public: - VideoResBlock(int channels, - int emb_channels, - int out_channels, + VideoResBlock(int64_t channels, + int64_t emb_channels, + int64_t out_channels, std::pair kernel_size = {3, 3}, int64_t video_kernel_size = 3, int dims = 2) // always 2 @@ -590,4 +590,4 @@ public: } }; -#endif // __COMMON_HPP__ +#endif // __COMMON_BLOCK_HPP__ diff --git a/src/common_dit.hpp b/src/common_dit.hpp new file mode 100644 index 00000000..0e6f0f08 --- /dev/null +++ b/src/common_dit.hpp @@ -0,0 +1,108 @@ +#ifndef __COMMON_DIT_HPP__ +#define __COMMON_DIT_HPP__ + +#include "ggml_extend.hpp" + +namespace DiT { + ggml_tensor* patchify(ggml_context* ctx, + ggml_tensor* x, + int pw, + int ph, + bool patch_last = true) { + // x: [N, C, 
H, W] + // return: [N, h*w, C*ph*pw] if patch_last else [N, h*w, ph*pw*C] + int64_t N = x->ne[3]; + int64_t C = x->ne[2]; + int64_t H = x->ne[1]; + int64_t W = x->ne[0]; + int64_t h = H / ph; + int64_t w = W / pw; + + GGML_ASSERT(h * ph == H && w * pw == W); + + x = ggml_reshape_4d(ctx, x, pw, w, ph, h * C * N); // [N*C*h, ph, w, pw] + x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, w, ph, pw] + x = ggml_reshape_4d(ctx, x, pw * ph, w * h, C, N); // [N, C, h*w, ph*pw] + if (patch_last) { + x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N, h*w, C, ph*pw] + x = ggml_reshape_3d(ctx, x, pw * ph * C, w * h, N); // [N, h*w, C*ph*pw] + } else { + x = ggml_cont(ctx, ggml_ext_torch_permute(ctx, x, 2, 0, 1, 3)); // [N, h*w, C, ph*pw] + x = ggml_reshape_3d(ctx, x, C * pw * ph, w * h, N); // [N, h*w, ph*pw*C] + } + return x; + } + + ggml_tensor* unpatchify(ggml_context* ctx, + ggml_tensor* x, + int64_t h, + int64_t w, + int ph, + int pw, + bool patch_last = true) { + // x: [N, h*w, C*ph*pw] if patch_last else [N, h*w, ph*pw*C] + // return: [N, C, H, W] + int64_t N = x->ne[2]; + int64_t C = x->ne[0] / ph / pw; + int64_t H = h * ph; + int64_t W = w * pw; + + GGML_ASSERT(C * ph * pw == x->ne[0]); + + if (patch_last) { + x = ggml_reshape_4d(ctx, x, pw * ph, C, w * h, N); // [N, h*w, C, ph*pw] + x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N, C, h*w, ph*pw] + } else { + x = ggml_reshape_4d(ctx, x, C, pw * ph, w * h, N); // [N, h*w, ph*pw, C] + x = ggml_cont(ctx, ggml_permute(ctx, x, 2, 0, 1, 3)); // [N, C, h*w, ph*pw] + } + + x = ggml_reshape_4d(ctx, x, pw, ph, w, h * C * N); // [N*C*h, w, ph, pw] + x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, ph, w, pw] + x = ggml_reshape_4d(ctx, x, W, H, C, N); // [N, C, h*ph, w*pw] + + return x; + } + + ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx, + ggml_tensor* x, + int ph, + int pw) { + int64_t W = x->ne[0]; + int64_t H = x->ne[1]; + + int pad_h = (ph - H % ph) % ph; + int pad_w = (pw - W % pw) % pw; + x = ggml_ext_pad(ctx->ggml_ctx, x, pad_w, pad_h, 0, 0, ctx->circular_x_enabled, ctx->circular_y_enabled); + return x; + } + + ggml_tensor* pad_and_patchify(GGMLRunnerContext* ctx, + ggml_tensor* x, + int ph, + int pw, + bool patch_last = true) { + x = pad_to_patch_size(ctx, x, ph, pw); + x = patchify(ctx->ggml_ctx, x, ph, pw, patch_last); + return x; + } + + ggml_tensor* unpatchify_and_crop(ggml_context* ctx, + ggml_tensor* x, + int64_t H, + int64_t W, + int ph, + int pw, + bool patch_last = true) { + int pad_h = (ph - H % ph) % ph; + int pad_w = (pw - W % pw) % pw; + int64_t h = ((H + pad_h) / ph); + int64_t w = ((W + pad_w) / pw); + x = unpatchify(ctx, x, h, w, ph, pw, patch_last); // [N, C, H + pad_h, W + pad_w] + x = ggml_ext_slice(ctx, x, 1, 0, H); // [N, C, H, W + pad_w] + x = ggml_ext_slice(ctx, x, 0, 0, W); // [N, C, H, W] + return x; + } +} // namespace DiT + +#endif // __COMMON_DIT_HPP__ \ No newline at end of file diff --git a/conditioner.hpp b/src/conditioner.hpp similarity index 82% rename from conditioner.hpp rename to src/conditioner.hpp index 45db314b..d4a3146b 100644 --- a/conditioner.hpp +++ b/src/conditioner.hpp @@ -10,9 +10,14 @@ struct SDCondition { struct ggml_tensor* c_vector = nullptr; // aka y struct ggml_tensor* c_concat = nullptr; + std::vector extra_c_crossattns; + SDCondition() = default; - SDCondition(struct ggml_tensor* c_crossattn, struct ggml_tensor* c_vector, struct ggml_tensor* c_concat) - : c_crossattn(c_crossattn), c_vector(c_vector), c_concat(c_concat) {} + 
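// Illustrative shape walk-through, not part of the diff, for the DiT helpers above:
// pad_to_patch_size + patchify turn an [N, C, H, W] latent into [N, h*w, C*ph*pw] tokens
// (patch_last = true), and unpatchify_and_crop undoes it. Pure arithmetic, no ggml needed;
// the latent size is hypothetical.
#include <cstdio>

int main() {
    const int H = 91, W = 129, C = 16, ph = 2, pw = 2;

    const int pad_h = (ph - H % ph) % ph;                 // 1
    const int pad_w = (pw - W % pw) % pw;                 // 1
    const int h = (H + pad_h) / ph;                       // 46 patches vertically
    const int w = (W + pad_w) / pw;                       // 65 patches horizontally

    std::printf("tokens per image : %d\n", h * w);        // 2990
    std::printf("features per tok : %d\n", C * ph * pw);  // 64
    // unpatchify_and_crop reshapes back to [N, C, H+pad_h, W+pad_w] and then slices away
    // the pad_h rows and pad_w columns to recover the original H x W.
}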
SDCondition(struct ggml_tensor* c_crossattn, + struct ggml_tensor* c_vector, + struct ggml_tensor* c_concat, + const std::vector& extra_c_crossattns = {}) + : c_crossattn(c_crossattn), c_vector(c_vector), c_concat(c_concat), extra_c_crossattns(extra_c_crossattns) {} }; struct ConditionerParams { @@ -34,6 +39,7 @@ struct Conditioner { virtual void free_params_buffer() = 0; virtual void get_param_tensors(std::map& tensors) = 0; virtual size_t get_params_buffer_size() = 0; + virtual void set_flash_attention_enabled(bool enabled) = 0; virtual void set_weight_adapter(const std::shared_ptr& adapter) {} virtual std::tuple> get_learned_condition_with_trigger(ggml_context* work_ctx, int n_threads, @@ -115,6 +121,13 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { return buffer_size; } + void set_flash_attention_enabled(bool enabled) override { + text_model->set_flash_attention_enabled(enabled); + if (sd_version_is_sdxl(version)) { + text_model2->set_flash_attention_enabled(enabled); + } + } + void set_weight_adapter(const std::shared_ptr& adapter) override { text_model->set_weight_adapter(adapter); if (sd_version_is_sdxl(version)) { @@ -303,11 +316,11 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { int class_token = clean_input_ids[class_token_index[0]]; class_idx = tokens_acc + class_token_index[0]; std::vector clean_input_ids_tmp; - for (uint32_t i = 0; i < class_token_index[0]; i++) + for (int i = 0; i < class_token_index[0]; i++) clean_input_ids_tmp.push_back(clean_input_ids[i]); - for (uint32_t i = 0; i < (pm_version == PM_VERSION_2 ? 2 * num_input_imgs : num_input_imgs); i++) + for (int i = 0; i < (pm_version == PM_VERSION_2 ? 2 * num_input_imgs : num_input_imgs); i++) clean_input_ids_tmp.push_back(class_token); - for (uint32_t i = class_token_index[0] + 1; i < clean_input_ids.size(); i++) + for (int i = class_token_index[0] + 1; i < clean_input_ids.size(); i++) clean_input_ids_tmp.push_back(clean_input_ids[i]); clean_input_ids.clear(); clean_input_ids = clean_input_ids_tmp; @@ -322,7 +335,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { tokenizer.pad_tokens(tokens, weights, max_length, padding); int offset = pm_version == PM_VERSION_2 ? 
2 * num_input_imgs : num_input_imgs; - for (uint32_t i = 0; i < tokens.size(); i++) { + for (int i = 0; i < tokens.size(); i++) { // if (class_idx + 1 <= i && i < class_idx + 1 + 2*num_input_imgs) // photomaker V2 has num_tokens(=2)*num_input_imgs if (class_idx + 1 <= i && i < class_idx + 1 + offset) // photomaker V2 has num_tokens(=2)*num_input_imgs // hardcode for now @@ -783,6 +796,18 @@ struct SD3CLIPEmbedder : public Conditioner { return buffer_size; } + void set_flash_attention_enabled(bool enabled) override { + if (clip_l) { + clip_l->set_flash_attention_enabled(enabled); + } + if (clip_g) { + clip_g->set_flash_attention_enabled(enabled); + } + if (t5) { + t5->set_flash_attention_enabled(enabled); + } + } + void set_weight_adapter(const std::shared_ptr& adapter) override { if (clip_l) { clip_l->set_weight_adapter(adapter); @@ -1191,6 +1216,15 @@ struct FluxCLIPEmbedder : public Conditioner { return buffer_size; } + void set_flash_attention_enabled(bool enabled) override { + if (clip_l) { + clip_l->set_flash_attention_enabled(enabled); + } + if (t5) { + t5->set_flash_attention_enabled(enabled); + } + } + void set_weight_adapter(const std::shared_ptr& adapter) { if (clip_l) { clip_l->set_weight_adapter(adapter); @@ -1440,6 +1474,12 @@ struct T5CLIPEmbedder : public Conditioner { return buffer_size; } + void set_flash_attention_enabled(bool enabled) override { + if (t5) { + t5->set_flash_attention_enabled(enabled); + } + } + void set_weight_adapter(const std::shared_ptr& adapter) override { if (t5) { t5->set_weight_adapter(adapter); @@ -1584,7 +1624,7 @@ struct T5CLIPEmbedder : public Conditioner { chunk_hidden_states->ne[0], ggml_nelements(hidden_states) / chunk_hidden_states->ne[0]); - modify_mask_to_attend_padding(t5_attn_mask, ggml_nelements(t5_attn_mask), mask_pad); + modify_mask_to_attend_padding(t5_attn_mask, static_cast(ggml_nelements(t5_attn_mask)), mask_pad); return {hidden_states, t5_attn_mask, nullptr}; } @@ -1601,6 +1641,142 @@ struct T5CLIPEmbedder : public Conditioner { } }; +struct AnimaConditioner : public Conditioner { + std::shared_ptr qwen_tokenizer; + T5UniGramTokenizer t5_tokenizer; + std::shared_ptr llm; + + AnimaConditioner(ggml_backend_t backend, + bool offload_params_to_cpu, + const String2TensorStorage& tensor_storage_map = {}) { + qwen_tokenizer = std::make_shared(); + llm = std::make_shared(LLM::LLMArch::QWEN3, + backend, + offload_params_to_cpu, + tensor_storage_map, + "text_encoders.llm", + false); + } + + void get_param_tensors(std::map& tensors) override { + llm->get_param_tensors(tensors, "text_encoders.llm"); + } + + void alloc_params_buffer() override { + llm->alloc_params_buffer(); + } + + void free_params_buffer() override { + llm->free_params_buffer(); + } + + size_t get_params_buffer_size() override { + return llm->get_params_buffer_size(); + } + + void set_flash_attention_enabled(bool enabled) override { + llm->set_flash_attention_enabled(enabled); + } + + void set_weight_adapter(const std::shared_ptr& adapter) override { + llm->set_weight_adapter(adapter); + } + + std::tuple, std::vector, std::vector, std::vector> tokenize(std::string text) { + auto parsed_attention = parse_prompt_attention(text); + + { + std::stringstream ss; + ss << "["; + for (const auto& item : parsed_attention) { + ss << "['" << item.first << "', " << item.second << "], "; + } + ss << "]"; + LOG_DEBUG("parse '%s' to %s", text.c_str(), ss.str().c_str()); + } + + std::vector qwen_tokens; + std::vector qwen_weights; + std::vector t5_tokens; + std::vector t5_weights; + + for 
(const auto& item : parsed_attention) { + const std::string& curr_text = item.first; + std::vector curr_tokens = qwen_tokenizer->tokenize(curr_text, nullptr); + qwen_tokens.insert(qwen_tokens.end(), curr_tokens.begin(), curr_tokens.end()); + // Anima uses uniform Qwen token weights. + qwen_weights.insert(qwen_weights.end(), curr_tokens.size(), 1.f); + } + if (qwen_tokens.empty()) { + qwen_tokens.push_back(151643); // qwen3 pad token + qwen_weights.push_back(1.f); + } + + for (const auto& item : parsed_attention) { + const std::string& curr_text = item.first; + float curr_weight = item.second; + std::vector curr_tokens = t5_tokenizer.Encode(curr_text, true); + t5_tokens.insert(t5_tokens.end(), curr_tokens.begin(), curr_tokens.end()); + t5_weights.insert(t5_weights.end(), curr_tokens.size(), curr_weight); + } + + return {qwen_tokens, qwen_weights, t5_tokens, t5_weights}; + } + + SDCondition get_learned_condition(ggml_context* work_ctx, + int n_threads, + const ConditionerParams& conditioner_params) override { + int64_t t0 = ggml_time_ms(); + + auto tokenized = tokenize(conditioner_params.text); + auto& qwen_tokens = std::get<0>(tokenized); + auto& qwen_weights = std::get<1>(tokenized); + auto& t5_tokens = std::get<2>(tokenized); + auto& t5_weights = std::get<3>(tokenized); + + auto input_ids = vector_to_ggml_tensor_i32(work_ctx, qwen_tokens); + + struct ggml_tensor* hidden_states = nullptr; // [N, n_token, 1024] + llm->compute(n_threads, + input_ids, + nullptr, + {}, + {}, + &hidden_states, + work_ctx); + + { + auto tensor = hidden_states; + float original_mean = ggml_ext_tensor_mean(tensor); + for (int i2 = 0; i2 < tensor->ne[2]; i2++) { + for (int i1 = 0; i1 < tensor->ne[1]; i1++) { + for (int i0 = 0; i0 < tensor->ne[0]; i0++) { + float value = ggml_ext_tensor_get_f32(tensor, i0, i1, i2); + value *= qwen_weights[i1]; + ggml_ext_tensor_set_f32(tensor, value, i0, i1, i2); + } + } + } + float new_mean = ggml_ext_tensor_mean(tensor); + if (new_mean != 0.f) { + ggml_ext_tensor_scale_inplace(tensor, (original_mean / new_mean)); + } + } + + struct ggml_tensor* t5_ids_tensor = nullptr; + struct ggml_tensor* t5_weight_tensor = nullptr; + if (!t5_tokens.empty()) { + t5_ids_tensor = vector_to_ggml_tensor_i32(work_ctx, t5_tokens); + t5_weight_tensor = vector_to_ggml_tensor(work_ctx, t5_weights); + } + + int64_t t1 = ggml_time_ms(); + LOG_DEBUG("computing condition graph completed, taking %" PRId64 " ms", t1 - t0); + + return {hidden_states, t5_weight_tensor, t5_ids_tensor}; + } +}; + struct LLMEmbedder : public Conditioner { SDVersion version; std::shared_ptr tokenizer; @@ -1614,9 +1790,9 @@ struct LLMEmbedder : public Conditioner { bool enable_vision = false) : version(version) { LLM::LLMArch arch = LLM::LLMArch::QWEN2_5_VL; - if (sd_version_is_flux2(version)) { + if (version == VERSION_FLUX2) { arch = LLM::LLMArch::MISTRAL_SMALL_3_2; - } else if (sd_version_is_z_image(version) || version == VERSION_OVIS_IMAGE) { + } else if (sd_version_is_z_image(version) || version == VERSION_OVIS_IMAGE || version == VERSION_FLUX2_KLEIN) { arch = LLM::LLMArch::QWEN3; } if (arch == LLM::LLMArch::MISTRAL_SMALL_3_2) { @@ -1650,6 +1826,10 @@ struct LLMEmbedder : public Conditioner { return buffer_size; } + void set_flash_attention_enabled(bool enabled) override { + llm->set_flash_attention_enabled(enabled); + } + void set_weight_adapter(const std::shared_ptr& adapter) override { if (llm) { llm->set_weight_adapter(adapter); @@ -1657,18 +1837,23 @@ struct LLMEmbedder : public Conditioner { } std::tuple, std::vector> 
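// Illustrative sketch, not part of the diff: the prompt-weight renormalization used in
// AnimaConditioner::get_learned_condition above. Each token embedding is scaled by its
// weight, then the whole tensor is rescaled so its mean matches the unweighted mean.
// For Anima the Qwen weights are uniform 1.0, so this is an identity there; the example
// below uses non-uniform (hypothetical) weights to show the effect.
#include <cstdio>
#include <vector>

int main() {
    std::vector<float> hidden  = {1.0f, 2.0f, 3.0f, 4.0f};  // one value per token, for brevity
    std::vector<float> weights = {1.0f, 1.3f, 1.0f, 0.7f};

    float original_mean = 0.f;
    for (float v : hidden) original_mean += v;
    original_mean /= static_cast<float>(hidden.size());     // 2.5

    float new_mean = 0.f;
    for (size_t i = 0; i < hidden.size(); i++) {
        hidden[i] *= weights[i];
        new_mean += hidden[i];
    }
    new_mean /= static_cast<float>(hidden.size());           // (1 + 2.6 + 3 + 2.8) / 4 = 2.35

    if (new_mean != 0.f) {
        for (float& v : hidden) v *= original_mean / new_mean;
    }
    for (float v : hidden) std::printf("%.3f ", v);           // mean restored to 2.5
    std::printf("\n");
}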
tokenize(std::string text, - std::pair attn_range, + const std::pair& attn_range, size_t max_length = 0, bool padding = false) { std::vector> parsed_attention; - parsed_attention.emplace_back(text.substr(0, attn_range.first), 1.f); - if (attn_range.second - attn_range.first > 0) { - auto new_parsed_attention = parse_prompt_attention(text.substr(attn_range.first, attn_range.second - attn_range.first)); - parsed_attention.insert(parsed_attention.end(), - new_parsed_attention.begin(), - new_parsed_attention.end()); + if (attn_range.first >= 0 && attn_range.second > 0) { + parsed_attention.emplace_back(text.substr(0, attn_range.first), 1.f); + if (attn_range.second - attn_range.first > 0) { + auto new_parsed_attention = parse_prompt_attention(text.substr(attn_range.first, attn_range.second - attn_range.first)); + parsed_attention.insert(parsed_attention.end(), + new_parsed_attention.begin(), + new_parsed_attention.end()); + } + parsed_attention.emplace_back(text.substr(attn_range.second), 1.f); + } else { + parsed_attention.emplace_back(text, 1.f); } - parsed_attention.emplace_back(text.substr(attn_range.second), 1.f); + { std::stringstream ss; ss << "["; @@ -1699,145 +1884,47 @@ struct LLMEmbedder : public Conditioner { return {tokens, weights}; } - SDCondition get_learned_condition(ggml_context* work_ctx, - int n_threads, - const ConditionerParams& conditioner_params) override { - std::string prompt; - std::vector> image_embeds; - std::pair prompt_attn_range; - int prompt_template_encode_start_idx = 34; - int max_length = 0; - std::set out_layers; - if (llm->enable_vision && conditioner_params.ref_images.size() > 0) { - LOG_INFO("QwenImageEditPlusPipeline"); - prompt_template_encode_start_idx = 64; - int image_embed_idx = 64 + 6; - - int min_pixels = 384 * 384; - int max_pixels = 560 * 560; - std::string placeholder = "<|image_pad|>"; - std::string img_prompt; - - for (int i = 0; i < conditioner_params.ref_images.size(); i++) { - sd_image_f32_t image = sd_image_t_to_sd_image_f32_t(*conditioner_params.ref_images[i]); - double factor = llm->params.vision.patch_size * llm->params.vision.spatial_merge_size; - int height = image.height; - int width = image.width; - int h_bar = static_cast(std::round(height / factor)) * factor; - int w_bar = static_cast(std::round(width / factor)) * factor; - - if (static_cast(h_bar) * w_bar > max_pixels) { - double beta = std::sqrt((height * width) / static_cast(max_pixels)); - h_bar = std::max(static_cast(factor), - static_cast(std::floor(height / beta / factor)) * static_cast(factor)); - w_bar = std::max(static_cast(factor), - static_cast(std::floor(width / beta / factor)) * static_cast(factor)); - } else if (static_cast(h_bar) * w_bar < min_pixels) { - double beta = std::sqrt(static_cast(min_pixels) / (height * width)); - h_bar = static_cast(std::ceil(height * beta / factor)) * static_cast(factor); - w_bar = static_cast(std::ceil(width * beta / factor)) * static_cast(factor); - } - - LOG_DEBUG("resize conditioner ref image %d from %dx%d to %dx%d", i, image.height, image.width, h_bar, w_bar); - - sd_image_f32_t resized_image = clip_preprocess(image, w_bar, h_bar); - free(image.data); - image.data = nullptr; - - ggml_tensor* image_tensor = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, resized_image.width, resized_image.height, 3, 1); - sd_image_f32_to_ggml_tensor(resized_image, image_tensor, false); - free(resized_image.data); - resized_image.data = nullptr; - - ggml_tensor* image_embed = nullptr; - llm->encode_image(n_threads, image_tensor, &image_embed, 
work_ctx); - image_embeds.emplace_back(image_embed_idx, image_embed); - image_embed_idx += 1 + image_embed->ne[1] + 6; - - img_prompt += "Picture " + std::to_string(i + 1) + ": <|vision_start|>"; // [24669, 220, index, 25, 220, 151652] - int64_t num_image_tokens = image_embed->ne[1]; - img_prompt.reserve(num_image_tokens * placeholder.size()); - for (int j = 0; j < num_image_tokens; j++) { - img_prompt += placeholder; - } - img_prompt += "<|vision_end|>"; - } - - prompt = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n"; - prompt += img_prompt; - - prompt_attn_range.first = static_cast(prompt.size()); - prompt += conditioner_params.text; - prompt_attn_range.second = static_cast(prompt.size()); - - prompt += "<|im_end|>\n<|im_start|>assistant\n"; - } else if (sd_version_is_flux2(version)) { - prompt_template_encode_start_idx = 0; - out_layers = {10, 20, 30}; - - prompt = "[SYSTEM_PROMPT]You are an AI that reasons about image descriptions. You give structured responses focusing on object relationships, object\nattribution and actions without speculation.[/SYSTEM_PROMPT][INST]"; - - prompt_attn_range.first = static_cast(prompt.size()); - prompt += conditioner_params.text; - prompt_attn_range.second = static_cast(prompt.size()); - - prompt += "[/INST]"; - } else if (sd_version_is_z_image(version)) { - prompt_template_encode_start_idx = 0; - out_layers = {35}; // -2 - - prompt = "<|im_start|>user\n"; - - prompt_attn_range.first = static_cast(prompt.size()); - prompt += conditioner_params.text; - prompt_attn_range.second = static_cast(prompt.size()); - - prompt += "<|im_end|>\n<|im_start|>assistant\n"; - } else if (sd_version_is_flux2(version)) { - prompt_template_encode_start_idx = 0; - out_layers = {10, 20, 30}; - - prompt = "[SYSTEM_PROMPT]You are an AI that reasons about image descriptions. 
You give structured responses focusing on object relationships, object\nattribution and actions without speculation.[/SYSTEM_PROMPT][INST]"; - - prompt_attn_range.first = prompt.size(); - prompt += conditioner_params.text; - prompt_attn_range.second = prompt.size(); - - prompt += "[/INST]"; - } else if (version == VERSION_OVIS_IMAGE) { - prompt_template_encode_start_idx = 28; - max_length = prompt_template_encode_start_idx + 256; - - prompt = "<|im_start|>user\nDescribe the image by detailing the color, quantity, text, shape, size, texture, spatial relationships of the objects and background:"; - - prompt_attn_range.first = static_cast(prompt.size()); - prompt += " " + conditioner_params.text; - prompt_attn_range.second = static_cast(prompt.size()); - - prompt += "<|im_end|>\n<|im_start|>assistant\n\n\n\n\n"; - } else { - prompt_template_encode_start_idx = 34; - - prompt = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n"; - - prompt_attn_range.first = static_cast(prompt.size()); - prompt += conditioner_params.text; - prompt_attn_range.second = static_cast(prompt.size()); - - prompt += "<|im_end|>\n<|im_start|>assistant\n"; - } - - auto tokens_and_weights = tokenize(prompt, prompt_attn_range, max_length, max_length > 0); + ggml_tensor* encode_prompt(ggml_context* work_ctx, + int n_threads, + const std::string prompt, + const std::pair& prompt_attn_range, + int max_length, + int min_length, + std::vector> image_embeds, + const std::set& out_layers, + int prompt_template_encode_start_idx) { + auto tokens_and_weights = tokenize(prompt, prompt_attn_range); auto& tokens = std::get<0>(tokens_and_weights); auto& weights = std::get<1>(tokens_and_weights); + std::vector mask; - int64_t t0 = ggml_time_ms(); - struct ggml_tensor* hidden_states = nullptr; // [N, n_token, 3584] + if (max_length > 0 && tokens.size() < max_length) { + mask.insert(mask.end(), tokens.size(), 1.f); + mask.insert(mask.end(), max_length - tokens.size(), 0.f); + tokenizer->pad_tokens(tokens, weights, max_length, true); + } + + struct ggml_tensor* hidden_states = nullptr; // [N, n_token, hidden_size] auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens); + ggml_tensor* attention_mask = nullptr; + if (!mask.empty()) { + attention_mask = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, mask.size(), mask.size()); + ggml_ext_tensor_iter(attention_mask, [&](ggml_tensor* attention_mask, int64_t i0, int64_t i1, int64_t i2, int64_t i3) { + float value = 0.f; + if (mask[i0] == 0.f) { + value = -INFINITY; + } else if (i0 > i1) { + value = -INFINITY; + } + ggml_ext_tensor_set_f32(attention_mask, value, i0, i1, i2, i3); + }); + } + llm->compute(n_threads, input_ids, + attention_mask, image_embeds, out_layers, &hidden_states, @@ -1860,11 +1947,6 @@ struct LLMEmbedder : public Conditioner { GGML_ASSERT(hidden_states->ne[1] > prompt_template_encode_start_idx); - int64_t min_length = 0; - if (sd_version_is_flux2(version)) { - min_length = 512; - } - int64_t zero_pad_len = 0; if (min_length > 0) { if (hidden_states->ne[1] - prompt_template_encode_start_idx < min_length) { @@ -1886,11 +1968,186 @@ struct LLMEmbedder : public Conditioner { ggml_ext_tensor_set_f32(new_hidden_states, value, i0, i1, i2, i3); }); - // print_ggml_tensor(new_hidden_states); + return new_hidden_states; + } + + SDCondition get_learned_condition(ggml_context* work_ctx, + int n_threads, + const ConditionerParams& conditioner_params) 
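// Illustrative sketch, not part of the diff: the attention mask built by encode_prompt()
// above, shown as a plain row-major [query][key] table. A position may attend to itself
// and to earlier real tokens; padded keys and future keys get -INF. The 5-token / 3-real
// example is hypothetical, and ggml stores the same values under its own dimension order.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const int n_tokens = 5;
    const int n_real   = 3;  // tokens 3 and 4 are padding

    std::vector<float> mask(n_tokens * n_tokens, 0.f);
    for (int q = 0; q < n_tokens; q++) {
        for (int k = 0; k < n_tokens; k++) {
            const bool padded_key = (k >= n_real);
            const bool future_key = (k > q);
            if (padded_key || future_key) {
                mask[q * n_tokens + k] = -INFINITY;
            }
        }
    }

    for (int q = 0; q < n_tokens; q++) {
        for (int k = 0; k < n_tokens; k++) {
            std::printf("%5s ", std::isinf(mask[q * n_tokens + k]) ? "-inf" : "0");
        }
        std::printf("\n");
    }
}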
override { + std::string prompt; + std::pair prompt_attn_range; + std::vector extra_prompts; + std::vector> extra_prompts_attn_range; + std::vector> image_embeds; + int prompt_template_encode_start_idx = 34; + int max_length = 0; // pad tokens + int min_length = 0; // zero pad hidden_states + std::set out_layers; + + int64_t t0 = ggml_time_ms(); + + if (sd_version_is_qwen_image(version)) { + if (llm->enable_vision && !conditioner_params.ref_images.empty()) { + LOG_INFO("QwenImageEditPlusPipeline"); + prompt_template_encode_start_idx = 64; + int image_embed_idx = 64 + 6; + + int min_pixels = 384 * 384; + int max_pixels = 560 * 560; + std::string placeholder = "<|image_pad|>"; + std::string img_prompt; + + for (int i = 0; i < conditioner_params.ref_images.size(); i++) { + sd_image_f32_t image = sd_image_t_to_sd_image_f32_t(*conditioner_params.ref_images[i]); + double factor = llm->params.vision.patch_size * llm->params.vision.spatial_merge_size; + int height = image.height; + int width = image.width; + int h_bar = static_cast(std::round(height / factor) * factor); + int w_bar = static_cast(std::round(width / factor) * factor); + + if (static_cast(h_bar) * w_bar > max_pixels) { + double beta = std::sqrt((height * width) / static_cast(max_pixels)); + h_bar = std::max(static_cast(factor), + static_cast(std::floor(height / beta / factor)) * static_cast(factor)); + w_bar = std::max(static_cast(factor), + static_cast(std::floor(width / beta / factor)) * static_cast(factor)); + } else if (static_cast(h_bar) * w_bar < min_pixels) { + double beta = std::sqrt(static_cast(min_pixels) / (height * width)); + h_bar = static_cast(std::ceil(height * beta / factor)) * static_cast(factor); + w_bar = static_cast(std::ceil(width * beta / factor)) * static_cast(factor); + } + + LOG_DEBUG("resize conditioner ref image %d from %dx%d to %dx%d", i, image.height, image.width, h_bar, w_bar); + + sd_image_f32_t resized_image = clip_preprocess(image, w_bar, h_bar); + free(image.data); + image.data = nullptr; + + ggml_tensor* image_tensor = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, resized_image.width, resized_image.height, 3, 1); + sd_image_f32_to_ggml_tensor(resized_image, image_tensor, false); + free(resized_image.data); + resized_image.data = nullptr; + + ggml_tensor* image_embed = nullptr; + llm->encode_image(n_threads, image_tensor, &image_embed, work_ctx); + image_embeds.emplace_back(image_embed_idx, image_embed); + image_embed_idx += 1 + static_cast(image_embed->ne[1]) + 6; + + img_prompt += "Picture " + std::to_string(i + 1) + ": <|vision_start|>"; // [24669, 220, index, 25, 220, 151652] + int64_t num_image_tokens = image_embed->ne[1]; + img_prompt.reserve(num_image_tokens * placeholder.size()); + for (int j = 0; j < num_image_tokens; j++) { + img_prompt += placeholder; + } + img_prompt += "<|vision_end|>"; + } + + prompt = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. 
Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n"; + prompt += img_prompt; + + prompt_attn_range.first = static_cast(prompt.size()); + prompt += conditioner_params.text; + prompt_attn_range.second = static_cast(prompt.size()); + + prompt += "<|im_end|>\n<|im_start|>assistant\n"; + } else { + prompt_template_encode_start_idx = 34; + + prompt = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n"; + + prompt_attn_range.first = static_cast(prompt.size()); + prompt += conditioner_params.text; + prompt_attn_range.second = static_cast(prompt.size()); + + prompt += "<|im_end|>\n<|im_start|>assistant\n"; + } + } else if (version == VERSION_FLUX2) { + prompt_template_encode_start_idx = 0; + min_length = 512; + out_layers = {10, 20, 30}; + + prompt = "[SYSTEM_PROMPT]You are an AI that reasons about image descriptions. You give structured responses focusing on object relationships, object\nattribution and actions without speculation.[/SYSTEM_PROMPT][INST]"; + + prompt_attn_range.first = static_cast(prompt.size()); + prompt += conditioner_params.text; + prompt_attn_range.second = static_cast(prompt.size()); + + prompt += "[/INST]"; + } else if (sd_version_is_z_image(version)) { + prompt_template_encode_start_idx = 0; + out_layers = {35}; // -2 + + if (!conditioner_params.ref_images.empty()) { + LOG_INFO("ZImageOmniPipeline"); + prompt = "<|im_start|>user\n<|vision_start|>"; + for (int i = 0; i < conditioner_params.ref_images.size() - 1; i++) { + extra_prompts.push_back("<|vision_end|><|vision_start|>"); + } + extra_prompts.push_back("<|vision_end|>" + conditioner_params.text + "<|im_end|>\n<|im_start|>assistant\n<|vision_start|>"); + extra_prompts.push_back("<|vision_end|><|im_end|>"); + } else { + prompt = "<|im_start|>user\n"; + + prompt_attn_range.first = static_cast(prompt.size()); + prompt += conditioner_params.text; + prompt_attn_range.second = static_cast(prompt.size()); + + prompt += "<|im_end|>\n<|im_start|>assistant\n"; + } + } else if (version == VERSION_FLUX2_KLEIN) { + prompt_template_encode_start_idx = 0; + max_length = 512; + out_layers = {9, 18, 27}; + + prompt = "<|im_start|>user\n"; + + prompt_attn_range.first = static_cast(prompt.size()); + prompt += conditioner_params.text; + prompt_attn_range.second = static_cast(prompt.size()); + + prompt += "<|im_end|>\n<|im_start|>assistant\n\n\n\n\n"; + } else if (version == VERSION_OVIS_IMAGE) { + prompt_template_encode_start_idx = 28; + max_length = prompt_template_encode_start_idx + 256; + + prompt = "<|im_start|>user\nDescribe the image by detailing the color, quantity, text, shape, size, texture, spatial relationships of the objects and background:"; + + prompt_attn_range.first = static_cast(prompt.size()); + prompt += " " + conditioner_params.text; + prompt_attn_range.second = static_cast(prompt.size()); + + prompt += "<|im_end|>\n<|im_start|>assistant\n\n\n\n\n"; + } else { + GGML_ABORT("unknown version %d", version); + } + + auto hidden_states = encode_prompt(work_ctx, + n_threads, + prompt, + prompt_attn_range, + max_length, + min_length, + image_embeds, + out_layers, + prompt_template_encode_start_idx); + + std::vector extra_hidden_states_vec; + for (int i = 0; i < extra_prompts.size(); i++) { + auto extra_hidden_states = encode_prompt(work_ctx, + n_threads, + extra_prompts[i], + 
extra_prompts_attn_range[i], + max_length, + min_length, + image_embeds, + out_layers, + prompt_template_encode_start_idx); + extra_hidden_states_vec.push_back(extra_hidden_states); + } int64_t t1 = ggml_time_ms(); LOG_DEBUG("computing condition graph completed, taking %" PRId64 " ms", t1 - t0); - return {new_hidden_states, nullptr, nullptr}; + return {hidden_states, nullptr, nullptr, extra_hidden_states_vec}; } }; diff --git a/control.hpp b/src/control.hpp similarity index 99% rename from control.hpp rename to src/control.hpp index f7842021..5bab0381 100644 --- a/control.hpp +++ b/src/control.hpp @@ -1,8 +1,7 @@ #ifndef __CONTROL_HPP__ #define __CONTROL_HPP__ -#include "common.hpp" -#include "ggml_extend.hpp" +#include "common_block.hpp" #include "model.h" #define CONTROL_NET_GRAPH_SIZE 1536 diff --git a/denoiser.hpp b/src/denoiser.hpp similarity index 81% rename from denoiser.hpp rename to src/denoiser.hpp index 32f40278..40bd7cb7 100644 --- a/denoiser.hpp +++ b/src/denoiser.hpp @@ -1,6 +1,8 @@ #ifndef __DENOISER_HPP__ #define __DENOISER_HPP__ +#include + #include "ggml_extend.hpp" #include "gits_noise.inl" @@ -245,7 +247,7 @@ struct SGMUniformScheduler : SigmaScheduler { int t_max = TIMESTEPS - 1; int t_min = 0; std::vector timesteps = linear_space(static_cast(t_max), static_cast(t_min), n + 1); - for (int i = 0; i < n; i++) { + for (uint32_t i = 0; i < n; i++) { result.push_back(t_to_sigma_func(timesteps[i])); } result.push_back(0.0f); @@ -259,11 +261,11 @@ struct LCMScheduler : SigmaScheduler { result.reserve(n + 1); const int original_steps = 50; const int k = TIMESTEPS / original_steps; - for (int i = 0; i < n; i++) { + for (uint32_t i = 0; i < n; i++) { // the rounding ensures we match the training schedule of the LCM model int index = (i * original_steps) / n; int timestep = (original_steps - index) * k - 1; - result.push_back(t_to_sigma(timestep)); + result.push_back(t_to_sigma(static_cast(timestep))); } result.push_back(0.0f); return result; @@ -276,6 +278,10 @@ struct KarrasScheduler : SigmaScheduler { // but does anybody ever bother to touch them? 
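// Editorial worked example, not part of the diff: the standard Karras ramp this scheduler
// evaluates,
//   sigma_i = ( sigma_max^(1/rho) + i/(n-1) * ( sigma_min^(1/rho) - sigma_max^(1/rho) ) )^rho
// With rho = 7, sigma_min = 0.03, sigma_max = 14.6 and n = 8, the ramp runs from
// sigma_0 = 14.6 down to sigma_7 = 0.03; e.g. sigma_3 is roughly 1.92, already well below
// the arithmetic midpoint of the range (about 7.3), so the steps concentrate at low sigma.
// The appended final entry is always 0, and the sigma_min floor added in this hunk keeps
// sigma_min^(1/rho) strictly positive.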
float rho = 7.f; + if (sigma_min <= 1e-6f) { + sigma_min = 1e-6f; + } + std::vector result(n + 1); float min_inv_rho = pow(sigma_min, (1.f / rho)); @@ -347,6 +353,130 @@ struct SmoothStepScheduler : SigmaScheduler { } }; +struct BongTangentScheduler : SigmaScheduler { + static constexpr float kPi = 3.14159265358979323846f; + + static std::vector get_bong_tangent_sigmas(int steps, float slope, float pivot, float start, float end) { + std::vector sigmas; + if (steps <= 0) { + return sigmas; + } + + float smax = ((2.0f / kPi) * atanf(-slope * (0.0f - pivot)) + 1.0f) * 0.5f; + float smin = ((2.0f / kPi) * atanf(-slope * ((float)(steps - 1) - pivot)) + 1.0f) * 0.5f; + float srange = smax - smin; + float sscale = start - end; + + sigmas.reserve(steps); + + if (fabsf(srange) < 1e-8f) { + if (steps == 1) { + sigmas.push_back(start); + return sigmas; + } + for (int i = 0; i < steps; ++i) { + float t = (float)i / (float)(steps - 1); + sigmas.push_back(start + (end - start) * t); + } + return sigmas; + } + + float inv_srange = 1.0f / srange; + for (int x = 0; x < steps; ++x) { + float v = ((2.0f / kPi) * atanf(-slope * ((float)x - pivot)) + 1.0f) * 0.5f; + float sigma = ((v - smin) * inv_srange) * sscale + end; + sigmas.push_back(sigma); + } + + return sigmas; + } + + std::vector get_sigmas(uint32_t n, float sigma_min, float sigma_max, t_to_sigma_t /*t_to_sigma*/) override { + std::vector result; + if (n == 0) { + return result; + } + + float start = sigma_max; + float end = sigma_min; + float middle = sigma_min + (sigma_max - sigma_min) * 0.5f; + + float pivot_1 = 0.6f; + float pivot_2 = 0.6f; + float slope_1 = 0.2f; + float slope_2 = 0.2f; + + int steps = static_cast(n) + 2; + int midpoint = static_cast(((float)steps * pivot_1 + (float)steps * pivot_2) * 0.5f); + int pivot_1_i = static_cast((float)steps * pivot_1); + int pivot_2_i = static_cast((float)steps * pivot_2); + + float slope_scale = (float)steps / 40.0f; + slope_1 = slope_1 / slope_scale; + slope_2 = slope_2 / slope_scale; + + int stage_2_len = steps - midpoint; + int stage_1_len = steps - stage_2_len; + + std::vector sigmas_1 = get_bong_tangent_sigmas(stage_1_len, slope_1, (float)pivot_1_i, start, middle); + std::vector sigmas_2 = get_bong_tangent_sigmas(stage_2_len, slope_2, (float)(pivot_2_i - stage_1_len), middle, end); + + if (!sigmas_1.empty()) { + sigmas_1.pop_back(); + } + + result.reserve(n + 1); + result.insert(result.end(), sigmas_1.begin(), sigmas_1.end()); + result.insert(result.end(), sigmas_2.begin(), sigmas_2.end()); + + if (result.size() < n + 1) { + while (result.size() < n + 1) { + result.push_back(end); + } + } else if (result.size() > n + 1) { + result.resize(n + 1); + } + + result[n] = 0.0f; + return result; + } +}; + +struct KLOptimalScheduler : SigmaScheduler { + std::vector get_sigmas(uint32_t n, float sigma_min, float sigma_max, t_to_sigma_t t_to_sigma) override { + std::vector sigmas; + + if (n == 0) { + return sigmas; + } + + if (n == 1) { + sigmas.push_back(sigma_max); + sigmas.push_back(0.0f); + return sigmas; + } + + if (sigma_min <= 1e-6f) { + sigma_min = 1e-6f; + } + + sigmas.reserve(n + 1); + + float alpha_min = std::atan(sigma_min); + float alpha_max = std::atan(sigma_max); + + for (uint32_t i = 0; i < n; ++i) { + float t = static_cast(i) / static_cast(n - 1); + float angle = t * alpha_min + (1.0f - t) * alpha_max; + sigmas.push_back(std::tan(angle)); + } + + sigmas.push_back(0.0f); + + return sigmas; + } +}; + struct Denoiser { virtual float sigma_min() = 0; virtual float sigma_max() = 0; @@ -392,6 
+522,14 @@ struct Denoiser { LOG_INFO("get_sigmas with SmoothStep scheduler"); scheduler = std::make_shared(); break; + case BONG_TANGENT_SCHEDULER: + LOG_INFO("get_sigmas with bong_tangent scheduler"); + scheduler = std::make_shared(); + break; + case KL_OPTIMAL_SCHEDULER: + LOG_INFO("get_sigmas with KL Optimal scheduler"); + scheduler = std::make_shared(); + break; case LCM_SCHEDULER: LOG_INFO("get_sigmas with LCM scheduler"); scheduler = std::make_shared(); @@ -482,8 +620,8 @@ struct CompVisVDenoiser : public CompVisDenoiser { }; struct EDMVDenoiser : public CompVisVDenoiser { - float min_sigma = 0.002; - float max_sigma = 120.0; + float min_sigma = 0.002f; + float max_sigma = 120.0f; EDMVDenoiser(float min_sigma = 0.002, float max_sigma = 120.0) : min_sigma(min_sigma), max_sigma(max_sigma) { @@ -494,7 +632,7 @@ struct EDMVDenoiser : public CompVisVDenoiser { } float sigma_to_t(float s) override { - return 0.25 * std::log(s); + return 0.25f * std::log(s); } float sigma_min() override { @@ -519,17 +657,21 @@ struct DiscreteFlowDenoiser : public Denoiser { float sigma_data = 1.0f; - DiscreteFlowDenoiser(float shift = 3.0f) - : shift(shift) { - set_parameters(); + DiscreteFlowDenoiser(float shift = 3.0f) { + set_shift(shift); } void set_parameters() { for (int i = 1; i < TIMESTEPS + 1; i++) { - sigmas[i - 1] = t_to_sigma(i); + sigmas[i - 1] = t_to_sigma(static_cast(i)); } } + void set_shift(float shift) { + this->shift = shift; + set_parameters(); + } + float sigma_min() override { return sigmas[0]; } @@ -569,37 +711,11 @@ struct DiscreteFlowDenoiser : public Denoiser { }; float flux_time_shift(float mu, float sigma, float t) { - return std::exp(mu) / (std::exp(mu) + std::pow((1.0 / t - 1.0), sigma)); + return ::expf(mu) / (::expf(mu) + ::powf((1.0f / t - 1.0f), sigma)); } -struct FluxFlowDenoiser : public Denoiser { - float sigmas[TIMESTEPS]; - float shift = 1.15f; - - float sigma_data = 1.0f; - - FluxFlowDenoiser(float shift = 1.15f) { - set_parameters(shift); - } - - void set_shift(float shift) { - this->shift = shift; - } - - void set_parameters(float shift) { - set_shift(shift); - for (int i = 0; i < TIMESTEPS; i++) { - sigmas[i] = t_to_sigma(i); - } - } - - float sigma_min() override { - return sigmas[0]; - } - - float sigma_max() override { - return sigmas[TIMESTEPS - 1]; - } +struct FluxFlowDenoiser : public DiscreteFlowDenoiser { + FluxFlowDenoiser() = default; float sigma_to_t(float sigma) override { return sigma; @@ -609,26 +725,6 @@ struct FluxFlowDenoiser : public Denoiser { t = t + 1; return flux_time_shift(shift, 1.0f, t / TIMESTEPS); } - - std::vector get_scalings(float sigma) override { - float c_skip = 1.0f; - float c_out = -sigma; - float c_in = 1.0f; - return {c_skip, c_out, c_in}; - } - - // this function will modify noise/latent - ggml_tensor* noise_scaling(float sigma, ggml_tensor* noise, ggml_tensor* latent) override { - ggml_ext_tensor_scale_inplace(noise, sigma); - ggml_ext_tensor_scale_inplace(latent, 1.0f - sigma); - ggml_ext_tensor_add_inplace(latent, noise); - return latent; - } - - ggml_tensor* inverse_noise_scaling(float sigma, ggml_tensor* latent) override { - ggml_ext_tensor_scale_inplace(latent, 1.0f / (1.0f - sigma)); - return latent; - } }; struct Flux2FlowDenoiser : public FluxFlowDenoiser { @@ -830,7 +926,7 @@ static bool sample_k_diffusion(sample_method_t method, for (int i = 0; i < steps; i++) { // denoise - ggml_tensor* denoised = model(x, sigmas[i], i + 1); + ggml_tensor* denoised = model(x, sigmas[i], -(i + 1)); if (denoised == nullptr) { return 
false; } @@ -888,7 +984,7 @@ static bool sample_k_diffusion(sample_method_t method, for (int i = 0; i < steps; i++) { // denoise - ggml_tensor* denoised = model(x, sigmas[i], i + 1); + ggml_tensor* denoised = model(x, sigmas[i], -(i + 1)); if (denoised == nullptr) { return false; } @@ -1284,15 +1380,12 @@ static bool sample_k_diffusion(sample_method_t method, // - pred_sample_direction -> "direction pointing to // x_t" // - pred_prev_sample -> "x_t-1" - int timestep = - roundf(TIMESTEPS - - i * ((float)TIMESTEPS / steps)) - - 1; + int timestep = static_cast(roundf(TIMESTEPS - i * ((float)TIMESTEPS / steps))) - 1; // 1. get previous step value (=t-1) - int prev_timestep = timestep - TIMESTEPS / steps; + int prev_timestep = timestep - TIMESTEPS / static_cast(steps); // The sigma here is chosen to cause the // CompVisDenoiser to produce t = timestep - float sigma = compvis_sigmas[timestep]; + float sigma = static_cast(compvis_sigmas[timestep]); if (i == 0) { // The function add_noise intializes x to // Diffusers' latents * sigma (as in Diffusers' @@ -1349,10 +1442,10 @@ static bool sample_k_diffusion(sample_method_t method, } } // 2. compute alphas, betas - float alpha_prod_t = alphas_cumprod[timestep]; + float alpha_prod_t = static_cast(alphas_cumprod[timestep]); // Note final_alpha_cumprod = alphas_cumprod[0] due to // trailing timestep spacing - float alpha_prod_t_prev = prev_timestep >= 0 ? alphas_cumprod[prev_timestep] : alphas_cumprod[0]; + float alpha_prod_t_prev = static_cast(prev_timestep >= 0 ? alphas_cumprod[prev_timestep] : alphas_cumprod[0]); float beta_prod_t = 1 - alpha_prod_t; // 3. compute predicted original sample from predicted // noise also called "predicted x_0" of formula (12) @@ -1399,8 +1492,8 @@ static bool sample_k_diffusion(sample_method_t method, // Two step inner loop without an explicit // tensor float pred_sample_direction = - std::sqrt(1 - alpha_prod_t_prev - - std::pow(std_dev_t, 2)) * + ::sqrtf(1 - alpha_prod_t_prev - + ::powf(std_dev_t, 2)) * vec_model_output[j]; vec_x[j] = std::sqrt(alpha_prod_t_prev) * vec_pred_original_sample[j] + @@ -1475,7 +1568,7 @@ static bool sample_k_diffusion(sample_method_t method, // Begin k-diffusion specific workaround for // evaluating F_theta(x; ...) from D(x, sigma), same // as in DDIM (and see there for detailed comments) - float sigma = compvis_sigmas[timestep]; + float sigma = static_cast(compvis_sigmas[timestep]); if (i == 0) { float* vec_x = (float*)x->data; for (int j = 0; j < ggml_nelements(x); j++) { @@ -1514,14 +1607,14 @@ static bool sample_k_diffusion(sample_method_t method, // is different from the notation alpha_t in // DPM-Solver. In fact, we have alpha_{t_n} = // \sqrt{\hat{alpha_n}}, [...]" - float alpha_prod_t = alphas_cumprod[timestep]; + float alpha_prod_t = static_cast(alphas_cumprod[timestep]); float beta_prod_t = 1 - alpha_prod_t; // Note final_alpha_cumprod = alphas_cumprod[0] since // TCD is always "trailing" - float alpha_prod_t_prev = prev_timestep >= 0 ? alphas_cumprod[prev_timestep] : alphas_cumprod[0]; + float alpha_prod_t_prev = static_cast(prev_timestep >= 0 ? alphas_cumprod[prev_timestep] : alphas_cumprod[0]); // The subscript _s are the only portion in this // section (2) unique to TCD - float alpha_prod_s = alphas_cumprod[timestep_s]; + float alpha_prod_s = static_cast(alphas_cumprod[timestep_s]); float beta_prod_s = 1 - alpha_prod_s; // 3. 
Compute the predicted noised sample x_s based on // the model parameterization @@ -1594,6 +1687,216 @@ static bool sample_k_diffusion(sample_method_t method, } } } break; + case RES_MULTISTEP_SAMPLE_METHOD: // Res Multistep sampler + { + struct ggml_tensor* noise = ggml_dup_tensor(work_ctx, x); + struct ggml_tensor* old_denoised = ggml_dup_tensor(work_ctx, x); + + bool have_old_sigma = false; + float old_sigma_down = 0.0f; + + auto t_fn = [](float sigma) -> float { return -logf(sigma); }; + auto sigma_fn = [](float t) -> float { return expf(-t); }; + auto phi1_fn = [](float t) -> float { + if (fabsf(t) < 1e-6f) { + return 1.0f + t * 0.5f + (t * t) / 6.0f; + } + return (expf(t) - 1.0f) / t; + }; + auto phi2_fn = [&](float t) -> float { + if (fabsf(t) < 1e-6f) { + return 0.5f + t / 6.0f + (t * t) / 24.0f; + } + float phi1_val = phi1_fn(t); + return (phi1_val - 1.0f) / t; + }; + + for (int i = 0; i < steps; i++) { + ggml_tensor* denoised = model(x, sigmas[i], i + 1); + if (denoised == nullptr) { + return false; + } + + float sigma_from = sigmas[i]; + float sigma_to = sigmas[i + 1]; + float sigma_up = 0.0f; + float sigma_down = sigma_to; + + if (eta > 0.0f) { + float sigma_from_sq = sigma_from * sigma_from; + float sigma_to_sq = sigma_to * sigma_to; + if (sigma_from_sq > 0.0f) { + float term = sigma_to_sq * (sigma_from_sq - sigma_to_sq) / sigma_from_sq; + if (term > 0.0f) { + sigma_up = eta * std::sqrt(term); + } + } + sigma_up = std::min(sigma_up, sigma_to); + float sigma_down_sq = sigma_to_sq - sigma_up * sigma_up; + sigma_down = sigma_down_sq > 0.0f ? std::sqrt(sigma_down_sq) : 0.0f; + } + + if (sigma_down == 0.0f || !have_old_sigma) { + float dt = sigma_down - sigma_from; + float* vec_x = (float*)x->data; + float* vec_denoised = (float*)denoised->data; + + for (int j = 0; j < ggml_nelements(x); j++) { + float d = (vec_x[j] - vec_denoised[j]) / sigma_from; + vec_x[j] = vec_x[j] + d * dt; + } + } else { + float t = t_fn(sigma_from); + float t_old = t_fn(old_sigma_down); + float t_next = t_fn(sigma_down); + float t_prev = t_fn(sigmas[i - 1]); + float h = t_next - t; + float c2 = (t_prev - t_old) / h; + + float phi1_val = phi1_fn(-h); + float phi2_val = phi2_fn(-h); + float b1 = phi1_val - phi2_val / c2; + float b2 = phi2_val / c2; + + if (!std::isfinite(b1)) { + b1 = 0.0f; + } + if (!std::isfinite(b2)) { + b2 = 0.0f; + } + + float sigma_h = sigma_fn(h); + float* vec_x = (float*)x->data; + float* vec_denoised = (float*)denoised->data; + float* vec_old_denoised = (float*)old_denoised->data; + + for (int j = 0; j < ggml_nelements(x); j++) { + vec_x[j] = sigma_h * vec_x[j] + h * (b1 * vec_denoised[j] + b2 * vec_old_denoised[j]); + } + } + + if (sigmas[i + 1] > 0 && sigma_up > 0.0f) { + ggml_ext_im_set_randn_f32(noise, rng); + float* vec_x = (float*)x->data; + float* vec_noise = (float*)noise->data; + + for (int j = 0; j < ggml_nelements(x); j++) { + vec_x[j] = vec_x[j] + vec_noise[j] * sigma_up; + } + } + + float* vec_old_denoised = (float*)old_denoised->data; + float* vec_denoised = (float*)denoised->data; + for (int j = 0; j < ggml_nelements(x); j++) { + vec_old_denoised[j] = vec_denoised[j]; + } + + old_sigma_down = sigma_down; + have_old_sigma = true; + } + } break; + case RES_2S_SAMPLE_METHOD: // Res 2s sampler + { + struct ggml_tensor* noise = ggml_dup_tensor(work_ctx, x); + struct ggml_tensor* x0 = ggml_dup_tensor(work_ctx, x); + struct ggml_tensor* x2 = ggml_dup_tensor(work_ctx, x); + + const float c2 = 0.5f; + auto t_fn = [](float sigma) -> float { return -logf(sigma); }; + auto phi1_fn 
= [](float t) -> float { + if (fabsf(t) < 1e-6f) { + return 1.0f + t * 0.5f + (t * t) / 6.0f; + } + return (expf(t) - 1.0f) / t; + }; + auto phi2_fn = [&](float t) -> float { + if (fabsf(t) < 1e-6f) { + return 0.5f + t / 6.0f + (t * t) / 24.0f; + } + float phi1_val = phi1_fn(t); + return (phi1_val - 1.0f) / t; + }; + + for (int i = 0; i < steps; i++) { + float sigma_from = sigmas[i]; + float sigma_to = sigmas[i + 1]; + + ggml_tensor* denoised = model(x, sigma_from, -(i + 1)); + if (denoised == nullptr) { + return false; + } + + float sigma_up = 0.0f; + float sigma_down = sigma_to; + if (eta > 0.0f) { + float sigma_from_sq = sigma_from * sigma_from; + float sigma_to_sq = sigma_to * sigma_to; + if (sigma_from_sq > 0.0f) { + float term = sigma_to_sq * (sigma_from_sq - sigma_to_sq) / sigma_from_sq; + if (term > 0.0f) { + sigma_up = eta * std::sqrt(term); + } + } + sigma_up = std::min(sigma_up, sigma_to); + float sigma_down_sq = sigma_to_sq - sigma_up * sigma_up; + sigma_down = sigma_down_sq > 0.0f ? std::sqrt(sigma_down_sq) : 0.0f; + } + + float* vec_x = (float*)x->data; + float* vec_x0 = (float*)x0->data; + for (int j = 0; j < ggml_nelements(x); j++) { + vec_x0[j] = vec_x[j]; + } + + if (sigma_down == 0.0f || sigma_from == 0.0f) { + float* vec_denoised = (float*)denoised->data; + for (int j = 0; j < ggml_nelements(x); j++) { + vec_x[j] = vec_denoised[j]; + } + } else { + float t = t_fn(sigma_from); + float t_next = t_fn(sigma_down); + float h = t_next - t; + + float a21 = c2 * phi1_fn(-h * c2); + float phi1_val = phi1_fn(-h); + float phi2_val = phi2_fn(-h); + float b2 = phi2_val / c2; + float b1 = phi1_val - b2; + + float sigma_c2 = expf(-(t + h * c2)); + + float* vec_denoised = (float*)denoised->data; + float* vec_x2 = (float*)x2->data; + for (int j = 0; j < ggml_nelements(x); j++) { + float eps1 = vec_denoised[j] - vec_x0[j]; + vec_x2[j] = vec_x0[j] + h * a21 * eps1; + } + + ggml_tensor* denoised2 = model(x2, sigma_c2, i + 1); + if (denoised2 == nullptr) { + return false; + } + float* vec_denoised2 = (float*)denoised2->data; + + for (int j = 0; j < ggml_nelements(x); j++) { + float eps1 = vec_denoised[j] - vec_x0[j]; + float eps2 = vec_denoised2[j] - vec_x0[j]; + vec_x[j] = vec_x0[j] + h * (b1 * eps1 + b2 * eps2); + } + } + + if (sigmas[i + 1] > 0 && sigma_up > 0.0f) { + ggml_ext_im_set_randn_f32(noise, rng); + float* vec_x = (float*)x->data; + float* vec_noise = (float*)noise->data; + + for (int j = 0; j < ggml_nelements(x); j++) { + vec_x[j] = vec_x[j] + vec_noise[j] * sigma_up; + } + } + } + } break; default: LOG_ERROR("Attempting to sample with nonexisting sample method %i", method); diff --git a/diffusion_model.hpp b/src/diffusion_model.hpp similarity index 79% rename from diffusion_model.hpp rename to src/diffusion_model.hpp index 8c741fdc..329bb9d9 100644 --- a/diffusion_model.hpp +++ b/src/diffusion_model.hpp @@ -1,6 +1,7 @@ #ifndef __DIFFUSION_MODEL_H__ #define __DIFFUSION_MODEL_H__ +#include "anima.hpp" #include "flux.hpp" #include "mmdit.hpp" #include "qwen_image.hpp" @@ -37,8 +38,9 @@ struct DiffusionModel { virtual void get_param_tensors(std::map& tensors) = 0; virtual size_t get_params_buffer_size() = 0; virtual void set_weight_adapter(const std::shared_ptr& adapter){}; - virtual int64_t get_adm_in_channels() = 0; - virtual void set_flash_attn_enabled(bool enabled) = 0; + virtual int64_t get_adm_in_channels() = 0; + virtual void set_flash_attention_enabled(bool enabled) = 0; + virtual void set_circular_axes(bool circular_x, bool circular_y) = 0; }; struct UNetModel : public 
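// [Editor's note, illustration only] The two-stage update above is an exponential integrator in
// t = -log(sigma): the intermediate sample sits at c2 = 0.5 of the log-sigma step, and the final
// combination uses phi1/phi2 weights. A scalar sketch of just the coefficient computation,
// mirroring the lambdas above (helper name is hypothetical):
#include <cmath>
static void sketch_res_2s_coeffs(float sigma, float sigma_down, float c2,
                                 float* a21, float* b1, float* b2) {
    auto phi1 = [](float v) { return std::fabs(v) < 1e-6f ? 1.0f + 0.5f * v + v * v / 6.0f
                                                          : (std::exp(v) - 1.0f) / v; };
    auto phi2 = [&](float v) { return std::fabs(v) < 1e-6f ? 0.5f + v / 6.0f + v * v / 24.0f
                                                           : (phi1(v) - 1.0f) / v; };
    float t      = -std::log(sigma);
    float t_next = -std::log(sigma_down);
    float h      = t_next - t;
    *a21 = c2 * phi1(-h * c2);  // weight of the first denoiser residual for the midpoint sample
    *b2  = phi2(-h) / c2;       // weight of the midpoint residual in the final update
    *b1  = phi1(-h) - *b2;      // weight of the first residual in the final update
}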
DiffusionModel { @@ -83,10 +85,14 @@ struct UNetModel : public DiffusionModel { return unet.unet.adm_in_channels; } - void set_flash_attn_enabled(bool enabled) { + void set_flash_attention_enabled(bool enabled) { unet.set_flash_attention_enabled(enabled); } + void set_circular_axes(bool circular_x, bool circular_y) override { + unet.set_circular_axes(circular_x, circular_y); + } + bool compute(int n_threads, DiffusionParams diffusion_params, struct ggml_tensor** output = nullptr, @@ -144,10 +150,14 @@ struct MMDiTModel : public DiffusionModel { return 768 + 1280; } - void set_flash_attn_enabled(bool enabled) { + void set_flash_attention_enabled(bool enabled) { mmdit.set_flash_attention_enabled(enabled); } + void set_circular_axes(bool circular_x, bool circular_y) override { + mmdit.set_circular_axes(circular_x, circular_y); + } + bool compute(int n_threads, DiffusionParams diffusion_params, struct ggml_tensor** output = nullptr, @@ -206,10 +216,14 @@ struct FluxModel : public DiffusionModel { return 768; } - void set_flash_attn_enabled(bool enabled) { + void set_flash_attention_enabled(bool enabled) { flux.set_flash_attention_enabled(enabled); } + void set_circular_axes(bool circular_x, bool circular_y) override { + flux.set_circular_axes(circular_x, circular_y); + } + bool compute(int n_threads, DiffusionParams diffusion_params, struct ggml_tensor** output = nullptr, @@ -229,6 +243,72 @@ struct FluxModel : public DiffusionModel { } }; +struct AnimaModel : public DiffusionModel { + std::string prefix; + Anima::AnimaRunner anima; + + AnimaModel(ggml_backend_t backend, + bool offload_params_to_cpu, + const String2TensorStorage& tensor_storage_map = {}, + const std::string prefix = "model.diffusion_model") + : prefix(prefix), anima(backend, offload_params_to_cpu, tensor_storage_map, prefix) { + } + + std::string get_desc() override { + return anima.get_desc(); + } + + void alloc_params_buffer() override { + anima.alloc_params_buffer(); + } + + void free_params_buffer() override { + anima.free_params_buffer(); + } + + void free_compute_buffer() override { + anima.free_compute_buffer(); + } + + void get_param_tensors(std::map& tensors) override { + anima.get_param_tensors(tensors, prefix); + } + + size_t get_params_buffer_size() override { + return anima.get_params_buffer_size(); + } + + void set_weight_adapter(const std::shared_ptr& adapter) override { + anima.set_weight_adapter(adapter); + } + + int64_t get_adm_in_channels() override { + return 768; + } + + void set_flash_attention_enabled(bool enabled) { + anima.set_flash_attention_enabled(enabled); + } + + void set_circular_axes(bool circular_x, bool circular_y) override { + anima.set_circular_axes(circular_x, circular_y); + } + + bool compute(int n_threads, + DiffusionParams diffusion_params, + struct ggml_tensor** output = nullptr, + struct ggml_context* output_ctx = nullptr) override { + return anima.compute(n_threads, + diffusion_params.x, + diffusion_params.timesteps, + diffusion_params.context, + diffusion_params.c_concat, + diffusion_params.y, + output, + output_ctx); + } +}; + struct WanModel : public DiffusionModel { std::string prefix; WAN::WanRunner wan; @@ -273,10 +353,14 @@ struct WanModel : public DiffusionModel { return 768; } - void set_flash_attn_enabled(bool enabled) { + void set_flash_attention_enabled(bool enabled) { wan.set_flash_attention_enabled(enabled); } + void set_circular_axes(bool circular_x, bool circular_y) override { + wan.set_circular_axes(circular_x, circular_y); + } + bool compute(int n_threads, 
DiffusionParams diffusion_params, struct ggml_tensor** output = nullptr, @@ -303,8 +387,9 @@ struct QwenImageModel : public DiffusionModel { bool offload_params_to_cpu, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "model.diffusion_model", - SDVersion version = VERSION_QWEN_IMAGE) - : prefix(prefix), qwen_image(backend, offload_params_to_cpu, tensor_storage_map, prefix, version) { + SDVersion version = VERSION_QWEN_IMAGE, + bool zero_cond_t = false) + : prefix(prefix), qwen_image(backend, offload_params_to_cpu, tensor_storage_map, prefix, version, zero_cond_t) { } std::string get_desc() override { @@ -339,10 +424,14 @@ struct QwenImageModel : public DiffusionModel { return 768; } - void set_flash_attn_enabled(bool enabled) { + void set_flash_attention_enabled(bool enabled) { qwen_image.set_flash_attention_enabled(enabled); } + void set_circular_axes(bool circular_x, bool circular_y) override { + qwen_image.set_circular_axes(circular_x, circular_y); + } + bool compute(int n_threads, DiffusionParams diffusion_params, struct ggml_tensor** output = nullptr, @@ -402,10 +491,14 @@ struct ZImageModel : public DiffusionModel { return 768; } - void set_flash_attn_enabled(bool enabled) { + void set_flash_attention_enabled(bool enabled) { z_image.set_flash_attention_enabled(enabled); } + void set_circular_axes(bool circular_x, bool circular_y) override { + z_image.set_circular_axes(circular_x, circular_y); + } + bool compute(int n_threads, DiffusionParams diffusion_params, struct ggml_tensor** output = nullptr, diff --git a/easycache.hpp b/src/easycache.hpp similarity index 100% rename from easycache.hpp rename to src/easycache.hpp diff --git a/esrgan.hpp b/src/esrgan.hpp similarity index 98% rename from esrgan.hpp rename to src/esrgan.hpp index 961e84f8..f740c2bc 100644 --- a/esrgan.hpp +++ b/src/esrgan.hpp @@ -51,7 +51,7 @@ public: x_cat = ggml_concat(ctx->ggml_ctx, x_cat, x4, 2); auto x5 = conv5->forward(ctx, x_cat); - x5 = ggml_add(ctx->ggml_ctx, ggml_scale(ctx->ggml_ctx, x5, 0.2f), x); + x5 = ggml_add(ctx->ggml_ctx, ggml_ext_scale(ctx->ggml_ctx, x5, 0.2f), x); return x5; } }; @@ -76,7 +76,7 @@ public: out = rdb2->forward(ctx, out); out = rdb3->forward(ctx, out); - out = ggml_add(ctx->ggml_ctx, ggml_scale(ctx->ggml_ctx, out, 0.2f), x); + out = ggml_add(ctx->ggml_ctx, ggml_ext_scale(ctx->ggml_ctx, out, 0.2f), x); return out; } }; diff --git a/flux.hpp b/src/flux.hpp similarity index 83% rename from flux.hpp rename to src/flux.hpp index 1df2874a..1204ae1e 100644 --- a/flux.hpp +++ b/src/flux.hpp @@ -4,7 +4,7 @@ #include #include -#include "ggml_extend.hpp" +#include "common_dit.hpp" #include "model.h" #include "rope.hpp" @@ -103,11 +103,13 @@ namespace Flux { auto norm = std::dynamic_pointer_cast(blocks["norm"]); auto qkv = qkv_proj->forward(ctx, x); - auto qkv_vec = split_qkv(ctx->ggml_ctx, qkv); - int64_t head_dim = qkv_vec[0]->ne[0] / num_heads; - auto q = ggml_reshape_4d(ctx->ggml_ctx, qkv_vec[0], head_dim, num_heads, qkv_vec[0]->ne[1], qkv_vec[0]->ne[2]); - auto k = ggml_reshape_4d(ctx->ggml_ctx, qkv_vec[1], head_dim, num_heads, qkv_vec[1]->ne[1], qkv_vec[1]->ne[2]); - auto v = ggml_reshape_4d(ctx->ggml_ctx, qkv_vec[2], head_dim, num_heads, qkv_vec[2]->ne[1], qkv_vec[2]->ne[2]); + int64_t head_dim = qkv->ne[0] / 3 / num_heads; + auto q = ggml_view_4d(ctx->ggml_ctx, qkv, head_dim, num_heads, qkv->ne[1], qkv->ne[2], + qkv->nb[0] * head_dim, qkv->nb[1], qkv->nb[2], 0); + auto k = ggml_view_4d(ctx->ggml_ctx, qkv, head_dim, num_heads, qkv->ne[1], qkv->ne[2], + 
qkv->nb[0] * head_dim, qkv->nb[1], qkv->nb[2], (qkv->nb[0]) * qkv->ne[0] / 3); + auto v = ggml_view_4d(ctx->ggml_ctx, qkv, head_dim, num_heads, qkv->ne[1], qkv->ne[2], + qkv->nb[0] * head_dim, qkv->nb[1], qkv->nb[2], (qkv->nb[0]) * 2 * qkv->ne[0] / 3); q = norm->query_norm(ctx, q); k = norm->key_norm(ctx, k); return {q, k, v}; @@ -153,7 +155,7 @@ namespace Flux { if (use_mlp_silu_act) { x = ggml_ext_silu_act(ctx->ggml_ctx, x); } else { - x = ggml_gelu_inplace(ctx->ggml_ctx, x); + x = ggml_ext_gelu(ctx->ggml_ctx, x, true); } x = mlp_2->forward(ctx, x); return x; @@ -233,14 +235,17 @@ namespace Flux { __STATIC_INLINE__ struct ggml_tensor* modulate(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* shift, - struct ggml_tensor* scale) { + struct ggml_tensor* scale, + bool skip_reshape = false) { // x: [N, L, C] // scale: [N, C] // shift: [N, C] - scale = ggml_reshape_3d(ctx, scale, scale->ne[0], 1, scale->ne[1]); // [N, 1, C] - shift = ggml_reshape_3d(ctx, shift, shift->ne[0], 1, shift->ne[1]); // [N, 1, C] - x = ggml_add(ctx, x, ggml_mul(ctx, x, scale)); - x = ggml_add(ctx, x, shift); + if (!skip_reshape) { + scale = ggml_reshape_3d(ctx, scale, scale->ne[0], 1, scale->ne[1]); // [N, 1, C] + shift = ggml_reshape_3d(ctx, shift, shift->ne[0], 1, shift->ne[1]); // [N, 1, C] + } + x = ggml_add(ctx, x, ggml_mul(ctx, x, scale)); + x = ggml_add(ctx, x, shift); return x; } @@ -260,7 +265,7 @@ namespace Flux { bool use_yak_mlp = false, bool use_mlp_silu_act = false) : idx(idx), prune_mod(prune_mod) { - int64_t mlp_hidden_dim = hidden_size * mlp_ratio; + int64_t mlp_hidden_dim = static_cast(hidden_size * mlp_ratio); if (!prune_mod && !share_modulation) { blocks["img_mod"] = std::shared_ptr(new Modulation(hidden_size, true)); @@ -373,26 +378,23 @@ namespace Flux { auto k = ggml_concat(ctx->ggml_ctx, txt_k, img_k, 2); // [N, n_txt_token + n_img_token, n_head, d_head] auto v = ggml_concat(ctx->ggml_ctx, txt_v, img_v, 2); // [N, n_txt_token + n_img_token, n_head, d_head] - auto attn = Rope::attention(ctx, q, k, v, pe, mask); // [N, n_txt_token + n_img_token, n_head*d_head] - attn = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, attn, 0, 2, 1, 3)); // [n_txt_token + n_img_token, N, hidden_size] + auto attn = Rope::attention(ctx, q, k, v, pe, mask); // [N, n_txt_token + n_img_token, n_head*d_head] auto txt_attn_out = ggml_view_3d(ctx->ggml_ctx, attn, attn->ne[0], - attn->ne[1], txt->ne[1], + attn->ne[2], attn->nb[1], attn->nb[2], - 0); // [n_txt_token, N, hidden_size] - txt_attn_out = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, txt_attn_out, 0, 2, 1, 3)); // [N, n_txt_token, hidden_size] + 0); // [N, n_txt_token, hidden_size] auto img_attn_out = ggml_view_3d(ctx->ggml_ctx, attn, attn->ne[0], - attn->ne[1], img->ne[1], + attn->ne[2], attn->nb[1], attn->nb[2], - attn->nb[2] * txt->ne[1]); // [n_img_token, N, hidden_size] - img_attn_out = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, img_attn_out, 0, 2, 1, 3)); // [N, n_img_token, hidden_size] + txt->ne[1] * attn->nb[1]); // [N, n_img_token, hidden_size] // calculate the img bloks img = ggml_add(ctx->ggml_ctx, img, ggml_mul(ctx->ggml_ctx, img_attn->post_attention(ctx, img_attn_out), img_mod1.gate)); @@ -439,7 +441,7 @@ namespace Flux { if (scale <= 0.f) { scale = 1 / sqrt((float)head_dim); } - mlp_hidden_dim = hidden_size * mlp_ratio; + mlp_hidden_dim = static_cast(hidden_size * mlp_ratio); mlp_mult_factor = 1; if (use_yak_mlp || use_mlp_silu_act) { mlp_mult_factor = 2; @@ -489,43 +491,28 @@ namespace Flux { } auto x_mod = 
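// [Editor's note, not part of the patch] The ggml_view_4d calls above replace the old
// split_qkv + reshape path: q, k and v are read straight out of the fused projection
// [N, n_token, 3*hidden_size] as strided views that differ only in their byte offset,
// so no extra copies are made. Schematically (hypothetical helper, contiguous qkv assumed):
#include "ggml.h"
static void sketch_split_fused_qkv(struct ggml_context* ctx, struct ggml_tensor* qkv, int64_t num_heads,
                                   struct ggml_tensor** q, struct ggml_tensor** k, struct ggml_tensor** v) {
    int64_t hidden   = qkv->ne[0] / 3;  // fused dim 0 holds [q | k | v]
    int64_t head_dim = hidden / num_heads;
    // each view is [d_head, n_head, n_token, N]; only the offset into dim 0 changes
    *q = ggml_view_4d(ctx, qkv, head_dim, num_heads, qkv->ne[1], qkv->ne[2],
                      qkv->nb[0] * head_dim, qkv->nb[1], qkv->nb[2], 0);
    *k = ggml_view_4d(ctx, qkv, head_dim, num_heads, qkv->ne[1], qkv->ne[2],
                      qkv->nb[0] * head_dim, qkv->nb[1], qkv->nb[2], qkv->nb[0] * hidden);
    *v = ggml_view_4d(ctx, qkv, head_dim, num_heads, qkv->ne[1], qkv->ne[2],
                      qkv->nb[0] * head_dim, qkv->nb[1], qkv->nb[2], qkv->nb[0] * 2 * hidden);
}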
Flux::modulate(ctx->ggml_ctx, pre_norm->forward(ctx, x), mod.shift, mod.scale); - auto qkv_mlp = linear1->forward(ctx, x_mod); // [N, n_token, hidden_size * 3 + mlp_hidden_dim] - qkv_mlp = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, qkv_mlp, 2, 0, 1, 3)); // [hidden_size * 3 + mlp_hidden_dim, N, n_token] + auto qkv_mlp = linear1->forward(ctx, x_mod); // [N, n_token, hidden_size * 3 + mlp_hidden_dim*mlp_mult_factor] - auto qkv = ggml_view_3d(ctx->ggml_ctx, - qkv_mlp, - qkv_mlp->ne[0], - qkv_mlp->ne[1], - hidden_size * 3, - qkv_mlp->nb[1], - qkv_mlp->nb[2], - 0); // [hidden_size * 3 , N, n_token] - qkv = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, qkv, 1, 2, 0, 3)); // [N, n_token, hidden_size * 3] - auto mlp = ggml_view_3d(ctx->ggml_ctx, - qkv_mlp, - qkv_mlp->ne[0], - qkv_mlp->ne[1], - mlp_hidden_dim * mlp_mult_factor, - qkv_mlp->nb[1], - qkv_mlp->nb[2], - qkv_mlp->nb[2] * hidden_size * 3); // [mlp_hidden_dim*mlp_mult_factor , N, n_token] - mlp = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, mlp, 1, 2, 0, 3)); // [N, n_token, mlp_hidden_dim*mlp_mult_factor] - - auto qkv_vec = split_qkv(ctx->ggml_ctx, qkv); // q,k,v: [N, n_token, hidden_size] int64_t head_dim = hidden_size / num_heads; - auto q = ggml_reshape_4d(ctx->ggml_ctx, qkv_vec[0], head_dim, num_heads, qkv_vec[0]->ne[1], qkv_vec[0]->ne[2]); // [N, n_token, n_head, d_head] - auto k = ggml_reshape_4d(ctx->ggml_ctx, qkv_vec[1], head_dim, num_heads, qkv_vec[1]->ne[1], qkv_vec[1]->ne[2]); // [N, n_token, n_head, d_head] - auto v = ggml_reshape_4d(ctx->ggml_ctx, qkv_vec[2], head_dim, num_heads, qkv_vec[2]->ne[1], qkv_vec[2]->ne[2]); // [N, n_token, n_head, d_head] - q = norm->query_norm(ctx, q); - k = norm->key_norm(ctx, k); - auto attn = Rope::attention(ctx, q, k, v, pe, mask); // [N, n_token, hidden_size] + auto q = ggml_view_4d(ctx->ggml_ctx, qkv_mlp, head_dim, num_heads, qkv_mlp->ne[1], qkv_mlp->ne[2], + qkv_mlp->nb[0] * head_dim, qkv_mlp->nb[1], qkv_mlp->nb[2], 0); + auto k = ggml_view_4d(ctx->ggml_ctx, qkv_mlp, head_dim, num_heads, qkv_mlp->ne[1], qkv_mlp->ne[2], + qkv_mlp->nb[0] * head_dim, qkv_mlp->nb[1], qkv_mlp->nb[2], (qkv_mlp->nb[0]) * hidden_size); + auto v = ggml_view_4d(ctx->ggml_ctx, qkv_mlp, head_dim, num_heads, qkv_mlp->ne[1], qkv_mlp->ne[2], + qkv_mlp->nb[0] * head_dim, qkv_mlp->nb[1], qkv_mlp->nb[2], (qkv_mlp->nb[0]) * 2 * hidden_size); + + q = norm->query_norm(ctx, q); + k = norm->key_norm(ctx, k); + auto attn = Rope::attention(ctx, q, k, v, pe, mask); // [N, n_token, hidden_size] + + auto mlp = ggml_view_3d(ctx->ggml_ctx, qkv_mlp, mlp_hidden_dim * mlp_mult_factor, qkv_mlp->ne[1], qkv_mlp->ne[2], qkv_mlp->nb[1], qkv_mlp->nb[2], hidden_size * 3 * qkv_mlp->nb[0]); if (use_yak_mlp) { mlp = ggml_ext_silu_act(ctx->ggml_ctx, mlp, false); } else if (use_mlp_silu_act) { mlp = ggml_ext_silu_act(ctx->ggml_ctx, mlp); } else { - mlp = ggml_gelu_inplace(ctx->ggml_ctx, mlp); + mlp = ggml_ext_gelu(ctx->ggml_ctx, mlp, true); } auto attn_mlp = ggml_concat(ctx->ggml_ctx, attn, mlp, 0); // [N, n_token, hidden_size + mlp_hidden_dim] auto output = linear2->forward(ctx, attn_mlp); // [N, n_token, hidden_size] @@ -577,13 +564,10 @@ namespace Flux { } else { auto adaLN_modulation_1 = std::dynamic_pointer_cast(blocks["adaLN_modulation.1"]); - auto m = adaLN_modulation_1->forward(ctx, ggml_silu(ctx->ggml_ctx, c)); // [N, 2 * hidden_size] - m = ggml_reshape_3d(ctx->ggml_ctx, m, c->ne[0], 2, c->ne[1]); // [N, 2, hidden_size] - m = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, m, 0, 2, 1, 3)); // [2, N, hidden_size] 
- - int64_t offset = m->nb[1] * m->ne[1]; - shift = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 0); // [N, hidden_size] - scale = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 1); // [N, hidden_size] + auto m = adaLN_modulation_1->forward(ctx, ggml_silu(ctx->ggml_ctx, c)); // [N, 2 * hidden_size] + auto m_vec = ggml_ext_chunk(ctx->ggml_ctx, m, 2, 0); + shift = m_vec[0]; // [N, hidden_size] + scale = m_vec[1]; // [N, hidden_size] } x = Flux::modulate(ctx->ggml_ctx, norm_final->forward(ctx, x), shift, scale); @@ -741,36 +725,38 @@ namespace Flux { struct ChromaRadianceParams { int64_t nerf_hidden_size = 64; - int64_t nerf_mlp_ratio = 4; - int64_t nerf_depth = 4; - int64_t nerf_max_freqs = 8; + int nerf_mlp_ratio = 4; + int nerf_depth = 4; + int nerf_max_freqs = 8; + bool use_x0 = false; + bool fake_patch_size_x2 = false; }; struct FluxParams { - SDVersion version = VERSION_FLUX; - bool is_chroma = false; - int64_t patch_size = 2; - int64_t in_channels = 64; - int64_t out_channels = 64; - int64_t vec_in_dim = 768; - int64_t context_in_dim = 4096; - int64_t hidden_size = 3072; - float mlp_ratio = 4.0f; - int64_t num_heads = 24; - int64_t depth = 19; - int64_t depth_single_blocks = 38; - std::vector axes_dim = {16, 56, 56}; - int64_t axes_dim_sum = 128; - int theta = 10000; - bool qkv_bias = true; - bool guidance_embed = true; - int64_t in_dim = 64; - bool disable_bias = false; - bool share_modulation = false; - bool semantic_txt_norm = false; - bool use_yak_mlp = false; - bool use_mlp_silu_act = false; - float ref_index_scale = 1.f; + SDVersion version = VERSION_FLUX; + bool is_chroma = false; + int patch_size = 2; + int64_t in_channels = 64; + int64_t out_channels = 64; + int64_t vec_in_dim = 768; + int64_t context_in_dim = 4096; + int64_t hidden_size = 3072; + float mlp_ratio = 4.0f; + int num_heads = 24; + int depth = 19; + int depth_single_blocks = 38; + std::vector axes_dim = {16, 56, 56}; + int axes_dim_sum = 128; + int theta = 10000; + bool qkv_bias = true; + bool guidance_embed = true; + int64_t in_dim = 64; + bool disable_bias = false; + bool share_modulation = false; + bool semantic_txt_norm = false; + bool use_yak_mlp = false; + bool use_mlp_silu_act = false; + float ref_index_scale = 1.f; ChromaRadianceParams chroma_radiance_params; }; @@ -781,8 +767,11 @@ namespace Flux { Flux(FluxParams params) : params(params) { if (params.version == VERSION_CHROMA_RADIANCE) { - std::pair kernel_size = {(int)params.patch_size, (int)params.patch_size}; - std::pair stride = kernel_size; + std::pair kernel_size = {params.patch_size, params.patch_size}; + if (params.chroma_radiance_params.fake_patch_size_x2) { + kernel_size = {params.patch_size / 2, params.patch_size / 2}; + } + std::pair stride = kernel_size; blocks["img_in_patch"] = std::make_shared(params.in_channels, params.hidden_size, @@ -858,70 +847,6 @@ namespace Flux { } } - struct ggml_tensor* pad_to_patch_size(struct ggml_context* ctx, - struct ggml_tensor* x) { - int64_t W = x->ne[0]; - int64_t H = x->ne[1]; - - int pad_h = (params.patch_size - H % params.patch_size) % params.patch_size; - int pad_w = (params.patch_size - W % params.patch_size) % params.patch_size; - x = ggml_pad(ctx, x, pad_w, pad_h, 0, 0); // [N, C, H + pad_h, W + pad_w] - return x; - } - - struct ggml_tensor* patchify(struct ggml_context* ctx, - struct ggml_tensor* x) { - // x: [N, C, H, W] - // return: [N, h*w, C * patch_size * patch_size] - int64_t N = x->ne[3]; - int64_t C = x->ne[2]; - int64_t H = x->ne[1]; - 
int64_t W = x->ne[0]; - int64_t p = params.patch_size; - int64_t h = H / params.patch_size; - int64_t w = W / params.patch_size; - - GGML_ASSERT(h * p == H && w * p == W); - - x = ggml_reshape_4d(ctx, x, p, w, p, h * C * N); // [N*C*h, p, w, p] - x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, w, p, p] - x = ggml_reshape_4d(ctx, x, p * p, w * h, C, N); // [N, C, h*w, p*p] - x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N, h*w, C, p*p] - x = ggml_reshape_3d(ctx, x, p * p * C, w * h, N); // [N, h*w, C*p*p] - return x; - } - - struct ggml_tensor* process_img(struct ggml_context* ctx, - struct ggml_tensor* x) { - // img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size) - x = pad_to_patch_size(ctx, x); - x = patchify(ctx, x); - return x; - } - - struct ggml_tensor* unpatchify(struct ggml_context* ctx, - struct ggml_tensor* x, - int64_t h, - int64_t w) { - // x: [N, h*w, C*patch_size*patch_size] - // return: [N, C, H, W] - int64_t N = x->ne[2]; - int64_t C = x->ne[0] / params.patch_size / params.patch_size; - int64_t H = h * params.patch_size; - int64_t W = w * params.patch_size; - int64_t p = params.patch_size; - - GGML_ASSERT(C * p * p == x->ne[0]); - - x = ggml_reshape_4d(ctx, x, p * p, C, w * h, N); // [N, h*w, C, p*p] - x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N, C, h*w, p*p] - x = ggml_reshape_4d(ctx, x, p, p, w, h * C * N); // [N*C*h, w, p, p] - x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, p, w, p] - x = ggml_reshape_4d(ctx, x, W, H, C, N); // [N, C, h*p, w*p] - - return x; - } - struct ggml_tensor* forward_orig(GGMLRunnerContext* ctx, struct ggml_tensor* img, struct ggml_tensor* txt, @@ -964,7 +889,7 @@ namespace Flux { vec = approx->forward(ctx, vec); // [344, N, hidden_size] if (y != nullptr) { - txt_img_mask = ggml_pad(ctx->ggml_ctx, y, img->ne[1], 0, 0, 0); + txt_img_mask = ggml_pad(ctx->ggml_ctx, y, static_cast(img->ne[1]), 0, 0, 0); } } else { auto time_in = std::dynamic_pointer_cast(blocks["time_in"]); @@ -1026,16 +951,14 @@ namespace Flux { txt_img = block->forward(ctx, txt_img, vec, pe, txt_img_mask, ss_mods); } - txt_img = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, txt_img, 0, 2, 1, 3)); // [n_txt_token + n_img_token, N, hidden_size] - img = ggml_view_3d(ctx->ggml_ctx, - txt_img, - txt_img->ne[0], - txt_img->ne[1], - img->ne[1], - txt_img->nb[1], - txt_img->nb[2], - txt_img->nb[2] * txt->ne[1]); // [n_img_token, N, hidden_size] - img = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, img, 0, 2, 1, 3)); // [N, n_img_token, hidden_size] + img = ggml_view_3d(ctx->ggml_ctx, + txt_img, + txt_img->ne[0], + img->ne[1], + txt_img->ne[2], + txt_img->nb[1], + txt_img->nb[2], + txt->ne[1] * txt_img->nb[1]); // [N, n_img_token, hidden_size] if (final_layer) { img = final_layer->forward(ctx, img, vec); // (N, T, patch_size ** 2 * out_channels) @@ -1044,6 +967,15 @@ namespace Flux { return img; } + struct ggml_tensor* _apply_x0_residual(GGMLRunnerContext* ctx, + struct ggml_tensor* predicted, + struct ggml_tensor* noisy, + struct ggml_tensor* timesteps) { + auto x = ggml_sub(ctx->ggml_ctx, noisy, predicted); + x = ggml_div(ctx->ggml_ctx, x, timesteps); + return x; + } + struct ggml_tensor* forward_chroma_radiance(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* timestep, @@ -1058,16 +990,23 @@ namespace Flux { std::vector skip_layers = {}) { GGML_ASSERT(x->ne[3] == 1); - int64_t W = x->ne[0]; - int64_t H = x->ne[1]; - int64_t C = x->ne[2]; - int64_t 
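// [Editor's note] _apply_x0_residual above appears to convert an x0 (clean image) prediction into
// the quantity the sampler integrates. Assuming the usual rectified-flow parameterization
// x_t = (1 - t) * x0 + t * noise (an assumption of the editor, not stated in the patch),
// one has (x_t - x0) / t = noise - x0, i.e. the flow velocity. Scalar illustration:
static inline float sketch_x0_to_velocity(float x_t, float x0_pred, float t) {
    return (x_t - x0_pred) / t;  // == noise - x0 under x_t = (1 - t) * x0 + t * noise
}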
patch_size = params.patch_size; - int pad_h = (patch_size - H % patch_size) % patch_size; - int pad_w = (patch_size - W % patch_size) % patch_size; + int64_t W = x->ne[0]; + int64_t H = x->ne[1]; + int64_t C = x->ne[2]; + int patch_size = params.patch_size; + int pad_h = (patch_size - H % patch_size) % patch_size; + int pad_w = (patch_size - W % patch_size) % patch_size; - auto img = pad_to_patch_size(ctx->ggml_ctx, x); + auto img = DiT::pad_to_patch_size(ctx, x, params.patch_size, params.patch_size); auto orig_img = img; + if (params.chroma_radiance_params.fake_patch_size_x2) { + // It's supposed to be using GGML_SCALE_MODE_NEAREST, but this seems more stable + // Maybe the implementation of nearest-neighbor interpolation in ggml behaves differently than the one in PyTorch? + // img = F.interpolate(img, size=(H//2, W//2), mode="nearest") + img = ggml_interpolate(ctx->ggml_ctx, img, W / 2, H / 2, C, x->ne[3], GGML_SCALE_MODE_BILINEAR); + } + auto img_in_patch = std::dynamic_pointer_cast(blocks["img_in_patch"]); img = img_in_patch->forward(ctx, img); // [N, hidden_size, H/patch_size, W/patch_size] @@ -1080,7 +1019,7 @@ namespace Flux { auto nerf_image_embedder = std::dynamic_pointer_cast(blocks["nerf_image_embedder"]); auto nerf_final_layer_conv = std::dynamic_pointer_cast(blocks["nerf_final_layer_conv"]); - auto nerf_pixels = patchify(ctx->ggml_ctx, orig_img); // [N, num_patches, C * patch_size * patch_size] + auto nerf_pixels = DiT::patchify(ctx->ggml_ctx, orig_img, patch_size, patch_size); // [N, num_patches, C * patch_size * patch_size] int64_t num_patches = nerf_pixels->ne[1]; nerf_pixels = ggml_reshape_3d(ctx->ggml_ctx, nerf_pixels, @@ -1100,10 +1039,14 @@ namespace Flux { img_dct = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, img_dct, 1, 0, 2, 3)); // [N*num_patches, nerf_hidden_size, patch_size*patch_size] img_dct = ggml_reshape_3d(ctx->ggml_ctx, img_dct, img_dct->ne[0] * img_dct->ne[1], num_patches, img_dct->ne[2] / num_patches); // [N, num_patches, nerf_hidden_size*patch_size*patch_size] - img_dct = unpatchify(ctx->ggml_ctx, img_dct, (H + pad_h) / patch_size, (W + pad_w) / patch_size); // [N, nerf_hidden_size, H, W] + img_dct = DiT::unpatchify(ctx->ggml_ctx, img_dct, (H + pad_h) / patch_size, (W + pad_w) / patch_size, patch_size, patch_size); // [N, nerf_hidden_size, H, W] out = nerf_final_layer_conv->forward(ctx, img_dct); // [N, C, H, W] + if (params.chroma_radiance_params.use_x0) { + out = _apply_x0_residual(ctx, out, orig_img, timestep); + } + return out; } @@ -1121,23 +1064,23 @@ namespace Flux { std::vector skip_layers = {}) { GGML_ASSERT(x->ne[3] == 1); - int64_t W = x->ne[0]; - int64_t H = x->ne[1]; - int64_t C = x->ne[2]; - int64_t patch_size = params.patch_size; - int pad_h = (patch_size - H % patch_size) % patch_size; - int pad_w = (patch_size - W % patch_size) % patch_size; + int64_t W = x->ne[0]; + int64_t H = x->ne[1]; + int64_t C = x->ne[2]; + int patch_size = params.patch_size; + int pad_h = (patch_size - H % patch_size) % patch_size; + int pad_w = (patch_size - W % patch_size) % patch_size; - auto img = process_img(ctx->ggml_ctx, x); - uint64_t img_tokens = img->ne[1]; + auto img = DiT::pad_and_patchify(ctx, x, patch_size, patch_size); + int64_t img_tokens = img->ne[1]; if (params.version == VERSION_FLUX_FILL) { GGML_ASSERT(c_concat != nullptr); ggml_tensor* masked = ggml_view_4d(ctx->ggml_ctx, c_concat, c_concat->ne[0], c_concat->ne[1], C, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], 0); ggml_tensor* mask = ggml_view_4d(ctx->ggml_ctx, 
c_concat, c_concat->ne[0], c_concat->ne[1], 8 * 8, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * C); - masked = process_img(ctx->ggml_ctx, masked); - mask = process_img(ctx->ggml_ctx, mask); + masked = DiT::pad_and_patchify(ctx, masked, patch_size, patch_size); + mask = DiT::pad_and_patchify(ctx, mask, patch_size, patch_size); img = ggml_concat(ctx->ggml_ctx, img, ggml_concat(ctx->ggml_ctx, masked, mask, 0), 0); } else if (params.version == VERSION_FLEX_2) { @@ -1146,21 +1089,21 @@ namespace Flux { ggml_tensor* mask = ggml_view_4d(ctx->ggml_ctx, c_concat, c_concat->ne[0], c_concat->ne[1], 1, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * C); ggml_tensor* control = ggml_view_4d(ctx->ggml_ctx, c_concat, c_concat->ne[0], c_concat->ne[1], C, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * (C + 1)); - masked = process_img(ctx->ggml_ctx, masked); - mask = process_img(ctx->ggml_ctx, mask); - control = process_img(ctx->ggml_ctx, control); + masked = DiT::pad_and_patchify(ctx, masked, patch_size, patch_size); + mask = DiT::pad_and_patchify(ctx, mask, patch_size, patch_size); + control = DiT::pad_and_patchify(ctx, control, patch_size, patch_size); img = ggml_concat(ctx->ggml_ctx, img, ggml_concat(ctx->ggml_ctx, ggml_concat(ctx->ggml_ctx, masked, mask, 0), control, 0), 0); } else if (params.version == VERSION_FLUX_CONTROLS) { GGML_ASSERT(c_concat != nullptr); - auto control = process_img(ctx->ggml_ctx, c_concat); + auto control = DiT::pad_and_patchify(ctx, c_concat, patch_size, patch_size); img = ggml_concat(ctx->ggml_ctx, img, control, 0); } if (ref_latents.size() > 0) { for (ggml_tensor* ref : ref_latents) { - ref = process_img(ctx->ggml_ctx, ref); + ref = DiT::pad_and_patchify(ctx, ref, patch_size, patch_size); img = ggml_concat(ctx->ggml_ctx, img, ref, 1); } } @@ -1168,13 +1111,11 @@ namespace Flux { auto out = forward_orig(ctx, img, context, timestep, y, guidance, pe, mod_index_arange, skip_layers); // [N, num_tokens, C * patch_size * patch_size] if (out->ne[1] > img_tokens) { - out = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, out, 0, 2, 1, 3)); // [num_tokens, N, C * patch_size * patch_size] - out = ggml_view_3d(ctx->ggml_ctx, out, out->ne[0], out->ne[1], img_tokens, out->nb[1], out->nb[2], 0); - out = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, out, 0, 2, 1, 3)); // [N, h*w, C * patch_size * patch_size] + out = ggml_view_3d(ctx->ggml_ctx, out, out->ne[0], img_tokens, out->ne[2], out->nb[1], out->nb[2], 0); + out = ggml_cont(ctx->ggml_ctx, out); } - // rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=2, pw=2) - out = unpatchify(ctx->ggml_ctx, out, (H + pad_h) / patch_size, (W + pad_w) / patch_size); // [N, C, H + pad_h, W + pad_w] + out = DiT::unpatchify_and_crop(ctx->ggml_ctx, out, H, W, patch_size, patch_size); // [N, C, H, W] return out; } @@ -1263,13 +1204,9 @@ namespace Flux { } else if (version == VERSION_OVIS_IMAGE) { flux_params.semantic_txt_norm = true; flux_params.use_yak_mlp = true; - flux_params.context_in_dim = 2048; flux_params.vec_in_dim = 0; } else if (sd_version_is_flux2(version)) { - flux_params.context_in_dim = 15360; flux_params.in_channels = 128; - flux_params.hidden_size = 6144; - flux_params.num_heads = 48; flux_params.patch_size = 1; flux_params.out_channels = 128; flux_params.mlp_ratio = 3.f; @@ -1282,14 +1219,27 @@ namespace Flux { flux_params.ref_index_scale = 10.f; flux_params.use_mlp_silu_act = true; } + int64_t head_dim = 0; + int64_t 
actual_radiance_patch_size = -1; for (auto pair : tensor_storage_map) { std::string tensor_name = pair.first; if (!starts_with(tensor_name, prefix)) continue; if (tensor_name.find("guidance_in.in_layer.weight") != std::string::npos) { - // not schnell flux_params.guidance_embed = true; } + if (tensor_name.find("__x0__") != std::string::npos) { + LOG_DEBUG("using x0 prediction"); + flux_params.chroma_radiance_params.use_x0 = true; + } + if (tensor_name.find("__32x32__") != std::string::npos) { + LOG_DEBUG("using patch size 32"); + flux_params.patch_size = 32; + } + if (tensor_name.find("img_in_patch.weight") != std::string::npos) { + actual_radiance_patch_size = pair.second.ne[0]; + LOG_DEBUG("actual radiance patch size: %d", actual_radiance_patch_size); + } if (tensor_name.find("distilled_guidance_layer.in_proj.weight") != std::string::npos) { // Chroma flux_params.is_chroma = true; @@ -1310,13 +1260,35 @@ namespace Flux { flux_params.depth_single_blocks = block_depth + 1; } } + if (ends_with(tensor_name, "txt_in.weight")) { + flux_params.context_in_dim = pair.second.ne[0]; + flux_params.hidden_size = pair.second.ne[1]; + } + if (ends_with(tensor_name, "single_blocks.0.norm.key_norm.scale")) { + head_dim = pair.second.ne[0]; + } + if (ends_with(tensor_name, "double_blocks.0.txt_attn.norm.key_norm.scale")) { + head_dim = pair.second.ne[0]; + } + } + if (actual_radiance_patch_size > 0 && actual_radiance_patch_size != flux_params.patch_size) { + GGML_ASSERT(flux_params.patch_size == 2 * actual_radiance_patch_size); + LOG_DEBUG("using fake x2 patch size"); + flux_params.chroma_radiance_params.fake_patch_size_x2 = true; } - LOG_INFO("Flux blocks: %d double, %d single", flux_params.depth, flux_params.depth_single_blocks); + flux_params.num_heads = static_cast(flux_params.hidden_size / head_dim); + + LOG_INFO("flux: depth = %d, depth_single_blocks = %d, guidance_embed = %s, context_in_dim = %" PRId64 + ", hidden_size = %" PRId64 ", num_heads = %d", + flux_params.depth, + flux_params.depth_single_blocks, + flux_params.guidance_embed ? 
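// [Editor's note] With the detection above, the main Flux hyper-parameters are read off the
// checkpoint instead of being hard-coded per version. For a stock FLUX.1 model this reproduces the
// defaults declared earlier in FluxParams: txt_in.weight has shape [4096, 3072], so
// context_in_dim = 4096 and hidden_size = 3072; key_norm.scale has length 128, so head_dim = 128
// and num_heads = 3072 / 128 = 24.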
"true" : "false", + flux_params.context_in_dim, + flux_params.hidden_size, + flux_params.num_heads); if (flux_params.is_chroma) { LOG_INFO("Using pruned modulation (Chroma)"); - } else if (!flux_params.guidance_embed) { - LOG_INFO("Flux guidance is disabled (Schnell mode)"); } flux = Flux(flux_params); @@ -1431,18 +1403,20 @@ namespace Flux { txt_arange_dims = {1, 2}; } - pe_vec = Rope::gen_flux_pe(x->ne[1], - x->ne[0], + pe_vec = Rope::gen_flux_pe(static_cast(x->ne[1]), + static_cast(x->ne[0]), flux_params.patch_size, - x->ne[3], - context->ne[1], + static_cast(x->ne[3]), + static_cast(context->ne[1]), txt_arange_dims, ref_latents, increase_ref_index, flux_params.ref_index_scale, flux_params.theta, + circular_y_enabled, + circular_x_enabled, flux_params.axes_dim); - int pos_len = pe_vec.size() / flux_params.axes_dim_sum / 2; + int pos_len = static_cast(pe_vec.size() / flux_params.axes_dim_sum / 2); // LOG_DEBUG("pos_len %d", pos_len); auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, flux_params.axes_dim_sum / 2, pos_len); // pe->data = pe_vec.data(); @@ -1451,10 +1425,10 @@ namespace Flux { set_backend_tensor_data(pe, pe_vec.data()); if (version == VERSION_CHROMA_RADIANCE) { - int64_t patch_size = flux_params.patch_size; - int64_t nerf_max_freqs = flux_params.chroma_radiance_params.nerf_max_freqs; - dct_vec = fetch_dct_pos(patch_size, nerf_max_freqs); - dct = ggml_new_tensor_2d(compute_ctx, GGML_TYPE_F32, nerf_max_freqs * nerf_max_freqs, patch_size * patch_size); + int patch_size = flux_params.patch_size; + int nerf_max_freqs = flux_params.chroma_radiance_params.nerf_max_freqs; + dct_vec = fetch_dct_pos(patch_size, nerf_max_freqs); + dct = ggml_new_tensor_2d(compute_ctx, GGML_TYPE_F32, nerf_max_freqs * nerf_max_freqs, patch_size * patch_size); // dct->data = dct_vec.data(); // print_ggml_tensor(dct); // dct->data = nullptr; @@ -1541,12 +1515,12 @@ namespace Flux { struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); compute(8, x, timesteps, context, nullptr, y, guidance, {}, false, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out); - LOG_DEBUG("flux test done in %dms", t1 - t0); + LOG_DEBUG("flux test done in %lldms", t1 - t0); } } diff --git a/ggml_extend.hpp b/src/ggml_extend.hpp similarity index 85% rename from ggml_extend.hpp rename to src/ggml_extend.hpp index fcaa92c9..131d66fb 100644 --- a/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -97,10 +98,10 @@ static_assert(GGML_MAX_NAME >= 128, "GGML_MAX_NAME must be at least 128"); __STATIC_INLINE__ struct ggml_tensor* ggml_ext_mul_n_mode(struct ggml_context* ctx, struct ggml_tensor* a, struct ggml_tensor* b, int mode = 0) { // reshape A // swap 0th and nth axis - a = ggml_cont(ctx, ggml_permute(ctx, a, mode, mode != 1 ? 1 : 0, mode != 2 ? 2 : 0, mode != 3 ? 3 : 0)); - int ne1 = a->ne[1]; - int ne2 = a->ne[2]; - int ne3 = a->ne[3]; + a = ggml_cont(ctx, ggml_permute(ctx, a, mode, mode != 1 ? 1 : 0, mode != 2 ? 2 : 0, mode != 3 ? 
3 : 0)); + int64_t ne1 = a->ne[1]; + int64_t ne2 = a->ne[2]; + int64_t ne3 = a->ne[3]; // make 2D a = ggml_cont(ctx, ggml_reshape_2d(ctx, a, a->ne[0], (ne3 * ne2 * ne1))); @@ -166,12 +167,12 @@ __STATIC_INLINE__ void ggml_ext_im_set_randn_f32(struct ggml_tensor* tensor, std } } -__STATIC_INLINE__ void ggml_ext_tensor_set_f32(struct ggml_tensor* tensor, float value, int i0, int i1 = 0, int i2 = 0, int i3 = 0) { +__STATIC_INLINE__ void ggml_ext_tensor_set_f32(struct ggml_tensor* tensor, float value, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) { GGML_ASSERT(tensor->nb[0] == sizeof(float)); *(float*)((char*)(tensor->data) + i3 * tensor->nb[3] + i2 * tensor->nb[2] + i1 * tensor->nb[1] + i0 * tensor->nb[0]) = value; } -__STATIC_INLINE__ float ggml_ext_tensor_get_f32(const ggml_tensor* tensor, int i0, int i1 = 0, int i2 = 0, int i3 = 0) { +__STATIC_INLINE__ float ggml_ext_tensor_get_f32(const ggml_tensor* tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) { if (tensor->buffer != nullptr) { float value; ggml_backend_tensor_get(tensor, &value, i3 * tensor->nb[3] + i2 * tensor->nb[2] + i1 * tensor->nb[1] + i0 * tensor->nb[0], sizeof(float)); @@ -181,9 +182,9 @@ __STATIC_INLINE__ float ggml_ext_tensor_get_f32(const ggml_tensor* tensor, int i return *(float*)((char*)(tensor->data) + i3 * tensor->nb[3] + i2 * tensor->nb[2] + i1 * tensor->nb[1] + i0 * tensor->nb[0]); } -__STATIC_INLINE__ int ggml_ext_tensor_get_i32(const ggml_tensor* tensor, int i0, int i1 = 0, int i2 = 0, int i3 = 0) { +__STATIC_INLINE__ int ggml_ext_tensor_get_i32(const ggml_tensor* tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) { if (tensor->buffer != nullptr) { - float value; + int value; ggml_backend_tensor_get(tensor, &value, i3 * tensor->nb[3] + i2 * tensor->nb[2] + i1 * tensor->nb[1] + i0 * tensor->nb[0], sizeof(int)); return value; } @@ -191,12 +192,12 @@ __STATIC_INLINE__ int ggml_ext_tensor_get_i32(const ggml_tensor* tensor, int i0, return *(int*)((char*)(tensor->data) + i3 * tensor->nb[3] + i2 * tensor->nb[2] + i1 * tensor->nb[1] + i0 * tensor->nb[0]); } -__STATIC_INLINE__ ggml_fp16_t ggml_ext_tensor_get_f16(const ggml_tensor* tensor, int i0, int i1 = 0, int i2 = 0, int i3 = 0) { +__STATIC_INLINE__ ggml_fp16_t ggml_ext_tensor_get_f16(const ggml_tensor* tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) { GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); return *(ggml_fp16_t*)((char*)(tensor->data) + i3 * tensor->nb[3] + i2 * tensor->nb[2] + i1 * tensor->nb[1] + i0 * tensor->nb[0]); } -__STATIC_INLINE__ float sd_image_get_f32(sd_image_t image, int iw, int ih, int ic, bool scale = true) { +__STATIC_INLINE__ float sd_image_get_f32(sd_image_t image, int64_t iw, int64_t ih, int64_t ic, bool scale = true) { float value = *(image.data + ih * image.width * image.channel + iw * image.channel + ic); if (scale) { value /= 255.f; @@ -204,7 +205,7 @@ __STATIC_INLINE__ float sd_image_get_f32(sd_image_t image, int iw, int ih, int i return value; } -__STATIC_INLINE__ float sd_image_get_f32(sd_image_f32_t image, int iw, int ih, int ic, bool scale = true) { +__STATIC_INLINE__ float sd_image_get_f32(sd_image_f32_t image, int64_t iw, int64_t ih, int64_t ic, bool scale = true) { float value = *(image.data + ih * image.width * image.channel + iw * image.channel + ic); if (scale) { value /= 255.f; @@ -449,8 +450,8 @@ __STATIC_INLINE__ void ggml_ext_tensor_apply_mask(struct ggml_tensor* image_data int64_t width = output->ne[0]; int64_t height = output->ne[1]; int64_t channels = 
output->ne[2]; - float rescale_mx = mask->ne[0] / output->ne[0]; - float rescale_my = mask->ne[1] / output->ne[1]; + float rescale_mx = 1.f * mask->ne[0] / output->ne[0]; + float rescale_my = 1.f * mask->ne[1] / output->ne[1]; GGML_ASSERT(output->type == GGML_TYPE_F32); for (int ix = 0; ix < width; ix++) { for (int iy = 0; iy < height; iy++) { @@ -684,9 +685,10 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_torch_permute(struct ggml_context __STATIC_INLINE__ struct ggml_tensor* ggml_ext_slice(struct ggml_context* ctx, struct ggml_tensor* x, - int64_t dim, + int dim, int64_t start, - int64_t end) { + int64_t end, + bool cont = true) { GGML_ASSERT(dim >= 0 && dim < 4); if (x->ne[dim] == 1) { return x; @@ -701,27 +703,15 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_slice(struct ggml_context* ctx, GGML_ASSERT(start >= 0 && start < x->ne[dim]); GGML_ASSERT(end > start && end <= x->ne[dim]); - int perm[4] = {0, 1, 2, 3}; - for (int i = dim; i < 3; ++i) - perm[i] = perm[i + 1]; - perm[3] = dim; + int64_t slice_size = end - start; + int64_t slice_ne[4] = {x->ne[0], x->ne[1], x->ne[2], x->ne[3]}; + slice_ne[dim] = slice_size; - int inv_perm[4]; - for (int i = 0; i < 4; ++i) - inv_perm[perm[i]] = i; + x = ggml_view_4d(ctx, x, + slice_ne[0], slice_ne[1], slice_ne[2], slice_ne[3], + x->nb[1], x->nb[2], x->nb[3], start * x->nb[dim]); - if (dim != 3) { - x = ggml_ext_torch_permute(ctx, x, perm[0], perm[1], perm[2], perm[3]); - x = ggml_cont(ctx, x); - } - - x = ggml_view_4d( - ctx, x, - x->ne[0], x->ne[1], x->ne[2], end - start, - x->nb[1], x->nb[2], x->nb[3], x->nb[3] * start); - - if (dim != 3) { - x = ggml_ext_torch_permute(ctx, x, inv_perm[0], inv_perm[1], inv_perm[2], inv_perm[3]); + if (cont) { x = ggml_cont(ctx, x); } @@ -777,14 +767,14 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_silu_act(ggml_context* ctx, ggml_tensor* return x; } -typedef std::function on_tile_process; +typedef std::function on_tile_process; __STATIC_INLINE__ void sd_tiling_calc_tiles(int& num_tiles_dim, float& tile_overlap_factor_dim, int small_dim, int tile_size, const float tile_overlap_factor) { - int tile_overlap = (tile_size * tile_overlap_factor); + int tile_overlap = static_cast(tile_size * tile_overlap_factor); int non_tile_overlap = tile_size - tile_overlap; num_tiles_dim = (small_dim - tile_overlap) / non_tile_overlap; @@ -928,12 +918,15 @@ __STATIC_INLINE__ void sd_tiling_non_square(ggml_tensor* input, int64_t t1 = ggml_time_ms(); ggml_ext_tensor_split_2d(input, input_tile, x_in, y_in); - on_processing(input_tile, output_tile, false); - ggml_ext_tensor_merge_2d(output_tile, output, x_out, y_out, overlap_x_out, overlap_y_out, dx, dy); + if (on_processing(input_tile, output_tile, false)) { + ggml_ext_tensor_merge_2d(output_tile, output, x_out, y_out, overlap_x_out, overlap_y_out, dx, dy); - int64_t t2 = ggml_time_ms(); - last_time = (t2 - t1) / 1000.0f; - pretty_progress(tile_count, num_tiles, last_time); + int64_t t2 = ggml_time_ms(); + last_time = (t2 - t1) / 1000.0f; + pretty_progress(tile_count, num_tiles, last_time); + } else { + LOG_ERROR("Failed to process patch %d at (%d, %d)", tile_count, x, y); + } tile_count++; } last_x = false; @@ -959,6 +952,49 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_group_norm_32(struct ggml_context return ggml_group_norm(ctx, a, 32, eps); } +__STATIC_INLINE__ struct ggml_tensor* ggml_ext_scale(struct ggml_context* ctx, + struct ggml_tensor* x, + float factor, + bool inplace = false) { + if (!ggml_is_contiguous(x)) { + x = ggml_cont(ctx, x); + } + if (inplace) { + x = 
ggml_scale_inplace(ctx, x, factor); + } else { + x = ggml_scale(ctx, x, factor); + } + return x; +} + +__STATIC_INLINE__ struct ggml_tensor* ggml_ext_gelu(struct ggml_context* ctx, + struct ggml_tensor* x, + bool inplace = false) { + if (!ggml_is_contiguous(x)) { + x = ggml_cont(ctx, x); + } + if (inplace) { + x = ggml_gelu_inplace(ctx, x); + } else { + x = ggml_gelu(ctx, x); + } + return x; +} + +__STATIC_INLINE__ struct ggml_tensor* ggml_ext_gelu_quick(struct ggml_context* ctx, + struct ggml_tensor* x, + bool inplace = false) { + if (!ggml_is_contiguous(x)) { + x = ggml_cont(ctx, x); + } + if (inplace) { + x = ggml_gelu_quick_inplace(ctx, x); + } else { + x = ggml_gelu_quick(ctx, x); + } + return x; +} + __STATIC_INLINE__ struct ggml_tensor* ggml_ext_linear(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* w, @@ -966,7 +1002,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_linear(struct ggml_context* ctx, bool force_prec_f32 = false, float scale = 1.f) { if (scale != 1.f) { - x = ggml_scale(ctx, x, scale); + x = ggml_ext_scale(ctx, x, scale); } if (x->ne[2] * x->ne[3] > 1024) { // workaround: avoid ggml cuda error @@ -985,7 +1021,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_linear(struct ggml_context* ctx, } } if (scale != 1.f) { - x = ggml_scale(ctx, x, 1.f / scale); + x = ggml_ext_scale(ctx, x, 1.f / scale); } if (b != nullptr) { x = ggml_add_inplace(ctx, x, b); @@ -993,6 +1029,48 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_linear(struct ggml_context* ctx, return x; } +__STATIC_INLINE__ struct ggml_tensor* ggml_ext_pad_ext(struct ggml_context* ctx, + struct ggml_tensor* x, + int lp0, + int rp0, + int lp1, + int rp1, + int lp2, + int rp2, + int lp3, + int rp3, + bool circular_x = false, + bool circular_y = false) { + if (circular_x && circular_y) { + return ggml_pad_ext_circular(ctx, x, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3); + } + + if (circular_x && (lp0 != 0 || rp0 != 0)) { + x = ggml_pad_ext_circular(ctx, x, lp0, rp0, 0, 0, 0, 0, 0, 0); + lp0 = rp0 = 0; + } + if (circular_y && (lp1 != 0 || rp1 != 0)) { + x = ggml_pad_ext_circular(ctx, x, 0, 0, lp1, rp1, 0, 0, 0, 0); + lp1 = rp1 = 0; + } + + if (lp0 != 0 || rp0 != 0 || lp1 != 0 || rp1 != 0 || lp2 != 0 || rp2 != 0 || lp3 != 0 || rp3 != 0) { + x = ggml_pad_ext(ctx, x, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3); + } + return x; +} + +__STATIC_INLINE__ struct ggml_tensor* ggml_ext_pad(struct ggml_context* ctx, + struct ggml_tensor* x, + int p0, + int p1, + int p2 = 0, + int p3 = 0, + bool circular_x = false, + bool circular_y = false) { + return ggml_ext_pad_ext(ctx, x, 0, p0, 0, p1, 0, p2, 0, p3, circular_x, circular_y); +} + // w: [OC,IC, KH, KW] // x: [N, IC, IH, IW] // b: [OC,] @@ -1001,27 +1079,36 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_conv_2d(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* w, struct ggml_tensor* b, - int s0 = 1, - int s1 = 1, - int p0 = 0, - int p1 = 0, - int d0 = 1, - int d1 = 1, - bool direct = false, - float scale = 1.f) { + int s0 = 1, + int s1 = 1, + int p0 = 0, + int p1 = 0, + int d0 = 1, + int d1 = 1, + bool direct = false, + bool circular_x = false, + bool circular_y = false, + float scale = 1.f) { if (scale != 1.f) { - x = ggml_scale(ctx, x, scale); + x = ggml_ext_scale(ctx, x, scale); } if (w->ne[2] != x->ne[2] && ggml_n_dims(w) == 2) { w = ggml_reshape_4d(ctx, w, 1, 1, w->ne[0], w->ne[1]); } + + if ((p0 != 0 || p1 != 0) && (circular_x || circular_y)) { + x = ggml_ext_pad_ext(ctx, x, p0, p0, p1, p1, 0, 0, 0, 0, circular_x, circular_y); + p0 
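// [Editor's sketch, illustration only] ggml_ext_pad_ext above falls back to the ordinary zero pad
// unless circular padding is requested on an axis, in which case the opposite border is wrapped
// around instead. Example of a call a block might make to stay seamlessly tileable along W only
// (padding amounts are made up):
static struct ggml_tensor* sketch_pad_tileable_x(struct ggml_context* ctx, struct ggml_tensor* x) {
    // wrap-around along W (dim 0), plain zero padding along H (dim 1)
    return ggml_ext_pad_ext(ctx, x, 1, 1, 1, 1, 0, 0, 0, 0, /*circular_x*/ true, /*circular_y*/ false);
}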
= 0; + p1 = 0; + } + if (direct) { x = ggml_conv_2d_direct(ctx, w, x, s0, s1, p0, p1, d0, d1); } else { x = ggml_conv_2d(ctx, w, x, s0, s1, p0, p1, d0, d1); } if (scale != 1.f) { - x = ggml_scale(ctx, x, 1.f / scale); + x = ggml_ext_scale(ctx, x, 1.f / scale); } if (b != nullptr) { b = ggml_reshape_4d(ctx, b, 1, 1, b->ne[0], 1); @@ -1119,7 +1206,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_full(struct ggml_context* ctx, int64_t ne2, int64_t ne3) { auto one = ggml_get_tensor(ctx, "ggml_runner_build_in_tensor:one"); - auto t = ggml_scale(ctx, one, value); // [1,] + auto t = ggml_ext_scale(ctx, one, value); // [1,] t = ggml_repeat_4d(ctx, t, ne0, ne1, ne2, ne3); // [ne0, ne1, ne2, ne3] return t; } @@ -1132,6 +1219,11 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_zeros(struct ggml_context* ctx, return ggml_ext_full(ctx, 0.f, ne0, ne1, ne2, ne3); } +__STATIC_INLINE__ struct ggml_tensor* ggml_ext_zeros_like(struct ggml_context* ctx, + struct ggml_tensor* x) { + return ggml_ext_zeros(ctx, x->ne[0], x->ne[1], x->ne[2], x->ne[3]); +} + __STATIC_INLINE__ struct ggml_tensor* ggml_ext_ones(struct ggml_context* ctx, int64_t ne0, int64_t ne1, @@ -1140,6 +1232,11 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_ones(struct ggml_context* ctx, return ggml_ext_full(ctx, 1.f, ne0, ne1, ne2, ne3); } +__STATIC_INLINE__ struct ggml_tensor* ggml_ext_ones_like(struct ggml_context* ctx, + struct ggml_tensor* x) { + return ggml_ext_ones(ctx, x->ne[0], x->ne[1], x->ne[2], x->ne[3]); +} + __STATIC_INLINE__ ggml_tensor* ggml_ext_cast_f32(ggml_context* ctx, ggml_tensor* a) { #ifdef SD_USE_VULKAN auto zero_index = ggml_get_tensor(ctx, "ggml_runner_build_in_tensor:zero_int"); @@ -1156,35 +1253,11 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_cast_f32(ggml_context* ctx, ggml_tensor* } else { out = ggml_mul_mat(ctx, out, one); } - out = ggml_reshape(ctx, out, a); + out = ggml_reshape(ctx, out, a); #endif return out; } -// q: [N * n_head, n_token, d_head] -// k: [N * n_head, n_k, d_head] -// v: [N * n_head, d_head, n_k] -// return: [N * n_head, n_token, d_head] -__STATIC_INLINE__ struct ggml_tensor* ggml_ext_attention(struct ggml_context* ctx, - struct ggml_tensor* q, - struct ggml_tensor* k, - struct ggml_tensor* v, - bool mask = false) { -#if defined(SD_USE_FLASH_ATTENTION) && !defined(SD_USE_CUDA) && !defined(SD_USE_METAL) && !defined(SD_USE_VULKAN) && !defined(SD_USE_SYCL) - struct ggml_tensor* kqv = ggml_flash_attn(ctx, q, k, v, false); // [N * n_head, n_token, d_head] -#else - float d_head = (float)q->ne[0]; - struct ggml_tensor* kq = ggml_mul_mat(ctx, k, q); // [N * n_head, n_token, n_k] - kq = ggml_scale_inplace(ctx, kq, 1.0f / sqrt(d_head)); - if (mask) { - kq = ggml_diag_mask_inf_inplace(ctx, kq, 0); - } - kq = ggml_soft_max_inplace(ctx, kq); - struct ggml_tensor* kqv = ggml_mul_mat(ctx, v, kq); // [N * n_head, n_token, d_head] -#endif - return kqv; -} - // q: [N, L_q, C(n_head*d_head)] or [N*n_head, L_q, d_head] // k: [N, L_k, n_kv_head*d_head] or [N*n_kv_head, L_k, d_head] // v: [N, L_k, n_kv_head*d_head] or [N, L_k, n_kv_head, d_head] @@ -1197,7 +1270,6 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_attention_ext(struct ggml_context struct ggml_tensor* v, int64_t n_head, struct ggml_tensor* mask = nullptr, - bool diag_mask_inf = false, bool skip_reshape = false, bool flash_attn = false, float kv_scale = 1.0f) { // avoid overflow @@ -1243,7 +1315,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_attention_ext(struct ggml_context k_in = ggml_pad(ctx, k_in, 0, kv_pad, 0, 0); } if (kv_scale != 1.0f) { 
- k_in = ggml_scale(ctx, k_in, kv_scale); + k_in = ggml_ext_scale(ctx, k_in, kv_scale); } k_in = ggml_cast(ctx, k_in, GGML_TYPE_F16); @@ -1253,7 +1325,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_attention_ext(struct ggml_context v_in = ggml_pad(ctx, v_in, 0, kv_pad, 0, 0); } if (kv_scale != 1.0f) { - v_in = ggml_scale(ctx, v_in, kv_scale); + v_in = ggml_ext_scale(ctx, v_in, kv_scale); } v_in = ggml_cast(ctx, v_in, GGML_TYPE_F16); @@ -1285,7 +1357,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_attention_ext(struct ggml_context auto out = ggml_flash_attn_ext(ctx, q_in, k_in, v_in, mask_in, scale / kv_scale, 0, 0); ggml_flash_attn_ext_set_prec(out, GGML_PREC_F32); if (kv_scale != 1.0f) { - out = ggml_scale(ctx, out, 1.0f / kv_scale); + out = ggml_ext_scale(ctx, out, 1.0f / kv_scale); } return out; }; @@ -1294,7 +1366,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_attention_ext(struct ggml_context // LOG_DEBUG("attention_ext L_q:%d L_k:%d n_head:%d C:%d d_head:%d N:%d", L_q, L_k, n_head, C, d_head, N); bool can_use_flash_attn = true; if (can_use_flash_attn && L_k % 256 != 0) { - kv_pad = GGML_PAD(L_k, 256) - L_k; + kv_pad = GGML_PAD(L_k, 256) - static_cast(L_k); } if (mask != nullptr) { @@ -1320,13 +1392,11 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_attention_ext(struct ggml_context v = ggml_reshape_3d(ctx, v, L_k, d_head, n_kv_head * N); // [N * n_kv_head, d_head, L_k] auto kq = ggml_mul_mat(ctx, k, q); // [N * n_head, L_q, L_k] - kq = ggml_scale_inplace(ctx, kq, scale); + ggml_mul_mat_set_prec(kq, GGML_PREC_F32); + kq = ggml_scale_inplace(ctx, kq, scale); if (mask) { kq = ggml_add_inplace(ctx, kq, mask); } - if (diag_mask_inf) { - kq = ggml_diag_mask_inf_inplace(ctx, kq, 0); - } kq = ggml_soft_max_inplace(ctx, kq); kqv = ggml_mul_mat(ctx, v, kq); // [N * n_head, L_q, d_head] @@ -1494,7 +1564,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_timestep_embedding( int dim, int max_period = 10000, float time_factor = 1.0f) { - timesteps = ggml_scale(ctx, timesteps, time_factor); + timesteps = ggml_ext_scale(ctx, timesteps, time_factor); return ggml_timestep_embedding(ctx, timesteps, dim, max_period); } @@ -1520,15 +1590,17 @@ struct WeightAdapter { bool force_prec_f32 = false; float scale = 1.f; } linear; - struct { - int s0 = 1; - int s1 = 1; - int p0 = 0; - int p1 = 0; - int d0 = 1; - int d1 = 1; - bool direct = false; - float scale = 1.f; + struct conv2d_params_t { + int s0 = 1; + int s1 = 1; + int p0 = 0; + int p1 = 0; + int d0 = 1; + int d1 = 1; + bool direct = false; + bool circular_x = false; + bool circular_y = false; + float scale = 1.f; } conv2d; }; virtual ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name) = 0; @@ -1546,6 +1618,8 @@ struct GGMLRunnerContext { ggml_context* ggml_ctx = nullptr; bool flash_attn_enabled = false; bool conv2d_direct_enabled = false; + bool circular_x_enabled = false; + bool circular_y_enabled = false; std::shared_ptr weight_adapter = nullptr; }; @@ -1582,6 +1656,8 @@ protected: bool flash_attn_enabled = false; bool conv2d_direct_enabled = false; + bool circular_x_enabled = false; + bool circular_y_enabled = false; void alloc_params_ctx() { struct ggml_init_params params; @@ -1859,6 +1935,8 @@ public: runner_ctx.backend = runtime_backend; runner_ctx.flash_attn_enabled = flash_attn_enabled; runner_ctx.conv2d_direct_enabled = conv2d_direct_enabled; + runner_ctx.circular_x_enabled = circular_x_enabled; + runner_ctx.circular_y_enabled = circular_y_enabled; runner_ctx.weight_adapter = 
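// [Editor's note] The kv_scale handling above reads as an overflow guard for the f16 flash-attention
// path: k and v are multiplied by kv_scale before the cast to GGML_TYPE_F16 so large activations
// stay in half-precision range, the softmax scale is divided by the same factor so the attention
// logits are unchanged, and the output (which carries v's scaling) is divided back by kv_scale.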
weight_adapter; return runner_ctx; } @@ -2003,6 +2081,11 @@ public: conv2d_direct_enabled = enabled; } + void set_circular_axes(bool circular_x, bool circular_y) { + circular_x_enabled = circular_x; + circular_y_enabled = circular_y; + } + void set_weight_adapter(const std::shared_ptr& adapter) { weight_adapter = adapter; } @@ -2266,15 +2349,17 @@ public: } if (ctx->weight_adapter) { WeightAdapter::ForwardParams forward_params; - forward_params.op_type = WeightAdapter::ForwardParams::op_type_t::OP_CONV2D; - forward_params.conv2d.s0 = stride.second; - forward_params.conv2d.s1 = stride.first; - forward_params.conv2d.p0 = padding.second; - forward_params.conv2d.p1 = padding.first; - forward_params.conv2d.d0 = dilation.second; - forward_params.conv2d.d1 = dilation.first; - forward_params.conv2d.direct = ctx->conv2d_direct_enabled; - forward_params.conv2d.scale = scale; + forward_params.op_type = WeightAdapter::ForwardParams::op_type_t::OP_CONV2D; + forward_params.conv2d.s0 = stride.second; + forward_params.conv2d.s1 = stride.first; + forward_params.conv2d.p0 = padding.second; + forward_params.conv2d.p1 = padding.first; + forward_params.conv2d.d0 = dilation.second; + forward_params.conv2d.d1 = dilation.first; + forward_params.conv2d.direct = ctx->conv2d_direct_enabled; + forward_params.conv2d.circular_x = ctx->circular_x_enabled; + forward_params.conv2d.circular_y = ctx->circular_y_enabled; + forward_params.conv2d.scale = scale; return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, x, w, b, prefix, forward_params); } return ggml_ext_conv_2d(ctx->ggml_ctx, @@ -2288,57 +2373,12 @@ public: dilation.second, dilation.first, ctx->conv2d_direct_enabled, + ctx->circular_x_enabled, + ctx->circular_y_enabled, scale); } }; -class Conv3dnx1x1 : public UnaryBlock { -protected: - int64_t in_channels; - int64_t out_channels; - int64_t kernel_size; - int64_t stride; - int64_t padding; - int64_t dilation; - bool bias; - - void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map, const std::string prefix = "") override { - enum ggml_type wtype = GGML_TYPE_F16; - params["weight"] = ggml_new_tensor_4d(ctx, wtype, 1, kernel_size, in_channels, out_channels); // 5d => 4d - if (bias) { - enum ggml_type wtype = GGML_TYPE_F32; - params["bias"] = ggml_new_tensor_1d(ctx, wtype, out_channels); - } - } - -public: - Conv3dnx1x1(int64_t in_channels, - int64_t out_channels, - int64_t kernel_size, - int64_t stride = 1, - int64_t padding = 0, - int64_t dilation = 1, - bool bias = true) - : in_channels(in_channels), - out_channels(out_channels), - kernel_size(kernel_size), - stride(stride), - padding(padding), - dilation(dilation), - bias(bias) {} - - // x: [N, IC, ID, IH*IW] - // result: [N, OC, OD, OH*OW] - struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { - struct ggml_tensor* w = params["weight"]; - struct ggml_tensor* b = nullptr; - if (bias) { - b = params["bias"]; - } - return ggml_ext_conv_3d_nx1x1(ctx->ggml_ctx, x, w, b, stride, padding, dilation); - } -}; - class Conv3d : public UnaryBlock { protected: int64_t in_channels; @@ -2454,7 +2494,7 @@ public: class GroupNorm : public GGMLBlock { protected: - int64_t num_groups; + int num_groups; int64_t num_channels; float eps; bool affine; @@ -2471,7 +2511,7 @@ protected: } public: - GroupNorm(int64_t num_groups, + GroupNorm(int num_groups, int64_t num_channels, float eps = 1e-05f, bool affine = true) @@ -2573,7 +2613,7 @@ public: // x: [N, n_token, embed_dim] struct ggml_tensor* forward(GGMLRunnerContext* 
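// [Editor's sketch, hypothetical caller] With set_circular_axes plumbed through the runner, every
// Conv2d it builds pads with wrap-around on the requested axes; e.g. for a horizontally tileable
// (panorama-style) generation a caller could request:
//   diffusion_model->set_circular_axes(/*circular_x*/ true, /*circular_y*/ false);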
ctx, struct ggml_tensor* x, - bool mask = false) { + struct ggml_tensor* mask = nullptr) { auto out_proj = std::dynamic_pointer_cast(blocks[out_proj_name]); ggml_tensor* q; @@ -2596,11 +2636,180 @@ public: v = v_proj->forward(ctx, x); } - x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, n_head, nullptr, mask); // [N, n_token, embed_dim] + x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, n_head, mask, false); // [N, n_token, embed_dim] x = out_proj->forward(ctx, x); // [N, n_token, embed_dim] return x; } }; +__STATIC_INLINE__ struct ggml_tensor* ggml_ext_lokr_forward( + struct ggml_context* ctx, + struct ggml_tensor* h, // Input: [q, batch] or [W, H, q, batch] + struct ggml_tensor* w1, // Outer C (Full rank) + struct ggml_tensor* w1a, // Outer A (Low rank part 1) + struct ggml_tensor* w1b, // Outer B (Low rank part 2) + struct ggml_tensor* w2, // Inner BA (Full rank) + struct ggml_tensor* w2a, // Inner A (Low rank part 1) + struct ggml_tensor* w2b, // Inner B (Low rank part 2) + bool is_conv, + WeightAdapter::ForwardParams::conv2d_params_t conv_params, + float scale) { + GGML_ASSERT((w1 != NULL || (w1a != NULL && w1b != NULL))); + GGML_ASSERT((w2 != NULL || (w2a != NULL && w2b != NULL))); + + int uq = (w1 != NULL) ? (int)w1->ne[0] : (int)w1a->ne[0]; + int up = (w1 != NULL) ? (int)w1->ne[1] : (int)w1b->ne[1]; + + int q_actual = is_conv ? (int)h->ne[2] : (int)h->ne[0]; + int vq = q_actual / uq; + + int vp = (w2 != NULL) ? (is_conv ? (int)w2->ne[3] : (int)w2->ne[1]) + : (int)w2a->ne[1]; + GGML_ASSERT(q_actual == (uq * vq) && "Input dimension mismatch for LoKR split"); + + struct ggml_tensor* hb; + + if (!is_conv) { + int batch = (int)h->ne[1]; + int merge_batch_uq = batch; + int merge_batch_vp = batch; + +#if SD_USE_VULKAN + if (batch > 1) { + // no access to backend here, worst case is slightly worse perfs for other backends when built alongside Vulkan backend + int max_batch = 65535; + int max_batch_uq = max_batch / uq; + merge_batch_uq = 1; + for (int i = max_batch_uq; i > 0; i--) { + if (batch % i == 0) { + merge_batch_uq = i; + break; + } + } + + int max_batch_vp = max_batch / vp; + merge_batch_vp = 1; + for (int i = max_batch_vp; i > 0; i--) { + if (batch % i == 0) { + merge_batch_vp = i; + break; + } + } + } +#endif + + struct ggml_tensor* h_split = ggml_reshape_3d(ctx, h, vq, uq * merge_batch_uq, batch / merge_batch_uq); + if (w2 != NULL) { + hb = ggml_mul_mat(ctx, w2, h_split); + } else { + hb = ggml_mul_mat(ctx, w2b, ggml_mul_mat(ctx, w2a, h_split)); + } + + if (batch > 1) { + hb = ggml_reshape_3d(ctx, hb, vp, uq, batch); + } + struct ggml_tensor* hb_t = ggml_cont(ctx, ggml_transpose(ctx, hb)); + hb_t = ggml_reshape_3d(ctx, hb_t, uq, vp * merge_batch_vp, batch / merge_batch_vp); + + struct ggml_tensor* hc_t; + if (w1 != NULL) { + hc_t = ggml_mul_mat(ctx, w1, hb_t); + } else { + hc_t = ggml_mul_mat(ctx, w1b, ggml_mul_mat(ctx, w1a, hb_t)); + } + + if (batch > 1) { + hc_t = ggml_reshape_3d(ctx, hc_t, up, vp, batch); + } + + struct ggml_tensor* hc = ggml_transpose(ctx, hc_t); + struct ggml_tensor* out = ggml_reshape_2d(ctx, ggml_cont(ctx, hc), up * vp, batch); + return ggml_scale(ctx, out, scale); + } else { + int batch = (int)h->ne[3]; + // 1. 
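+// Illustrative sketch (not part of the patch) of the identity the non-conv LoKR path above
+// relies on: (W1 (x) W2) * vec(X) == vec(W2 * X * W1^T), so the Kronecker product never has
+// to be materialized. Plain column-major reference; names and layout are assumptions of the
+// sketch, not the ggml code:
+#include <vector>
+static std::vector<float> kron_apply(const std::vector<float>& A, int p, int m,   // A: p x m
+                                     const std::vector<float>& B, int q, int n,   // B: q x n
+                                     const std::vector<float>& x) {               // x = vec(X), X: n x m
+    std::vector<float> T(q * m, 0.0f);                      // T = B * X  (q x m)
+    for (int j = 0; j < m; ++j)
+        for (int i = 0; i < q; ++i)
+            for (int k = 0; k < n; ++k)
+                T[j * q + i] += B[k * q + i] * x[j * n + k];
+    std::vector<float> y(q * p, 0.0f);                      // Y = T * A^T (q x p), y = vec(Y)
+    for (int j = 0; j < p; ++j)
+        for (int i = 0; i < q; ++i)
+            for (int k = 0; k < m; ++k)
+                y[j * q + i] += T[k * q + i] * A[k * p + j];
+    return y;                                               // y == (A (x) B) * x
+}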
Reshape input: [W, H, vq*uq, batch] -> [W, H, vq, uq * batch] + struct ggml_tensor* h_split = ggml_reshape_4d(ctx, h, h->ne[0], h->ne[1], vq, uq * batch); + + if (w2 != NULL) { + hb = ggml_ext_conv_2d(ctx, h_split, w2, nullptr, + conv_params.s0, + conv_params.s1, + conv_params.p0, + conv_params.p1, + conv_params.d0, + conv_params.d1, + conv_params.direct, + conv_params.circular_x, + conv_params.circular_y, + conv_params.scale); + } else { + // swap a and b order for conv lora + struct ggml_tensor* a = w2b; + struct ggml_tensor* b = w2a; + + // unpack conv2d weights if needed + if (ggml_n_dims(a) < 4) { + int k = (int)sqrt(a->ne[0] / h_split->ne[2]); + GGML_ASSERT(k * k * h_split->ne[2] == a->ne[0]); + a = ggml_reshape_4d(ctx, a, k, k, a->ne[0] / (k * k), a->ne[1]); + } else if (a->ne[2] != h_split->ne[2]) { + int k = (int)sqrt(a->ne[2] / h_split->ne[2]); + GGML_ASSERT(k * k * h_split->ne[2] == a->ne[2]); + a = ggml_reshape_4d(ctx, a, a->ne[0] * k, a->ne[1] * k, a->ne[2] / (k * k), a->ne[3]); + } + struct ggml_tensor* ha = ggml_ext_conv_2d(ctx, h_split, a, nullptr, + conv_params.s0, + conv_params.s1, + conv_params.p0, + conv_params.p1, + conv_params.d0, + conv_params.d1, + conv_params.direct, + conv_params.circular_x, + conv_params.circular_y, + conv_params.scale); + + // not supporting lora_mid here + hb = ggml_ext_conv_2d(ctx, + ha, + b, + nullptr, + 1, + 1, + 0, + 0, + 1, + 1, + conv_params.direct, + conv_params.circular_x, + conv_params.circular_y, + conv_params.scale); + } + + // Current hb shape: [W_out, H_out, vp, uq * batch] + int w_out = (int)hb->ne[0]; + int h_out = (int)hb->ne[1]; + + // struct ggml_tensor* hb_cat = ggml_reshape_4d(ctx, hb, w_out , h_out , vp * uq, batch); + // [W_out, H_out, vp * uq, batch] + // Now left to compute (W1 kr Id) * hb_cat == (W1 kr W2) cv h + + // merge the uq groups of size vp*w_out*h_out + struct ggml_tensor* hb_merged = ggml_reshape_2d(ctx, hb, w_out * h_out * vp, uq * batch); + struct ggml_tensor* hc_t; + struct ggml_tensor* hb_merged_t = ggml_cont(ctx, ggml_transpose(ctx, hb_merged)); + if (w1 != NULL) { + // Would be great to be able to transpose w1 instead to avoid transposing both hb and hc + hc_t = ggml_mul_mat(ctx, w1, hb_merged_t); + } else { + hc_t = ggml_mul_mat(ctx, w1b, ggml_mul_mat(ctx, w1a, hb_merged_t)); + } + struct ggml_tensor* hc = ggml_transpose(ctx, hc_t); + // ungroup + struct ggml_tensor* out = ggml_reshape_4d(ctx, ggml_cont(ctx, hc), w_out, h_out, up * vp, batch); + return ggml_scale(ctx, out, scale); + } +} + #endif // __GGML_EXTEND__HPP__ diff --git a/gguf_reader.hpp b/src/gguf_reader.hpp similarity index 99% rename from gguf_reader.hpp rename to src/gguf_reader.hpp index 53482662..2cc4d9d9 100644 --- a/gguf_reader.hpp +++ b/src/gguf_reader.hpp @@ -151,7 +151,7 @@ private: } if (n_dims > GGML_MAX_DIMS) { - for (int i = GGML_MAX_DIMS; i < n_dims; i++) { + for (uint32_t i = GGML_MAX_DIMS; i < n_dims; i++) { info.shape[GGML_MAX_DIMS - 1] *= info.shape[i]; // stack to last dim; } info.shape.resize(GGML_MAX_DIMS); diff --git a/gits_noise.inl b/src/gits_noise.inl similarity index 100% rename from gits_noise.inl rename to src/gits_noise.inl diff --git a/latent-preview.h b/src/latent-preview.h similarity index 93% rename from latent-preview.h rename to src/latent-preview.h index 2c54c3b5..85c8e0dc 100644 --- a/latent-preview.h +++ b/src/latent-preview.h @@ -1,234 +1,234 @@ -#include -#include -#include "ggml.h" - -const float wan_21_latent_rgb_proj[16][3] = { - {0.015123f, -0.148418f, 0.479828f}, - {0.003652f, -0.010680f, 
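+// Illustrative sketch (not part of the patch) of the gguf_reader change above: tensors with
+// more than GGML_MAX_DIMS dimensions get the extra trailing dimensions folded into the last
+// kept dimension before the shape is truncated.
+#include <cstdint>
+#include <vector>
+static void fold_extra_dims(std::vector<uint32_t>& shape, uint32_t max_dims) {
+    for (uint32_t i = max_dims; i < shape.size(); i++) {
+        shape[max_dims - 1] *= shape[i];   // stack trailing dims onto the last kept dim
+    }
+    if (shape.size() > max_dims) {
+        shape.resize(max_dims);
+    }
+}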
-0.037142f}, - {0.212264f, 0.063033f, 0.016779f}, - {0.232999f, 0.406476f, 0.220125f}, - {-0.051864f, -0.082384f, -0.069396f}, - {0.085005f, -0.161492f, 0.010689f}, - {-0.245369f, -0.506846f, -0.117010f}, - {-0.151145f, 0.017721f, 0.007207f}, - {-0.293239f, -0.207936f, -0.421135f}, - {-0.187721f, 0.050783f, 0.177649f}, - {-0.013067f, 0.265964f, 0.166578f}, - {0.028327f, 0.109329f, 0.108642f}, - {-0.205343f, 0.043991f, 0.148914f}, - {0.014307f, -0.048647f, -0.007219f}, - {0.217150f, 0.053074f, 0.319923f}, - {0.155357f, 0.083156f, 0.064780f}}; -float wan_21_latent_rgb_bias[3] = {-0.270270f, -0.234976f, -0.456853f}; - -const float wan_22_latent_rgb_proj[48][3] = { - {0.017126f, -0.027230f, -0.019257f}, - {-0.113739f, -0.028715f, -0.022885f}, - {-0.000106f, 0.021494f, 0.004629f}, - {-0.013273f, -0.107137f, -0.033638f}, - {-0.000381f, 0.000279f, 0.025877f}, - {-0.014216f, -0.003975f, 0.040528f}, - {0.001638f, -0.000748f, 0.011022f}, - {0.029238f, -0.006697f, 0.035933f}, - {0.021641f, -0.015874f, 0.040531f}, - {-0.101984f, -0.070160f, -0.028855f}, - {0.033207f, -0.021068f, 0.002663f}, - {-0.104711f, 0.121673f, 0.102981f}, - {0.082647f, -0.004991f, 0.057237f}, - {-0.027375f, 0.031581f, 0.006868f}, - {-0.045434f, 0.029444f, 0.019287f}, - {-0.046572f, -0.012537f, 0.006675f}, - {0.074709f, 0.033690f, 0.025289f}, - {-0.008251f, -0.002745f, -0.006999f}, - {0.012685f, -0.061856f, -0.048658f}, - {0.042304f, -0.007039f, 0.000295f}, - {-0.007644f, -0.060843f, -0.033142f}, - {0.159909f, 0.045628f, 0.367541f}, - {0.095171f, 0.086438f, 0.010271f}, - {0.006812f, 0.019643f, 0.029637f}, - {0.003467f, -0.010705f, 0.014252f}, - {-0.099681f, -0.066272f, -0.006243f}, - {0.047357f, 0.037040f, 0.000185f}, - {-0.041797f, -0.089225f, -0.032257f}, - {0.008928f, 0.017028f, 0.018684f}, - {-0.042255f, 0.016045f, 0.006849f}, - {0.011268f, 0.036462f, 0.037387f}, - {0.011553f, -0.016375f, -0.048589f}, - {0.046266f, -0.027189f, 0.056979f}, - {0.009640f, -0.017576f, 0.030324f}, - {-0.045794f, -0.036083f, -0.010616f}, - {0.022418f, 0.039783f, -0.032939f}, - {-0.052714f, -0.015525f, 0.007438f}, - {0.193004f, 0.223541f, 0.264175f}, - {-0.059406f, -0.008188f, 0.022867f}, - {-0.156742f, -0.263791f, -0.007385f}, - {-0.015717f, 0.016570f, 0.033969f}, - {0.037969f, 0.109835f, 0.200449f}, - {-0.000782f, -0.009566f, -0.008058f}, - {0.010709f, 0.052960f, -0.044195f}, - {0.017271f, 0.045839f, 0.034569f}, - {0.009424f, 0.013088f, -0.001714f}, - {-0.024805f, -0.059378f, -0.033756f}, - {-0.078293f, 0.029070f, 0.026129f}}; -float wan_22_latent_rgb_bias[3] = {0.013160f, -0.096492f, -0.071323f}; - -const float flux_latent_rgb_proj[16][3] = { - {-0.041168f, 0.019917f, 0.097253f}, - {0.028096f, 0.026730f, 0.129576f}, - {0.065618f, -0.067950f, -0.014651f}, - {-0.012998f, -0.014762f, 0.081251f}, - {0.078567f, 0.059296f, -0.024687f}, - {-0.015987f, -0.003697f, 0.005012f}, - {0.033605f, 0.138999f, 0.068517f}, - {-0.024450f, -0.063567f, -0.030101f}, - {-0.040194f, -0.016710f, 0.127185f}, - {0.112681f, 0.088764f, -0.041940f}, - {-0.023498f, 0.093664f, 0.025543f}, - {0.082899f, 0.048320f, 0.007491f}, - {0.075712f, 0.074139f, 0.081965f}, - {-0.143501f, 0.018263f, -0.136138f}, - {-0.025767f, -0.082035f, -0.040023f}, - {-0.111849f, -0.055589f, -0.032361f}}; -float flux_latent_rgb_bias[3] = {0.024600f, -0.006937f, -0.008089f}; - -const float flux2_latent_rgb_proj[32][3] = { - {0.000736f, -0.008385f, -0.019710f}, - {-0.001352f, -0.016392f, 0.020693f}, - {-0.006376f, 0.002428f, 0.036736f}, - {0.039384f, 0.074167f, 0.119789f}, - {0.007464f, -0.005705f, 
-0.004734f}, - {-0.004086f, 0.005287f, -0.000409f}, - {-0.032835f, 0.050802f, -0.028120f}, - {-0.003158f, -0.000835f, 0.000406f}, - {-0.112840f, -0.084337f, -0.023083f}, - {0.001462f, -0.006656f, 0.000549f}, - {-0.009980f, -0.007480f, 0.009702f}, - {0.032540f, 0.000214f, -0.061388f}, - {0.011023f, 0.000694f, 0.007143f}, - {-0.001468f, -0.006723f, -0.001678f}, - {-0.005921f, -0.010320f, -0.003907f}, - {-0.028434f, 0.027584f, 0.018457f}, - {0.014349f, 0.011523f, 0.000441f}, - {0.009874f, 0.003081f, 0.001507f}, - {0.002218f, 0.005712f, 0.001563f}, - {0.053010f, -0.019844f, 0.008683f}, - {-0.002507f, 0.005384f, 0.000938f}, - {-0.002177f, -0.011366f, 0.003559f}, - {-0.000261f, 0.015121f, -0.003240f}, - {-0.003944f, -0.002083f, 0.005043f}, - {-0.009138f, 0.011336f, 0.003781f}, - {0.011429f, 0.003985f, -0.003855f}, - {0.010518f, -0.005586f, 0.010131f}, - {0.007883f, 0.002912f, -0.001473f}, - {-0.003318f, -0.003160f, 0.003684f}, - {-0.034560f, -0.008740f, 0.012996f}, - {0.000166f, 0.001079f, -0.012153f}, - {0.017772f, 0.000937f, -0.011953f}}; -float flux2_latent_rgb_bias[3] = {-0.028738f, -0.098463f, -0.107619f}; - -// This one was taken straight from -// https://github.com/Stability-AI/sd3.5/blob/8565799a3b41eb0c7ba976d18375f0f753f56402/sd3_impls.py#L288-L303 -// (MiT Licence) -const float sd3_latent_rgb_proj[16][3] = { - {-0.0645f, 0.0177f, 0.1052f}, - {0.0028f, 0.0312f, 0.0650f}, - {0.1848f, 0.0762f, 0.0360f}, - {0.0944f, 0.0360f, 0.0889f}, - {0.0897f, 0.0506f, -0.0364f}, - {-0.0020f, 0.1203f, 0.0284f}, - {0.0855f, 0.0118f, 0.0283f}, - {-0.0539f, 0.0658f, 0.1047f}, - {-0.0057f, 0.0116f, 0.0700f}, - {-0.0412f, 0.0281f, -0.0039f}, - {0.1106f, 0.1171f, 0.1220f}, - {-0.0248f, 0.0682f, -0.0481f}, - {0.0815f, 0.0846f, 0.1207f}, - {-0.0120f, -0.0055f, -0.0867f}, - {-0.0749f, -0.0634f, -0.0456f}, - {-0.1418f, -0.1457f, -0.1259f}, -}; -float sd3_latent_rgb_bias[3] = {0, 0, 0}; - -const float sdxl_latent_rgb_proj[4][3] = { - {0.258303f, 0.277640f, 0.329699f}, - {-0.299701f, 0.105446f, 0.014194f}, - {0.050522f, 0.186163f, -0.143257f}, - {-0.211938f, -0.149892f, -0.080036f}}; -float sdxl_latent_rgb_bias[3] = {0.144381f, -0.033313f, 0.007061f}; - -const float sd_latent_rgb_proj[4][3] = { - {0.337366f, 0.216344f, 0.257386f}, - {0.165636f, 0.386828f, 0.046994f}, - {-0.267803f, 0.237036f, 0.223517f}, - {-0.178022f, -0.200862f, -0.678514f}}; -float sd_latent_rgb_bias[3] = {-0.017478f, -0.055834f, -0.105825f}; - -void preview_latent_video(uint8_t* buffer, struct ggml_tensor* latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) { - size_t buffer_head = 0; - - uint32_t latent_width = latents->ne[0]; - uint32_t latent_height = latents->ne[1]; - uint32_t dim = latents->ne[ggml_n_dims(latents) - 1]; - uint32_t frames = 1; - if (ggml_n_dims(latents) == 4) { - frames = latents->ne[2]; - } - - uint32_t rgb_width = latent_width * patch_size; - uint32_t rgb_height = latent_height * patch_size; - - uint32_t unpatched_dim = dim / (patch_size * patch_size); - - for (int k = 0; k < frames; k++) { - for (int rgb_x = 0; rgb_x < rgb_width; rgb_x++) { - for (int rgb_y = 0; rgb_y < rgb_height; rgb_y++) { - int latent_x = rgb_x / patch_size; - int latent_y = rgb_y / patch_size; - - int channel_offset = 0; - if (patch_size > 1) { - channel_offset = ((rgb_y % patch_size) * patch_size + (rgb_x % patch_size)); - } - - size_t latent_id = (latent_x * latents->nb[0] + latent_y * latents->nb[1] + k * latents->nb[2]); - - // should be incremented by 1 for each pixel - size_t pixel_id = k * rgb_width * 
rgb_height + rgb_y * rgb_width + rgb_x; - - float r = 0, g = 0, b = 0; - if (latent_rgb_proj != nullptr) { - for (int d = 0; d < unpatched_dim; d++) { - float value = *(float*)((char*)latents->data + latent_id + (d * patch_size * patch_size + channel_offset) * latents->nb[ggml_n_dims(latents) - 1]); - r += value * latent_rgb_proj[d][0]; - g += value * latent_rgb_proj[d][1]; - b += value * latent_rgb_proj[d][2]; - } - } else { - // interpret first 3 channels as RGB - r = *(float*)((char*)latents->data + latent_id + 0 * latents->nb[ggml_n_dims(latents) - 1]); - g = *(float*)((char*)latents->data + latent_id + 1 * latents->nb[ggml_n_dims(latents) - 1]); - b = *(float*)((char*)latents->data + latent_id + 2 * latents->nb[ggml_n_dims(latents) - 1]); - } - if (latent_rgb_bias != nullptr) { - // bias - r += latent_rgb_bias[0]; - g += latent_rgb_bias[1]; - b += latent_rgb_bias[2]; - } - // change range - r = r * .5f + .5f; - g = g * .5f + .5f; - b = b * .5f + .5f; - - // clamp rgb values to [0,1] range - r = r >= 0 ? r <= 1 ? r : 1 : 0; - g = g >= 0 ? g <= 1 ? g : 1 : 0; - b = b >= 0 ? b <= 1 ? b : 1 : 0; - - buffer[pixel_id * 3 + 0] = (uint8_t)(r * 255); - buffer[pixel_id * 3 + 1] = (uint8_t)(g * 255); - buffer[pixel_id * 3 + 2] = (uint8_t)(b * 255); - } - } - } -} +#include +#include +#include "ggml.h" + +const float wan_21_latent_rgb_proj[16][3] = { + {0.015123f, -0.148418f, 0.479828f}, + {0.003652f, -0.010680f, -0.037142f}, + {0.212264f, 0.063033f, 0.016779f}, + {0.232999f, 0.406476f, 0.220125f}, + {-0.051864f, -0.082384f, -0.069396f}, + {0.085005f, -0.161492f, 0.010689f}, + {-0.245369f, -0.506846f, -0.117010f}, + {-0.151145f, 0.017721f, 0.007207f}, + {-0.293239f, -0.207936f, -0.421135f}, + {-0.187721f, 0.050783f, 0.177649f}, + {-0.013067f, 0.265964f, 0.166578f}, + {0.028327f, 0.109329f, 0.108642f}, + {-0.205343f, 0.043991f, 0.148914f}, + {0.014307f, -0.048647f, -0.007219f}, + {0.217150f, 0.053074f, 0.319923f}, + {0.155357f, 0.083156f, 0.064780f}}; +float wan_21_latent_rgb_bias[3] = {-0.270270f, -0.234976f, -0.456853f}; + +const float wan_22_latent_rgb_proj[48][3] = { + {0.017126f, -0.027230f, -0.019257f}, + {-0.113739f, -0.028715f, -0.022885f}, + {-0.000106f, 0.021494f, 0.004629f}, + {-0.013273f, -0.107137f, -0.033638f}, + {-0.000381f, 0.000279f, 0.025877f}, + {-0.014216f, -0.003975f, 0.040528f}, + {0.001638f, -0.000748f, 0.011022f}, + {0.029238f, -0.006697f, 0.035933f}, + {0.021641f, -0.015874f, 0.040531f}, + {-0.101984f, -0.070160f, -0.028855f}, + {0.033207f, -0.021068f, 0.002663f}, + {-0.104711f, 0.121673f, 0.102981f}, + {0.082647f, -0.004991f, 0.057237f}, + {-0.027375f, 0.031581f, 0.006868f}, + {-0.045434f, 0.029444f, 0.019287f}, + {-0.046572f, -0.012537f, 0.006675f}, + {0.074709f, 0.033690f, 0.025289f}, + {-0.008251f, -0.002745f, -0.006999f}, + {0.012685f, -0.061856f, -0.048658f}, + {0.042304f, -0.007039f, 0.000295f}, + {-0.007644f, -0.060843f, -0.033142f}, + {0.159909f, 0.045628f, 0.367541f}, + {0.095171f, 0.086438f, 0.010271f}, + {0.006812f, 0.019643f, 0.029637f}, + {0.003467f, -0.010705f, 0.014252f}, + {-0.099681f, -0.066272f, -0.006243f}, + {0.047357f, 0.037040f, 0.000185f}, + {-0.041797f, -0.089225f, -0.032257f}, + {0.008928f, 0.017028f, 0.018684f}, + {-0.042255f, 0.016045f, 0.006849f}, + {0.011268f, 0.036462f, 0.037387f}, + {0.011553f, -0.016375f, -0.048589f}, + {0.046266f, -0.027189f, 0.056979f}, + {0.009640f, -0.017576f, 0.030324f}, + {-0.045794f, -0.036083f, -0.010616f}, + {0.022418f, 0.039783f, -0.032939f}, + {-0.052714f, -0.015525f, 0.007438f}, + {0.193004f, 0.223541f, 
0.264175f}, + {-0.059406f, -0.008188f, 0.022867f}, + {-0.156742f, -0.263791f, -0.007385f}, + {-0.015717f, 0.016570f, 0.033969f}, + {0.037969f, 0.109835f, 0.200449f}, + {-0.000782f, -0.009566f, -0.008058f}, + {0.010709f, 0.052960f, -0.044195f}, + {0.017271f, 0.045839f, 0.034569f}, + {0.009424f, 0.013088f, -0.001714f}, + {-0.024805f, -0.059378f, -0.033756f}, + {-0.078293f, 0.029070f, 0.026129f}}; +float wan_22_latent_rgb_bias[3] = {0.013160f, -0.096492f, -0.071323f}; + +const float flux_latent_rgb_proj[16][3] = { + {-0.041168f, 0.019917f, 0.097253f}, + {0.028096f, 0.026730f, 0.129576f}, + {0.065618f, -0.067950f, -0.014651f}, + {-0.012998f, -0.014762f, 0.081251f}, + {0.078567f, 0.059296f, -0.024687f}, + {-0.015987f, -0.003697f, 0.005012f}, + {0.033605f, 0.138999f, 0.068517f}, + {-0.024450f, -0.063567f, -0.030101f}, + {-0.040194f, -0.016710f, 0.127185f}, + {0.112681f, 0.088764f, -0.041940f}, + {-0.023498f, 0.093664f, 0.025543f}, + {0.082899f, 0.048320f, 0.007491f}, + {0.075712f, 0.074139f, 0.081965f}, + {-0.143501f, 0.018263f, -0.136138f}, + {-0.025767f, -0.082035f, -0.040023f}, + {-0.111849f, -0.055589f, -0.032361f}}; +float flux_latent_rgb_bias[3] = {0.024600f, -0.006937f, -0.008089f}; + +const float flux2_latent_rgb_proj[32][3] = { + {0.000736f, -0.008385f, -0.019710f}, + {-0.001352f, -0.016392f, 0.020693f}, + {-0.006376f, 0.002428f, 0.036736f}, + {0.039384f, 0.074167f, 0.119789f}, + {0.007464f, -0.005705f, -0.004734f}, + {-0.004086f, 0.005287f, -0.000409f}, + {-0.032835f, 0.050802f, -0.028120f}, + {-0.003158f, -0.000835f, 0.000406f}, + {-0.112840f, -0.084337f, -0.023083f}, + {0.001462f, -0.006656f, 0.000549f}, + {-0.009980f, -0.007480f, 0.009702f}, + {0.032540f, 0.000214f, -0.061388f}, + {0.011023f, 0.000694f, 0.007143f}, + {-0.001468f, -0.006723f, -0.001678f}, + {-0.005921f, -0.010320f, -0.003907f}, + {-0.028434f, 0.027584f, 0.018457f}, + {0.014349f, 0.011523f, 0.000441f}, + {0.009874f, 0.003081f, 0.001507f}, + {0.002218f, 0.005712f, 0.001563f}, + {0.053010f, -0.019844f, 0.008683f}, + {-0.002507f, 0.005384f, 0.000938f}, + {-0.002177f, -0.011366f, 0.003559f}, + {-0.000261f, 0.015121f, -0.003240f}, + {-0.003944f, -0.002083f, 0.005043f}, + {-0.009138f, 0.011336f, 0.003781f}, + {0.011429f, 0.003985f, -0.003855f}, + {0.010518f, -0.005586f, 0.010131f}, + {0.007883f, 0.002912f, -0.001473f}, + {-0.003318f, -0.003160f, 0.003684f}, + {-0.034560f, -0.008740f, 0.012996f}, + {0.000166f, 0.001079f, -0.012153f}, + {0.017772f, 0.000937f, -0.011953f}}; +float flux2_latent_rgb_bias[3] = {-0.028738f, -0.098463f, -0.107619f}; + +// This one was taken straight from +// https://github.com/Stability-AI/sd3.5/blob/8565799a3b41eb0c7ba976d18375f0f753f56402/sd3_impls.py#L288-L303 +// (MiT Licence) +const float sd3_latent_rgb_proj[16][3] = { + {-0.0645f, 0.0177f, 0.1052f}, + {0.0028f, 0.0312f, 0.0650f}, + {0.1848f, 0.0762f, 0.0360f}, + {0.0944f, 0.0360f, 0.0889f}, + {0.0897f, 0.0506f, -0.0364f}, + {-0.0020f, 0.1203f, 0.0284f}, + {0.0855f, 0.0118f, 0.0283f}, + {-0.0539f, 0.0658f, 0.1047f}, + {-0.0057f, 0.0116f, 0.0700f}, + {-0.0412f, 0.0281f, -0.0039f}, + {0.1106f, 0.1171f, 0.1220f}, + {-0.0248f, 0.0682f, -0.0481f}, + {0.0815f, 0.0846f, 0.1207f}, + {-0.0120f, -0.0055f, -0.0867f}, + {-0.0749f, -0.0634f, -0.0456f}, + {-0.1418f, -0.1457f, -0.1259f}, +}; +float sd3_latent_rgb_bias[3] = {0, 0, 0}; + +const float sdxl_latent_rgb_proj[4][3] = { + {0.258303f, 0.277640f, 0.329699f}, + {-0.299701f, 0.105446f, 0.014194f}, + {0.050522f, 0.186163f, -0.143257f}, + {-0.211938f, -0.149892f, -0.080036f}}; +float 
sdxl_latent_rgb_bias[3] = {0.144381f, -0.033313f, 0.007061f}; + +const float sd_latent_rgb_proj[4][3] = { + {0.337366f, 0.216344f, 0.257386f}, + {0.165636f, 0.386828f, 0.046994f}, + {-0.267803f, 0.237036f, 0.223517f}, + {-0.178022f, -0.200862f, -0.678514f}}; +float sd_latent_rgb_bias[3] = {-0.017478f, -0.055834f, -0.105825f}; + +void preview_latent_video(uint8_t* buffer, struct ggml_tensor* latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) { + size_t buffer_head = 0; + + uint32_t latent_width = static_cast(latents->ne[0]); + uint32_t latent_height = static_cast(latents->ne[1]); + uint32_t dim = static_cast(latents->ne[ggml_n_dims(latents) - 1]); + uint32_t frames = 1; + if (ggml_n_dims(latents) == 4) { + frames = static_cast(latents->ne[2]); + } + + uint32_t rgb_width = latent_width * patch_size; + uint32_t rgb_height = latent_height * patch_size; + + uint32_t unpatched_dim = dim / (patch_size * patch_size); + + for (uint32_t k = 0; k < frames; k++) { + for (uint32_t rgb_x = 0; rgb_x < rgb_width; rgb_x++) { + for (uint32_t rgb_y = 0; rgb_y < rgb_height; rgb_y++) { + int latent_x = rgb_x / patch_size; + int latent_y = rgb_y / patch_size; + + int channel_offset = 0; + if (patch_size > 1) { + channel_offset = ((rgb_y % patch_size) * patch_size + (rgb_x % patch_size)); + } + + size_t latent_id = (latent_x * latents->nb[0] + latent_y * latents->nb[1] + k * latents->nb[2]); + + // should be incremented by 1 for each pixel + size_t pixel_id = k * rgb_width * rgb_height + rgb_y * rgb_width + rgb_x; + + float r = 0, g = 0, b = 0; + if (latent_rgb_proj != nullptr) { + for (uint32_t d = 0; d < unpatched_dim; d++) { + float value = *(float*)((char*)latents->data + latent_id + (d * patch_size * patch_size + channel_offset) * latents->nb[ggml_n_dims(latents) - 1]); + r += value * latent_rgb_proj[d][0]; + g += value * latent_rgb_proj[d][1]; + b += value * latent_rgb_proj[d][2]; + } + } else { + // interpret first 3 channels as RGB + r = *(float*)((char*)latents->data + latent_id + 0 * latents->nb[ggml_n_dims(latents) - 1]); + g = *(float*)((char*)latents->data + latent_id + 1 * latents->nb[ggml_n_dims(latents) - 1]); + b = *(float*)((char*)latents->data + latent_id + 2 * latents->nb[ggml_n_dims(latents) - 1]); + } + if (latent_rgb_bias != nullptr) { + // bias + r += latent_rgb_bias[0]; + g += latent_rgb_bias[1]; + b += latent_rgb_bias[2]; + } + // change range + r = r * .5f + .5f; + g = g * .5f + .5f; + b = b * .5f + .5f; + + // clamp rgb values to [0,1] range + r = r >= 0 ? r <= 1 ? r : 1 : 0; + g = g >= 0 ? g <= 1 ? g : 1 : 0; + b = b >= 0 ? b <= 1 ? 
b : 1 : 0; + + buffer[pixel_id * 3 + 0] = (uint8_t)(r * 255); + buffer[pixel_id * 3 + 1] = (uint8_t)(g * 255); + buffer[pixel_id * 3 + 2] = (uint8_t)(b * 255); + } + } + } +} diff --git a/llm.hpp b/src/llm.hpp similarity index 93% rename from llm.hpp rename to src/llm.hpp index dc04c84c..5490f07c 100644 --- a/llm.hpp +++ b/src/llm.hpp @@ -19,6 +19,7 @@ #include "json.hpp" #include "rope.hpp" #include "tokenize_util.h" +#include "vocab/vocab.h" namespace LLM { constexpr int LLM_GRAPH_SIZE = 10240; @@ -195,14 +196,14 @@ namespace LLM { tokens.insert(tokens.begin(), BOS_TOKEN_ID); } if (max_length > 0 && padding) { - size_t n = std::ceil(tokens.size() * 1.0 / max_length); + size_t n = static_cast(std::ceil(tokens.size() * 1.f / max_length)); if (n == 0) { n = 1; } size_t length = max_length * n; LOG_DEBUG("token length: %llu", length); tokens.insert(tokens.end(), length - tokens.size(), PAD_TOKEN_ID); - weights.insert(weights.end(), length - weights.size(), 1.0); + weights.insert(weights.end(), length - weights.size(), 1.f); } } @@ -365,7 +366,7 @@ namespace LLM { if (merges_utf8_str.size() > 0) { load_from_merges(merges_utf8_str); } else { - load_from_merges(ModelLoader::load_qwen2_merges()); + load_from_merges(load_qwen2_merges()); } } }; @@ -377,7 +378,7 @@ namespace LLM { try { vocab = nlohmann::json::parse(vocab_utf8_str); - } catch (const nlohmann::json::parse_error& e) { + } catch (const nlohmann::json::parse_error&) { GGML_ABORT("invalid vocab json str"); } for (const auto& [key, value] : vocab.items()) { @@ -386,7 +387,7 @@ namespace LLM { encoder[token] = i; decoder[i] = token; } - encoder_len = vocab.size(); + encoder_len = static_cast(vocab.size()); LOG_DEBUG("vocab size: %d", encoder_len); auto byte_unicode_pairs = bytes_to_unicode(); @@ -466,7 +467,7 @@ namespace LLM { if (merges_utf8_str.size() > 0 && vocab_utf8_str.size() > 0) { load_from_merges(merges_utf8_str, vocab_utf8_str); } else { - load_from_merges(ModelLoader::load_mistral_merges(), ModelLoader::load_mistral_vocab_json()); + load_from_merges(load_mistral_merges(), load_mistral_vocab_json()); } } }; @@ -485,16 +486,16 @@ namespace LLM { }; struct LLMVisionParams { - int64_t num_layers = 32; + int num_layers = 32; int64_t hidden_size = 1280; int64_t intermediate_size = 3420; - int64_t num_heads = 16; + int num_heads = 16; int64_t in_channels = 3; int64_t out_hidden_size = 3584; - int64_t temporal_patch_size = 2; - int64_t patch_size = 14; - int64_t spatial_merge_size = 2; - int64_t window_size = 112; + int temporal_patch_size = 2; + int patch_size = 14; + int spatial_merge_size = 2; + int window_size = 112; std::set fullatt_block_indexes = {7, 15, 23, 31}; }; @@ -503,9 +504,9 @@ namespace LLM { int64_t num_layers = 28; int64_t hidden_size = 3584; int64_t intermediate_size = 18944; - int64_t num_heads = 28; - int64_t num_kv_heads = 4; - int64_t head_dim = 128; + int num_heads = 28; + int num_kv_heads = 4; + int head_dim = 128; bool qkv_bias = true; bool qk_norm = false; int64_t vocab_size = 152064; @@ -638,7 +639,7 @@ namespace LLM { x = ln_q->forward(ctx, x); x = ggml_reshape_2d(ctx->ggml_ctx, x, hidden_size, ggml_nelements(x) / hidden_size); x = mlp_0->forward(ctx, x); - x = ggml_gelu(ctx->ggml_ctx, x); + x = ggml_ext_gelu(ctx->ggml_ctx, x); x = mlp_2->forward(ctx, x); return x; } @@ -647,15 +648,15 @@ namespace LLM { struct VisionAttention : public GGMLBlock { protected: bool llama_cpp_style; - int64_t head_dim; - int64_t num_heads; + int head_dim; + int num_heads; public: VisionAttention(bool llama_cpp_style, int64_t 
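+// Illustrative sketch (not part of the patch) of the per-pixel math in preview_latent_video
+// above: each latent pixel's channels are projected to RGB with the fixed per-model matrix
+// and bias, then mapped from roughly [-1, 1] to [0, 255]. Standalone reference; proj must be
+// non-null in this simplified version:
+#include <algorithm>
+#include <cstdint>
+static void latent_to_rgb(const float* latent, int dim,
+                          const float (*proj)[3], const float bias[3], uint8_t out[3]) {
+    for (int c = 0; c < 3; c++) {
+        float v = (bias != nullptr) ? bias[c] : 0.0f;
+        for (int d = 0; d < dim; d++) {
+            v += latent[d] * proj[d][c];
+        }
+        v = v * 0.5f + 0.5f;                       // [-1, 1] -> [0, 1]
+        v = std::min(1.0f, std::max(0.0f, v));     // clamp
+        out[c] = (uint8_t)(v * 255.0f);
+    }
+}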
hidden_size, - int64_t num_heads) + int num_heads) : llama_cpp_style(llama_cpp_style), num_heads(num_heads) { - head_dim = hidden_size / num_heads; + head_dim = static_cast(hidden_size / num_heads); GGML_ASSERT(num_heads * head_dim == hidden_size); if (llama_cpp_style) { blocks["q_proj"] = std::shared_ptr(new Linear(hidden_size, hidden_size)); @@ -709,7 +710,7 @@ namespace LLM { VisionBlock(bool llama_cpp_style, int64_t hidden_size, int64_t intermediate_size, - int64_t num_heads, + int num_heads, float eps = 1e-6f) { blocks["attn"] = std::shared_ptr(new VisionAttention(llama_cpp_style, hidden_size, num_heads)); blocks["mlp"] = std::shared_ptr(new MLP(hidden_size, intermediate_size, true)); @@ -743,22 +744,22 @@ namespace LLM { struct VisionModel : public GGMLBlock { protected: - int64_t num_layers; - int64_t spatial_merge_size; + int num_layers; + int spatial_merge_size; std::set fullatt_block_indexes; public: VisionModel(bool llama_cpp_style, - int64_t num_layers, + int num_layers, int64_t in_channels, int64_t hidden_size, int64_t out_hidden_size, int64_t intermediate_size, - int64_t num_heads, - int64_t spatial_merge_size, - int64_t patch_size, - int64_t temporal_patch_size, - int64_t window_size, + int num_heads, + int spatial_merge_size, + int patch_size, + int temporal_patch_size, + int window_size, std::set fullatt_block_indexes = {7, 15, 23, 31}, float eps = 1e-6f) : num_layers(num_layers), fullatt_block_indexes(std::move(fullatt_block_indexes)), spatial_merge_size(spatial_merge_size) { @@ -817,7 +818,7 @@ namespace LLM { struct Attention : public GGMLBlock { protected: LLMArch arch; - int64_t head_dim; + int head_dim; int64_t num_heads; int64_t num_kv_heads; bool qk_norm; @@ -837,7 +838,8 @@ namespace LLM { struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, - struct ggml_tensor* input_pos) { + struct ggml_tensor* input_pos, + struct ggml_tensor* attention_mask = nullptr) { // x: [N, n_token, hidden_size] int64_t n_token = x->ne[1]; int64_t N = x->ne[2]; @@ -880,7 +882,7 @@ namespace LLM { k = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, k, 0, 2, 1, 3)); // [N, num_kv_heads, n_token, head_dim] k = ggml_reshape_3d(ctx->ggml_ctx, k, k->ne[0], k->ne[1], k->ne[2] * k->ne[3]); // [N*num_kv_heads, n_token, head_dim] - x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, num_heads, nullptr, true, true, false); // [N, n_token, hidden_size] + x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, num_heads, attention_mask, true, false); // [N, n_token, hidden_size] x = out_proj->forward(ctx, x); // [N, n_token, hidden_size] return x; @@ -898,7 +900,8 @@ namespace LLM { struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, - struct ggml_tensor* input_pos) { + struct ggml_tensor* input_pos, + struct ggml_tensor* attention_mask = nullptr) { // x: [N, n_token, hidden_size] auto self_attn = std::dynamic_pointer_cast(blocks["self_attn"]); auto mlp = std::dynamic_pointer_cast(blocks["mlp"]); @@ -907,7 +910,7 @@ namespace LLM { auto residual = x; x = input_layernorm->forward(ctx, x); - x = self_attn->forward(ctx, x, input_pos); + x = self_attn->forward(ctx, x, input_pos, attention_mask); x = ggml_add_inplace(ctx->ggml_ctx, x, residual); residual = x; @@ -936,6 +939,7 @@ namespace LLM { struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* input_ids, struct ggml_tensor* input_pos, + struct ggml_tensor* attention_mask, std::vector> image_embeds, std::set out_layers) { // input_ids: [N, n_token] @@ 
-990,7 +994,7 @@ namespace LLM { for (int i = 0; i < num_layers; i++) { auto block = std::dynamic_pointer_cast(blocks["layers." + std::to_string(i)]); - x = block->forward(ctx, x, input_pos); + x = block->forward(ctx, x, input_pos, attention_mask); if (out_layers.find(i + 1) != out_layers.end()) { intermediate_outputs.push_back(x); } @@ -1036,12 +1040,13 @@ namespace LLM { struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* input_ids, struct ggml_tensor* input_pos, + struct ggml_tensor* attention_mask, std::vector> image_embeds, std::set out_layers) { // input_ids: [N, n_token] auto model = std::dynamic_pointer_cast(blocks["model"]); - auto x = model->forward(ctx, input_ids, input_pos, image_embeds, out_layers); + auto x = model->forward(ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers); return x; } @@ -1063,6 +1068,7 @@ namespace LLM { LLM model; std::vector input_pos_vec; + std::vector attention_mask_vec; std::vector window_mask_vec; std::vector window_index_vec; std::vector window_inverse_index_vec; @@ -1157,9 +1163,10 @@ namespace LLM { struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* input_ids, struct ggml_tensor* input_pos, + struct ggml_tensor* attention_mask, std::vector> image_embeds, std::set out_layers) { - auto hidden_states = model.forward(ctx, input_ids, input_pos, image_embeds, out_layers); // [N, n_token, hidden_size] + auto hidden_states = model.forward(ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers); // [N, n_token, hidden_size] return hidden_states; } @@ -1174,6 +1181,7 @@ namespace LLM { } struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids, + struct ggml_tensor* attention_mask, std::vector> image_embeds, std::set out_layers) { struct ggml_cgraph* gf = ggml_new_graph(compute_ctx); @@ -1205,9 +1213,26 @@ namespace LLM { input_pos_vec.size()); set_backend_tensor_data(input_pos, input_pos_vec.data()); + if (attention_mask != nullptr) { + attention_mask = to_backend(attention_mask); + } else { + attention_mask_vec.resize(n_tokens * n_tokens); + for (int i0 = 0; i0 < n_tokens; i0++) { + for (int i1 = 0; i1 < n_tokens; i1++) { + float value = 0.f; + if (i0 > i1) { + value = -INFINITY; + } + attention_mask_vec[i1 * n_tokens + i0] = value; + } + } + attention_mask = ggml_new_tensor_2d(compute_ctx, GGML_TYPE_F32, n_tokens, n_tokens); + set_backend_tensor_data(attention_mask, attention_mask_vec.data()); + } + auto runner_ctx = get_context(); - struct ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, input_pos, image_embeds, out_layers); + struct ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers); ggml_build_forward_expand(gf, hidden_states); @@ -1216,22 +1241,23 @@ namespace LLM { bool compute(const int n_threads, struct ggml_tensor* input_ids, + struct ggml_tensor* attention_mask, std::vector> image_embeds, std::set out_layers, ggml_tensor** output, ggml_context* output_ctx = nullptr) { auto get_graph = [&]() -> struct ggml_cgraph* { - return build_graph(input_ids, image_embeds, out_layers); + return build_graph(input_ids, attention_mask, image_embeds, out_layers); }; return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx); } int64_t get_num_image_tokens(int64_t t, int64_t h, int64_t w) { - int grid_t = 1; - int grid_h = h / params.vision.patch_size; - int grid_w = w / params.vision.patch_size; - int llm_grid_h = grid_h / params.vision.spatial_merge_size; - int llm_grid_w = grid_w / 
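+// Illustrative sketch (not part of the patch): the default attention_mask built above is a
+// standard causal mask, i.e. token q may attend to positions k <= q and all future positions
+// receive -inf before the softmax.
+#include <cmath>
+#include <vector>
+static std::vector<float> causal_mask(int n_tokens) {
+    std::vector<float> mask(n_tokens * n_tokens, 0.0f);
+    for (int q = 0; q < n_tokens; q++) {           // query position (row)
+        for (int k = q + 1; k < n_tokens; k++) {   // key positions after the query
+            mask[q * n_tokens + k] = -INFINITY;
+        }
+    }
+    return mask;
+}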
params.vision.spatial_merge_size; + int64_t grid_t = 1; + int64_t grid_h = h / params.vision.patch_size; + int64_t grid_w = w / params.vision.patch_size; + int64_t llm_grid_h = grid_h / params.vision.spatial_merge_size; + int64_t llm_grid_w = grid_w / params.vision.spatial_merge_size; return grid_t * grid_h * grid_w; } @@ -1269,8 +1295,8 @@ namespace LLM { GGML_ASSERT(image->ne[0] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0); int grid_t = 1; - int grid_h = image->ne[1] / params.vision.patch_size; - int grid_w = image->ne[0] / params.vision.patch_size; + int grid_h = static_cast(image->ne[1]) / params.vision.patch_size; + int grid_w = static_cast(image->ne[0]) / params.vision.patch_size; int llm_grid_h = grid_h / params.vision.spatial_merge_size; int llm_grid_w = grid_w / params.vision.spatial_merge_size; int vit_merger_window_size = params.vision.window_size / params.vision.patch_size / params.vision.spatial_merge_size; @@ -1358,14 +1384,14 @@ namespace LLM { set_backend_tensor_data(window_mask, window_mask_vec.data()); // pe - int head_dim = params.vision.hidden_size / params.vision.num_heads; + int head_dim = static_cast(params.vision.hidden_size / params.vision.num_heads); pe_vec = Rope::gen_qwen2vl_pe(grid_h, grid_w, params.vision.spatial_merge_size, window_inverse_index_vec, - 10000.f, + 10000, {head_dim / 2, head_dim / 2}); - int pos_len = pe_vec.size() / head_dim / 2; + int pos_len = static_cast(pe_vec.size() / head_dim / 2); // LOG_DEBUG("pos_len %d", pos_len); auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, head_dim / 2, pos_len); // pe->data = pe_vec.data(); @@ -1485,13 +1511,13 @@ namespace LLM { print_ggml_tensor(image, false, "image"); struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); model.encode_image(8, image, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out, false, "image_embed"); image_embed = out; - LOG_DEBUG("llm encode_image test done in %dms", t1 - t0); + LOG_DEBUG("llm encode_image test done in %lldms", t1 - t0); } std::string placeholder = "<|image_pad|>"; @@ -1524,12 +1550,12 @@ namespace LLM { auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens); struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); - model.compute(8, input_ids, image_embeds, {}, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); + model.compute(8, input_ids, nullptr, image_embeds, {}, &out, work_ctx); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out); - LOG_DEBUG("llm test done in %dms", t1 - t0); + LOG_DEBUG("llm test done in %lldms", t1 - t0); } else if (test_vit) { // auto image = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 280, 280, 3); // ggml_set_f32(image, 0.f); @@ -1537,16 +1563,16 @@ namespace LLM { print_ggml_tensor(image, false, "image"); struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); model.encode_image(8, image, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out, false, "out"); // auto ref_out = load_tensor_from_file(work_ctx, "qwen2vl.bin"); // ggml_ext_tensor_diff(ref_out, out, 0.01f); - LOG_DEBUG("llm test done in %dms", t1 - t0); + LOG_DEBUG("llm test done in %lldms", t1 - t0); } else if (test_mistral) { std::pair prompt_attn_range; std::string text = "[SYSTEM_PROMPT]You are an AI that reasons about image descriptions. 
You give structured responses focusing on object relationships, object\nattribution and actions without speculation.[/SYSTEM_PROMPT][INST]"; @@ -1564,12 +1590,12 @@ namespace LLM { auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens); struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); - model.compute(8, input_ids, {}, {10, 20, 30}, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); + model.compute(8, input_ids, nullptr, {}, {10, 20, 30}, &out, work_ctx); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out); - LOG_DEBUG("llm test done in %dms", t1 - t0); + LOG_DEBUG("llm test done in %lldms", t1 - t0); } else if (test_qwen3) { std::pair prompt_attn_range; std::string text = "<|im_start|>user\n"; @@ -1587,12 +1613,12 @@ namespace LLM { auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens); struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); - model.compute(8, input_ids, {}, {35}, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); + model.compute(8, input_ids, nullptr, {}, {35}, &out, work_ctx); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out); - LOG_DEBUG("llm test done in %dms", t1 - t0); + LOG_DEBUG("llm test done in %lldms", t1 - t0); } else { std::pair prompt_attn_range; std::string text = "<|im_start|>system\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:<|im_end|>\n<|im_start|>user\n"; @@ -1610,12 +1636,12 @@ namespace LLM { auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens); struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); - model.compute(8, input_ids, {}, {}, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); + model.compute(8, input_ids, nullptr, {}, {}, &out, work_ctx); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out); - LOG_DEBUG("llm test done in %dms", t1 - t0); + LOG_DEBUG("llm test done in %lldms", t1 - t0); } } diff --git a/lora.hpp b/src/lora.hpp similarity index 84% rename from lora.hpp rename to src/lora.hpp index b847f044..d2f91cd4 100644 --- a/lora.hpp +++ b/src/lora.hpp @@ -195,7 +195,7 @@ struct LoraModel : public GGMLRunner { scale_value *= multiplier; auto curr_updown = ggml_ext_merge_lora(ctx, lora_down, lora_up, lora_mid); - curr_updown = ggml_scale_inplace(ctx, curr_updown, scale_value); + curr_updown = ggml_ext_scale(ctx, curr_updown, scale_value, true); if (updown == nullptr) { updown = curr_updown; @@ -235,7 +235,7 @@ struct LoraModel : public GGMLRunner { float scale_value = 1.0f; scale_value *= multiplier; - curr_updown = ggml_scale_inplace(ctx, curr_updown, scale_value); + curr_updown = ggml_ext_scale(ctx, curr_updown, scale_value, true); if (updown == nullptr) { updown = curr_updown; @@ -340,7 +340,7 @@ struct LoraModel : public GGMLRunner { struct ggml_tensor* updown_1 = ggml_ext_merge_lora(ctx, hada_1_down, hada_1_up, hada_1_mid); struct ggml_tensor* updown_2 = ggml_ext_merge_lora(ctx, hada_2_down, hada_2_up, hada_2_mid); auto curr_updown = ggml_mul_inplace(ctx, updown_1, updown_2); - curr_updown = ggml_scale_inplace(ctx, curr_updown, scale_value); + curr_updown = ggml_ext_scale(ctx, curr_updown, scale_value, true); if (updown == nullptr) { updown = curr_updown; } else { @@ -456,7 +456,7 @@ struct LoraModel : public GGMLRunner { scale_value *= multiplier; auto curr_updown = ggml_ext_kronecker(ctx, lokr_w1, lokr_w2); - curr_updown = ggml_scale_inplace(ctx, curr_updown, scale_value); + curr_updown = ggml_ext_scale(ctx, curr_updown, 
scale_value, true); if (updown == nullptr) { updown = curr_updown; @@ -468,10 +468,10 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_tensor* model_tensor, bool with_lora = true) { + ggml_tensor* get_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_tensor* model_tensor, bool with_lora_and_lokr = true) { // lora ggml_tensor* diff = nullptr; - if (with_lora) { + if (with_lora_and_lokr) { diff = get_lora_weight_diff(model_tensor_name, ctx); } // diff @@ -483,7 +483,7 @@ struct LoraModel : public GGMLRunner { diff = get_loha_weight_diff(model_tensor_name, ctx); } // lokr - if (diff == nullptr) { + if (diff == nullptr && with_lora_and_lokr) { diff = get_lokr_weight_diff(model_tensor_name, ctx); } if (diff != nullptr) { @@ -514,6 +514,108 @@ struct LoraModel : public GGMLRunner { } else { key = model_tensor_name + "." + std::to_string(index); } + bool is_conv2d = forward_params.op_type == WeightAdapter::ForwardParams::op_type_t::OP_CONV2D; + + std::string lokr_w1_name = "lora." + key + ".lokr_w1"; + std::string lokr_w1_a_name = "lora." + key + ".lokr_w1_a"; + // if either of these is found, then we have a lokr lora + auto iter = lora_tensors.find(lokr_w1_name); + auto iter_a = lora_tensors.find(lokr_w1_a_name); + if (iter != lora_tensors.end() || iter_a != lora_tensors.end()) { + std::string lokr_w1_b_name = "lora." + key + ".lokr_w1_b"; + std::string lokr_w2_name = "lora." + key + ".lokr_w2"; + std::string lokr_w2_a_name = "lora." + key + ".lokr_w2_a"; + std::string lokr_w2_b_name = "lora." + key + ".lokr_w2_b"; + std::string alpha_name = "lora." + key + ".alpha"; + + ggml_tensor* lokr_w1 = nullptr; + ggml_tensor* lokr_w1_a = nullptr; + ggml_tensor* lokr_w1_b = nullptr; + ggml_tensor* lokr_w2 = nullptr; + ggml_tensor* lokr_w2_a = nullptr; + ggml_tensor* lokr_w2_b = nullptr; + + if (iter != lora_tensors.end()) { + lokr_w1 = iter->second; + } + iter = iter_a; + if (iter != lora_tensors.end()) { + lokr_w1_a = iter->second; + } + iter = lora_tensors.find(lokr_w1_b_name); + if (iter != lora_tensors.end()) { + lokr_w1_b = iter->second; + } + + iter = lora_tensors.find(lokr_w2_name); + if (iter != lora_tensors.end()) { + lokr_w2 = iter->second; + if (is_conv2d && lokr_w2->type != GGML_TYPE_F16) { + lokr_w2 = ggml_cast(ctx, lokr_w2, GGML_TYPE_F16); + } + } + iter = lora_tensors.find(lokr_w2_a_name); + if (iter != lora_tensors.end()) { + lokr_w2_a = iter->second; + if (is_conv2d && lokr_w2_a->type != GGML_TYPE_F16) { + lokr_w2_a = ggml_cast(ctx, lokr_w2_a, GGML_TYPE_F16); + } + } + iter = lora_tensors.find(lokr_w2_b_name); + if (iter != lora_tensors.end()) { + lokr_w2_b = iter->second; + if (is_conv2d && lokr_w2_b->type != GGML_TYPE_F16) { + lokr_w2_b = ggml_cast(ctx, lokr_w2_b, GGML_TYPE_F16); + } + } + + int rank = 1; + if (lokr_w1_b) { + rank = (int)lokr_w1_b->ne[ggml_n_dims(lokr_w1_b) - 1]; + } + if (lokr_w2_b) { + rank = (int)lokr_w2_b->ne[ggml_n_dims(lokr_w2_b) - 1]; + } + + float scale_value = 1.0f; + iter = lora_tensors.find(alpha_name); + if (iter != lora_tensors.end()) { + float alpha = ggml_ext_backend_tensor_get_f32(iter->second); + scale_value = alpha / rank; + applied_lora_tensors.insert(alpha_name); + } + + if (rank == 1) { + scale_value = 1.0f; + } + scale_value *= multiplier; + + auto curr_out_diff = ggml_ext_lokr_forward(ctx, x, lokr_w1, lokr_w1_a, lokr_w1_b, lokr_w2, lokr_w2_a, lokr_w2_b, is_conv2d, forward_params.conv2d, scale_value); + if (out_diff == 
nullptr) { + out_diff = curr_out_diff; + } else { + out_diff = ggml_concat(ctx, out_diff, curr_out_diff, 0); + } + + if (lokr_w1) + applied_lora_tensors.insert(lokr_w1_name); + if (lokr_w1_a) + applied_lora_tensors.insert(lokr_w1_a_name); + if (lokr_w1_b) + applied_lora_tensors.insert(lokr_w1_b_name); + if (lokr_w2) + applied_lora_tensors.insert(lokr_w2_name); + if (lokr_w2_a) + applied_lora_tensors.insert(lokr_w2_name); + if (lokr_w2_b) + applied_lora_tensors.insert(lokr_w2_b_name); + applied_lora_tensors.insert(alpha_name); + + index++; + continue; + } + + // not a lokr, normal lora path std::string lora_down_name = "lora." + key + ".lora_down"; std::string lora_up_name = "lora." + key + ".lora_up"; @@ -525,9 +627,7 @@ struct LoraModel : public GGMLRunner { ggml_tensor* lora_mid = nullptr; ggml_tensor* lora_down = nullptr; - bool is_conv2d = forward_params.op_type == WeightAdapter::ForwardParams::op_type_t::OP_CONV2D; - - auto iter = lora_tensors.find(lora_up_name); + iter = lora_tensors.find(lora_up_name); if (iter != lora_tensors.end()) { lora_up = iter->second; if (is_conv2d && lora_up->type != GGML_TYPE_F16) { @@ -599,6 +699,8 @@ struct LoraModel : public GGMLRunner { forward_params.conv2d.d0, forward_params.conv2d.d1, forward_params.conv2d.direct, + forward_params.conv2d.circular_x, + forward_params.conv2d.circular_y, forward_params.conv2d.scale); if (lora_mid) { lx = ggml_ext_conv_2d(ctx, @@ -612,6 +714,8 @@ struct LoraModel : public GGMLRunner { 1, 1, forward_params.conv2d.direct, + forward_params.conv2d.circular_x, + forward_params.conv2d.circular_y, forward_params.conv2d.scale); } lx = ggml_ext_conv_2d(ctx, @@ -625,10 +729,12 @@ struct LoraModel : public GGMLRunner { 1, 1, forward_params.conv2d.direct, + forward_params.conv2d.circular_x, + forward_params.conv2d.circular_y, forward_params.conv2d.scale); } - auto curr_out_diff = ggml_scale_inplace(ctx, lx, scale_value); + auto curr_out_diff = ggml_ext_scale(ctx, lx, scale_value, true); if (out_diff == nullptr) { out_diff = curr_out_diff; @@ -735,9 +841,9 @@ public: : lora_models(lora_models) { } - ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name, bool with_lora) { + ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name, bool with_lora_and_lokr) { for (auto& lora_model : lora_models) { - ggml_tensor* diff = lora_model->get_weight_diff(weight_name, ctx, weight, with_lora); + ggml_tensor* diff = lora_model->get_weight_diff(weight_name, ctx, weight, with_lora_and_lokr); if (diff == nullptr) { continue; } @@ -779,6 +885,8 @@ public: forward_params.conv2d.d0, forward_params.conv2d.d1, forward_params.conv2d.direct, + forward_params.conv2d.circular_x, + forward_params.conv2d.circular_y, forward_params.conv2d.scale); } for (auto& lora_model : lora_models) { diff --git a/ltxv.hpp b/src/ltxv.hpp similarity index 98% rename from ltxv.hpp rename to src/ltxv.hpp index 0a2877a8..9dcdd4b2 100644 --- a/ltxv.hpp +++ b/src/ltxv.hpp @@ -1,8 +1,7 @@ #ifndef __LTXV_HPP__ #define __LTXV_HPP__ -#include "common.hpp" -#include "ggml_extend.hpp" +#include "common_block.hpp" namespace LTXV { diff --git a/mmdit.hpp b/src/mmdit.hpp similarity index 86% rename from mmdit.hpp rename to src/mmdit.hpp index 38bdc2e7..ba1c35d6 100644 --- a/mmdit.hpp +++ b/src/mmdit.hpp @@ -33,7 +33,7 @@ public: auto fc2 = std::dynamic_pointer_cast(blocks["fc2"]); x = fc1->forward(ctx, x); - x = ggml_gelu_inplace(ctx->ggml_ctx, x); + x = ggml_ext_gelu(ctx->ggml_ctx, x, true); x = 
fc2->forward(ctx, x); return x; } @@ -97,12 +97,12 @@ public: struct TimestepEmbedder : public GGMLBlock { // Embeds scalar timesteps into vector representations. protected: - int64_t frequency_embedding_size; + int frequency_embedding_size; public: TimestepEmbedder(int64_t hidden_size, - int64_t frequency_embedding_size = 256, - int64_t out_channels = 0) + int frequency_embedding_size = 256, + int64_t out_channels = 0) : frequency_embedding_size(frequency_embedding_size) { if (out_channels <= 0) { out_channels = hidden_size; @@ -167,11 +167,11 @@ public: blocks["proj"] = std::shared_ptr(new Linear(dim, dim)); } if (qk_norm == "rms") { - blocks["ln_q"] = std::shared_ptr(new RMSNorm(d_head, 1.0e-6)); - blocks["ln_k"] = std::shared_ptr(new RMSNorm(d_head, 1.0e-6)); + blocks["ln_q"] = std::shared_ptr(new RMSNorm(d_head, 1.0e-6f)); + blocks["ln_k"] = std::shared_ptr(new RMSNorm(d_head, 1.0e-6f)); } else if (qk_norm == "ln") { - blocks["ln_q"] = std::shared_ptr(new LayerNorm(d_head, 1.0e-6)); - blocks["ln_k"] = std::shared_ptr(new LayerNorm(d_head, 1.0e-6)); + blocks["ln_q"] = std::shared_ptr(new LayerNorm(d_head, 1.0e-6f)); + blocks["ln_k"] = std::shared_ptr(new LayerNorm(d_head, 1.0e-6f)); } } @@ -211,8 +211,8 @@ public: struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { auto qkv = pre_attention(ctx, x); - x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, false, ctx->flash_attn_enabled); // [N, n_token, dim] - x = post_attention(ctx, x); // [N, n_token, dim] + x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, dim] + x = post_attention(ctx, x); // [N, n_token, dim] return x; } }; @@ -284,23 +284,19 @@ public: auto attn2 = std::dynamic_pointer_cast(blocks["attn2"]); auto adaLN_modulation_1 = std::dynamic_pointer_cast(blocks["adaLN_modulation.1"]); - int64_t n_mods = 9; - auto m = adaLN_modulation_1->forward(ctx, ggml_silu(ctx->ggml_ctx, c)); // [N, n_mods * hidden_size] - m = ggml_reshape_3d(ctx->ggml_ctx, m, c->ne[0], n_mods, c->ne[1]); // [N, n_mods, hidden_size] - m = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, m, 0, 2, 1, 3)); // [n_mods, N, hidden_size] + int n_mods = 9; + auto m = adaLN_modulation_1->forward(ctx, ggml_silu(ctx->ggml_ctx, c)); // [N, n_mods * hidden_size] + auto m_vec = ggml_ext_chunk(ctx->ggml_ctx, m, n_mods, 0); - int64_t offset = m->nb[1] * m->ne[1]; - auto shift_msa = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 0); // [N, hidden_size] - auto scale_msa = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 1); // [N, hidden_size] - auto gate_msa = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 2); // [N, hidden_size] - - auto shift_mlp = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 3); // [N, hidden_size] - auto scale_mlp = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 4); // [N, hidden_size] - auto gate_mlp = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 5); // [N, hidden_size] - - auto shift_msa2 = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 6); // [N, hidden_size] - auto scale_msa2 = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 7); // [N, hidden_size] - auto gate_msa2 = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 8); // [N, hidden_size] + auto shift_msa = m_vec[0]; // 
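+// Illustrative sketch (not part of the patch) of what ggml_ext_chunk does in the adaLN paths
+// above: the projection [N, n_mods * hidden_size] is sliced into n_mods tensors of
+// [N, hidden_size] along the feature dimension. Plain-C++ equivalent for a single sample;
+// the helper name is hypothetical:
+#include <cstddef>
+#include <vector>
+static std::vector<std::vector<float>> chunk_features(const std::vector<float>& row, int n_chunks) {
+    const std::size_t hidden = row.size() / n_chunks;
+    std::vector<std::vector<float>> parts(n_chunks);
+    for (int i = 0; i < n_chunks; i++) {
+        parts[i].assign(row.begin() + i * hidden, row.begin() + (i + 1) * hidden);
+    }
+    return parts;
+}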
[N, hidden_size] + auto scale_msa = m_vec[1]; // [N, hidden_size] + auto gate_msa = m_vec[2]; // [N, hidden_size] + auto shift_mlp = m_vec[3]; // [N, hidden_size] + auto scale_mlp = m_vec[4]; // [N, hidden_size] + auto gate_mlp = m_vec[5]; // [N, hidden_size] + auto shift_msa2 = m_vec[6]; // [N, hidden_size] + auto scale_msa2 = m_vec[7]; // [N, hidden_size] + auto gate_msa2 = m_vec[8]; // [N, hidden_size] auto x_norm = norm1->forward(ctx, x); @@ -322,22 +318,20 @@ public: auto attn = std::dynamic_pointer_cast(blocks["attn"]); auto adaLN_modulation_1 = std::dynamic_pointer_cast(blocks["adaLN_modulation.1"]); - int64_t n_mods = 6; + int n_mods = 6; if (pre_only) { n_mods = 2; } - auto m = adaLN_modulation_1->forward(ctx, ggml_silu(ctx->ggml_ctx, c)); // [N, n_mods * hidden_size] - m = ggml_reshape_3d(ctx->ggml_ctx, m, c->ne[0], n_mods, c->ne[1]); // [N, n_mods, hidden_size] - m = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, m, 0, 2, 1, 3)); // [n_mods, N, hidden_size] + auto m = adaLN_modulation_1->forward(ctx, ggml_silu(ctx->ggml_ctx, c)); // [N, n_mods * hidden_size] + auto m_vec = ggml_ext_chunk(ctx->ggml_ctx, m, n_mods, 0); - int64_t offset = m->nb[1] * m->ne[1]; - auto shift_msa = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 0); // [N, hidden_size] - auto scale_msa = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 1); // [N, hidden_size] + auto shift_msa = m_vec[0]; // [N, hidden_size] + auto scale_msa = m_vec[1]; // [N, hidden_size] if (!pre_only) { - auto gate_msa = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 2); // [N, hidden_size] - auto shift_mlp = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 3); // [N, hidden_size] - auto scale_mlp = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 4); // [N, hidden_size] - auto gate_mlp = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 5); // [N, hidden_size] + auto gate_msa = m_vec[2]; // [N, hidden_size] + auto shift_mlp = m_vec[3]; // [N, hidden_size] + auto scale_mlp = m_vec[4]; // [N, hidden_size] + auto gate_mlp = m_vec[5]; // [N, hidden_size] auto attn_in = modulate(ctx->ggml_ctx, norm1->forward(ctx, x), shift_msa, scale_msa); @@ -439,8 +433,8 @@ public: auto qkv2 = std::get<1>(qkv_intermediates); auto intermediates = std::get<2>(qkv_intermediates); - auto attn_out = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, false, ctx->flash_attn_enabled); // [N, n_token, dim] - auto attn2_out = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv2[0], qkv2[1], qkv2[2], num_heads, nullptr, false, false, ctx->flash_attn_enabled); // [N, n_token, dim] + auto attn_out = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, dim] + auto attn2_out = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv2[0], qkv2[1], qkv2[2], num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, dim] x = post_attention_x(ctx, attn_out, attn2_out, @@ -456,7 +450,7 @@ public: auto qkv = qkv_intermediates.first; auto intermediates = qkv_intermediates.second; - auto attn_out = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, false, ctx->flash_attn_enabled); // [N, n_token, dim] + auto attn_out = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, 
ctx->flash_attn_enabled); // [N, n_token, dim] x = post_attention(ctx, attn_out, intermediates[0], @@ -500,26 +494,24 @@ block_mixing(GGMLRunnerContext* ctx, qkv.push_back(ggml_concat(ctx->ggml_ctx, context_qkv[i], x_qkv[i], 1)); } - auto attn = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], x_block->num_heads, nullptr, false, false, ctx->flash_attn_enabled); // [N, n_context + n_token, hidden_size] - attn = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, attn, 0, 2, 1, 3)); // [n_context + n_token, N, hidden_size] + auto attn = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], x_block->num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_context + n_token, hidden_size] + auto context_attn = ggml_view_3d(ctx->ggml_ctx, attn, attn->ne[0], - attn->ne[1], context->ne[1], + attn->ne[2], attn->nb[1], attn->nb[2], - 0); // [n_context, N, hidden_size] - context_attn = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, context_attn, 0, 2, 1, 3)); // [N, n_context, hidden_size] + 0); // [N, n_context, hidden_size] auto x_attn = ggml_view_3d(ctx->ggml_ctx, attn, attn->ne[0], - attn->ne[1], x->ne[1], + attn->ne[2], attn->nb[1], attn->nb[2], - attn->nb[2] * context->ne[1]); // [n_token, N, hidden_size] - x_attn = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x_attn, 0, 2, 1, 3)); // [N, n_token, hidden_size] + context->ne[1] * attn->nb[1]); // [N, n_token, hidden_size] if (!context_block->pre_only) { context = context_block->post_attention(ctx, @@ -534,7 +526,7 @@ block_mixing(GGMLRunnerContext* ctx, } if (x_block->self_attn) { - auto attn2 = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, x_qkv2[0], x_qkv2[1], x_qkv2[2], x_block->num_heads, nullptr, false, false, ctx->flash_attn_enabled); // [N, n_token, hidden_size] + auto attn2 = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, x_qkv2[0], x_qkv2[1], x_qkv2[2], x_block->num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, hidden_size] x = x_block->post_attention_x(ctx, x_attn, @@ -604,13 +596,10 @@ public: auto linear = std::dynamic_pointer_cast(blocks["linear"]); auto adaLN_modulation_1 = std::dynamic_pointer_cast(blocks["adaLN_modulation.1"]); - auto m = adaLN_modulation_1->forward(ctx, ggml_silu(ctx->ggml_ctx, c)); // [N, 2 * hidden_size] - m = ggml_reshape_3d(ctx->ggml_ctx, m, c->ne[0], 2, c->ne[1]); // [N, 2, hidden_size] - m = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, m, 0, 2, 1, 3)); // [2, N, hidden_size] - - int64_t offset = m->nb[1] * m->ne[1]; - auto shift = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 0); // [N, hidden_size] - auto scale = ggml_view_2d(ctx->ggml_ctx, m, m->ne[0], m->ne[1], m->nb[1], offset * 1); // [N, hidden_size] + auto m = adaLN_modulation_1->forward(ctx, ggml_silu(ctx->ggml_ctx, c)); // [N, 2 * hidden_size] + auto m_vec = ggml_ext_chunk(ctx->ggml_ctx, m, 2, 0); + auto shift = m_vec[0]; // [N, hidden_size] + auto scale = m_vec[1]; // [N, hidden_size] x = modulate(ctx->ggml_ctx, norm_final->forward(ctx, x), shift, scale); x = linear->forward(ctx, x); @@ -623,7 +612,7 @@ struct MMDiT : public GGMLBlock { // Diffusion model with a Transformer backbone. 
protected: int64_t input_size = -1; - int64_t patch_size = 2; + int patch_size = 2; int64_t in_channels = 16; int64_t d_self = -1; // >=0 for MMdiT-X int64_t depth = 24; @@ -756,28 +745,6 @@ public: return spatial_pos_embed; } - struct ggml_tensor* unpatchify(struct ggml_context* ctx, - struct ggml_tensor* x, - int64_t h, - int64_t w) { - // x: [N, H*W, patch_size * patch_size * C] - // return: [N, C, H, W] - int64_t n = x->ne[2]; - int64_t c = out_channels; - int64_t p = patch_size; - h = (h + 1) / p; - w = (w + 1) / p; - - GGML_ASSERT(h * w == x->ne[1]); - - x = ggml_reshape_4d(ctx, x, c, p * p, w * h, n); // [N, H*W, P*P, C] - x = ggml_cont(ctx, ggml_permute(ctx, x, 2, 0, 1, 3)); // [N, C, H*W, P*P] - x = ggml_reshape_4d(ctx, x, p, p, w, h * c * n); // [N*C*H, W, P, P] - x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*H, P, W, P] - x = ggml_reshape_4d(ctx, x, p * w, p * h, c, n); // [N, C, H*P, W*P] - return x; - } - struct ggml_tensor* forward_core_with_concat(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* c_mod, @@ -822,11 +789,11 @@ public: auto x_embedder = std::dynamic_pointer_cast(blocks["x_embedder"]); auto t_embedder = std::dynamic_pointer_cast(blocks["t_embedder"]); - int64_t w = x->ne[0]; - int64_t h = x->ne[1]; + int64_t W = x->ne[0]; + int64_t H = x->ne[1]; auto patch_embed = x_embedder->forward(ctx, x); // [N, H*W, hidden_size] - auto pos_embed = cropped_pos_embed(ctx->ggml_ctx, h, w); // [1, H*W, hidden_size] + auto pos_embed = cropped_pos_embed(ctx->ggml_ctx, H, W); // [1, H*W, hidden_size] x = ggml_add(ctx->ggml_ctx, patch_embed, pos_embed); // [N, H*W, hidden_size] auto c = t_embedder->forward(ctx, t); // [N, hidden_size] @@ -845,7 +812,7 @@ public: x = forward_core_with_concat(ctx, x, c, context, skip_layers); // (N, H*W, patch_size ** 2 * out_channels) - x = unpatchify(ctx->ggml_ctx, x, h, w); // [N, C, H, W] + x = DiT::unpatchify_and_crop(ctx->ggml_ctx, x, H, W, patch_size, patch_size, /*patch_last*/ false); // [N, C, H, W] return x; } @@ -943,12 +910,12 @@ struct MMDiTRunner : public GGMLRunner { struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); compute(8, x, timesteps, context, y, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out); - LOG_DEBUG("mmdit test done in %dms", t1 - t0); + LOG_DEBUG("mmdit test done in %lldms", t1 - t0); } } @@ -983,4 +950,4 @@ struct MMDiTRunner : public GGMLRunner { } }; -#endif \ No newline at end of file +#endif diff --git a/model.cpp b/src/model.cpp similarity index 95% rename from model.cpp rename to src/model.cpp index 01a8c45d..77b032c2 100644 --- a/model.cpp +++ b/src/model.cpp @@ -16,10 +16,6 @@ #include "model.h" #include "stable-diffusion.h" #include "util.h" -#include "vocab.hpp" -#include "vocab_mistral.hpp" -#include "vocab_qwen.hpp" -#include "vocab_umt5.hpp" #include "ggml-alloc.h" #include "ggml-backend.h" @@ -376,7 +372,11 @@ bool ModelLoader::init_from_file(const std::string& file_path, const std::string LOG_INFO("load %s using checkpoint format", file_path.c_str()); return init_from_ckpt_file(file_path, prefix); } else { - LOG_WARN("unknown format %s", file_path.c_str()); + if (file_exists(file_path)) { + LOG_WARN("unknown format %s", file_path.c_str()); + } else { + LOG_WARN("file %s not found", file_path.c_str()); + } return false; } } @@ -436,7 +436,7 @@ bool ModelLoader::init_from_gguf_file(const std::string& file_path, const std::s name, gguf_tensor_info.type, gguf_tensor_info.shape.data(), - 
gguf_tensor_info.shape.size(), + static_cast(gguf_tensor_info.shape.size()), file_index, data_offset + gguf_tensor_info.offset); @@ -448,7 +448,7 @@ bool ModelLoader::init_from_gguf_file(const std::string& file_path, const std::s return true; } - int n_tensors = gguf_get_n_tensors(ctx_gguf_); + int n_tensors = static_cast(gguf_get_n_tensors(ctx_gguf_)); size_t total_size = 0; size_t data_offset = gguf_get_data_offset(ctx_gguf_); @@ -1034,10 +1034,14 @@ SDVersion ModelLoader::get_sd_version() { bool is_xl = false; bool is_flux = false; + bool is_flux2 = false; + bool has_single_block_47 = false; bool is_wan = false; int64_t patch_embedding_channels = 0; bool has_img_emb = false; bool has_middle_block_1 = false; + bool has_output_block_311 = false; + bool has_output_block_71 = false; for (auto& [name, tensor_storage] : tensor_storage_map) { if (!(is_xl)) { @@ -1053,8 +1057,14 @@ SDVersion ModelLoader::get_sd_version() { if (tensor_storage.name.find("model.diffusion_model.transformer_blocks.0.img_mod.1.weight") != std::string::npos) { return VERSION_QWEN_IMAGE; } + if (tensor_storage.name.find("llm_adapter.blocks.0.cross_attn.q_proj.weight") != std::string::npos) { + return VERSION_ANIMA; + } if (tensor_storage.name.find("model.diffusion_model.double_stream_modulation_img.lin.weight") != std::string::npos) { - return VERSION_FLUX2; + is_flux2 = true; + } + if (tensor_storage.name.find("single_blocks.47.linear1.weight") != std::string::npos) { + has_single_block_47 = true; } if (tensor_storage.name.find("model.diffusion_model.double_blocks.0.img_mlp.gate_proj.weight") != std::string::npos) { return VERSION_OVIS_IMAGE; @@ -1094,6 +1104,12 @@ SDVersion ModelLoader::get_sd_version() { tensor_storage.name.find("unet.mid_block.resnets.1.") != std::string::npos) { has_middle_block_1 = true; } + if (tensor_storage.name.find("model.diffusion_model.output_blocks.3.1.transformer_blocks.1") != std::string::npos) { + has_output_block_311 = true; + } + if (tensor_storage.name.find("model.diffusion_model.output_blocks.7.1") != std::string::npos) { + has_output_block_71 = true; + } if (tensor_storage.name == "cond_stage_model.transformer.text_model.embeddings.token_embedding.weight" || tensor_storage.name == "cond_stage_model.model.token_embedding.weight" || tensor_storage.name == "text_model.embeddings.token_embedding.weight" || @@ -1129,12 +1145,15 @@ SDVersion ModelLoader::get_sd_version() { return VERSION_SDXL_PIX2PIX; } if (!has_middle_block_1) { + if (!has_output_block_311) { + return VERSION_SDXL_VEGA; + } return VERSION_SDXL_SSD1B; } return VERSION_SDXL; } - if (is_flux) { + if (is_flux && !is_flux2) { if (input_block_weight.ne[0] == 384) { return VERSION_FLUX_FILL; } @@ -1147,6 +1166,13 @@ SDVersion ModelLoader::get_sd_version() { return VERSION_FLUX; } + if (is_flux2) { + if (has_single_block_47) { + return VERSION_FLUX2; + } + return VERSION_FLUX2_KLEIN; + } + if (token_embedding_weight.ne[0] == 768) { if (is_inpaint) { return VERSION_SD1_INPAINT; @@ -1155,6 +1181,9 @@ SDVersion ModelLoader::get_sd_version() { return VERSION_SD1_PIX2PIX; } if (!has_middle_block_1) { + if (!has_output_block_71) { + return VERSION_SDXS; + } return VERSION_SD1_TINY_UNET; } return VERSION_SD1; @@ -1310,37 +1339,7 @@ void ModelLoader::set_wtype_override(ggml_type wtype, std::string tensor_type_ru } } -std::string ModelLoader::load_merges() { - std::string merges_utf8_str(reinterpret_cast(merges_utf8_c_str), sizeof(merges_utf8_c_str)); - return merges_utf8_str; -} - -std::string ModelLoader::load_qwen2_merges() { - 
std::string merges_utf8_str(reinterpret_cast(qwen2_merges_utf8_c_str), sizeof(qwen2_merges_utf8_c_str)); - return merges_utf8_str; -} - -std::string ModelLoader::load_mistral_merges() { - std::string merges_utf8_str(reinterpret_cast(mistral_merges_utf8_c_str), sizeof(mistral_merges_utf8_c_str)); - return merges_utf8_str; -} - -std::string ModelLoader::load_mistral_vocab_json() { - std::string json_str(reinterpret_cast(mistral_vocab_json_utf8_c_str), sizeof(mistral_vocab_json_utf8_c_str)); - return json_str; -} - -std::string ModelLoader::load_t5_tokenizer_json() { - std::string json_str(reinterpret_cast(t5_tokenizer_json_str), sizeof(t5_tokenizer_json_str)); - return json_str; -} - -std::string ModelLoader::load_umt5_tokenizer_json() { - std::string json_str(reinterpret_cast(umt5_tokenizer_json_str), sizeof(umt5_tokenizer_json_str)); - return json_str; -} - -bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads_p) { +bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads_p, bool enable_mmap) { int64_t process_time_ms = 0; std::atomic read_time_ms(0); std::atomic memcpy_time_ms(0); @@ -1390,6 +1389,15 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread } } + std::unique_ptr mmapped; + if (enable_mmap && !is_zip) { + LOG_DEBUG("using mmap for I/O"); + mmapped = MmapWrapper::create(file_path); + if (!mmapped) { + LOG_WARN("failed to memory-map '%s'", file_path.c_str()); + } + } + int n_threads = is_zip ? 1 : std::min(num_threads_to_use, (int)file_tensors.size()); if (n_threads < 1) { n_threads = 1; @@ -1411,7 +1419,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread failed = true; return; } - } else { + } else if (!mmapped) { file.open(file_path, std::ios::binary); if (!file.is_open()) { LOG_ERROR("failed to open '%s'", file_path.c_str()); @@ -1464,6 +1472,11 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread zip_entry_noallocread(zip, (void*)buf, n); } zip_entry_close(zip); + } else if (mmapped) { + if (!mmapped->copy_data(buf, n, tensor_storage.offset)) { + LOG_ERROR("read tensor data failed: '%s'", file_path.c_str()); + failed = true; + } } else { file.seekg(tensor_storage.offset); file.read(buf, n); @@ -1556,7 +1569,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread break; } size_t curr_num = total_tensors_processed + current_idx; - pretty_progress(curr_num, total_tensors_to_process, (ggml_time_ms() - t_start) / 1000.0f / (curr_num + 1e-6f)); + pretty_progress(static_cast(curr_num), static_cast(total_tensors_to_process), (ggml_time_ms() - t_start) / 1000.0f / (curr_num + 1e-6f)); std::this_thread::sleep_for(std::chrono::milliseconds(200)); } @@ -1569,7 +1582,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread break; } total_tensors_processed += file_tensors.size(); - pretty_progress(total_tensors_processed, total_tensors_to_process, (ggml_time_ms() - t_start) / 1000.0f / (total_tensors_processed + 1e-6f)); + pretty_progress(static_cast(total_tensors_processed), static_cast(total_tensors_to_process), (ggml_time_ms() - t_start) / 1000.0f / (total_tensors_processed + 1e-6f)); if (total_tensors_processed < total_tensors_to_process) { printf("\n"); } @@ -1588,7 +1601,8 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread bool ModelLoader::load_tensors(std::map& tensors, std::set ignore_tensors, - int n_threads) { + int n_threads, + bool 
enable_mmap) { std::set tensor_names_in_file; std::mutex tensor_names_mutex; auto on_new_tensor_cb = [&](const TensorStorage& tensor_storage, ggml_tensor** dst_tensor) -> bool { @@ -1631,7 +1645,7 @@ bool ModelLoader::load_tensors(std::map& tenso return true; }; - bool success = load_tensors(on_new_tensor_cb, n_threads); + bool success = load_tensors(on_new_tensor_cb, n_threads, enable_mmap); if (!success) { LOG_ERROR("load tensors from file failed"); return false; @@ -1737,6 +1751,13 @@ bool ModelLoader::save_to_gguf_file(const std::string& file_path, ggml_type type // tensor_storage.ne[0], tensor_storage.ne[1], tensor_storage.ne[2], tensor_storage.ne[3], // tensor->n_dims, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]); + if (!tensor->data) { + GGML_ASSERT(ggml_nelements(tensor) == 0); + // avoid crashing the gguf writer by setting a dummy pointer for zero-sized tensors + LOG_DEBUG("setting dummy pointer for zero-sized tensor %s", name.c_str()); + tensor->data = ggml_get_mem_buffer(ggml_ctx); + } + *dst_tensor = tensor; gguf_add_tensor(gguf_ctx, tensor); @@ -1776,7 +1797,12 @@ int64_t ModelLoader::get_params_mem_size(ggml_backend_t backend, ggml_type type) return mem_size; } -bool convert(const char* input_path, const char* vae_path, const char* output_path, sd_type_t output_type, const char* tensor_type_rules) { +bool convert(const char* input_path, + const char* vae_path, + const char* output_path, + sd_type_t output_type, + const char* tensor_type_rules, + bool convert_name) { ModelLoader model_loader; if (!model_loader.init_from_file(input_path)) { @@ -1790,7 +1816,9 @@ bool convert(const char* input_path, const char* vae_path, const char* output_pa return false; } } - model_loader.convert_tensors_name(); + if (convert_name) { + model_loader.convert_tensors_name(); + } bool success = model_loader.save_to_gguf_file(output_path, (ggml_type)output_type, tensor_type_rules); return success; } diff --git a/model.h b/src/model.h similarity index 94% rename from model.h rename to src/model.h index d38aee1c..5b9ce18a 100644 --- a/model.h +++ b/src/model.h @@ -28,9 +28,11 @@ enum SDVersion { VERSION_SD2, VERSION_SD2_INPAINT, VERSION_SD2_TINY_UNET, + VERSION_SDXS, VERSION_SDXL, VERSION_SDXL_INPAINT, VERSION_SDXL_PIX2PIX, + VERSION_SDXL_VEGA, VERSION_SDXL_SSD1B, VERSION_SVD, VERSION_SD3, @@ -43,14 +45,16 @@ enum SDVersion { VERSION_WAN2_2_I2V, VERSION_WAN2_2_TI2V, VERSION_QWEN_IMAGE, + VERSION_ANIMA, VERSION_FLUX2, + VERSION_FLUX2_KLEIN, VERSION_Z_IMAGE, VERSION_OVIS_IMAGE, VERSION_COUNT, }; static inline bool sd_version_is_sd1(SDVersion version) { - if (version == VERSION_SD1 || version == VERSION_SD1_INPAINT || version == VERSION_SD1_PIX2PIX || version == VERSION_SD1_TINY_UNET) { + if (version == VERSION_SD1 || version == VERSION_SD1_INPAINT || version == VERSION_SD1_PIX2PIX || version == VERSION_SD1_TINY_UNET || version == VERSION_SDXS) { return true; } return false; @@ -64,7 +68,7 @@ static inline bool sd_version_is_sd2(SDVersion version) { } static inline bool sd_version_is_sdxl(SDVersion version) { - if (version == VERSION_SDXL || version == VERSION_SDXL_INPAINT || version == VERSION_SDXL_PIX2PIX || version == VERSION_SDXL_SSD1B) { + if (version == VERSION_SDXL || version == VERSION_SDXL_INPAINT || version == VERSION_SDXL_PIX2PIX || version == VERSION_SDXL_SSD1B || version == VERSION_SDXL_VEGA) { return true; } return false; @@ -99,7 +103,7 @@ static inline bool sd_version_is_flux(SDVersion version) { } static inline bool sd_version_is_flux2(SDVersion version) { - if (version 
== VERSION_FLUX2) { + if (version == VERSION_FLUX2 || version == VERSION_FLUX2_KLEIN) { return true; } return false; @@ -119,6 +123,13 @@ static inline bool sd_version_is_qwen_image(SDVersion version) { return false; } +static inline bool sd_version_is_anima(SDVersion version) { + if (version == VERSION_ANIMA) { + return true; + } + return false; +} + static inline bool sd_version_is_z_image(SDVersion version) { if (version == VERSION_Z_IMAGE) { return true; @@ -143,6 +154,7 @@ static inline bool sd_version_is_dit(SDVersion version) { sd_version_is_sd3(version) || sd_version_is_wan(version) || sd_version_is_qwen_image(version) || + sd_version_is_anima(version) || sd_version_is_z_image(version)) { return true; } @@ -310,10 +322,11 @@ public: std::map get_vae_wtype_stat(); String2TensorStorage& get_tensor_storage_map() { return tensor_storage_map; } void set_wtype_override(ggml_type wtype, std::string tensor_type_rules = ""); - bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads = 0); + bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads = 0, bool use_mmap = false); bool load_tensors(std::map& tensors, std::set ignore_tensors = {}, - int n_threads = 0); + int n_threads = 0, + bool use_mmap = false); std::vector get_tensor_names() const { std::vector names; @@ -327,13 +340,6 @@ public: bool tensor_should_be_converted(const TensorStorage& tensor_storage, ggml_type type); int64_t get_params_mem_size(ggml_backend_t backend, ggml_type type = GGML_TYPE_COUNT); ~ModelLoader() = default; - - static std::string load_merges(); - static std::string load_qwen2_merges(); - static std::string load_mistral_merges(); - static std::string load_mistral_vocab_json(); - static std::string load_t5_tokenizer_json(); - static std::string load_umt5_tokenizer_json(); }; #endif // __MODEL_H__ diff --git a/name_conversion.cpp b/src/name_conversion.cpp similarity index 96% rename from name_conversion.cpp rename to src/name_conversion.cpp index 8b521486..3b3abfb6 100644 --- a/name_conversion.cpp +++ b/src/name_conversion.cpp @@ -653,6 +653,14 @@ std::string convert_diffusers_dit_to_original_lumina2(std::string name) { return name; } +std::string convert_other_dit_to_original_anima(std::string name) { + static const std::string anima_net_prefix = "net."; + if (!starts_with(name, anima_net_prefix)) { + name = anima_net_prefix + name; + } + return name; +} + std::string convert_diffusion_model_name(std::string name, std::string prefix, SDVersion version) { if (sd_version_is_sd1(version) || sd_version_is_sd2(version)) { name = convert_diffusers_unet_to_original_sd1(name); @@ -664,6 +672,8 @@ std::string convert_diffusion_model_name(std::string name, std::string prefix, S name = convert_diffusers_dit_to_original_flux(name); } else if (sd_version_is_z_image(version)) { name = convert_diffusers_dit_to_original_lumina2(name); + } else if (sd_version_is_anima(version)) { + name = convert_other_dit_to_original_anima(name); } return name; } @@ -835,12 +845,14 @@ std::string convert_sep_to_dot(std::string name) { "proj_out", "transformer_blocks", "single_transformer_blocks", + "single_blocks", "diffusion_model", "cond_stage_model", "first_stage_model", "conv_in", "conv_out", "lora_down", + "lora_mid", "lora_up", "diff_b", "hada_w1_a", @@ -876,7 +888,18 @@ std::string convert_sep_to_dot(std::string name) { "ff_context", "norm_added_q", "norm_added_v", - "to_add_out"}; + "to_add_out", + "txt_mod", + "img_mod", + "txt_mlp", + "img_mlp", + "proj_mlp", + "wi_0", + "wi_1", + "norm1_context", + 
"ff_context", + "x_embedder", + }; // record the positions of underscores that should NOT be replaced std::unordered_set protected_positions; @@ -948,6 +971,7 @@ bool is_first_stage_model_name(const std::string& name) { std::string convert_tensor_name(std::string name, SDVersion version) { bool is_lora = false; bool is_lycoris_underline = false; + bool is_underline = false; std::vector lora_prefix_vec = { "lora.lora.", "lora.lora_", @@ -955,12 +979,27 @@ std::string convert_tensor_name(std::string name, SDVersion version) { "lora.lycoris.", "lora.", }; + std::vector underline_lora_prefix_vec = { + "unet_", + "te_", + "te1_", + "te2_", + "te3_", + "vae_", + }; for (const auto& prefix : lora_prefix_vec) { if (starts_with(name, prefix)) { is_lora = true; name = name.substr(prefix.size()); if (contains(prefix, "lycoris_")) { is_lycoris_underline = true; + } else { + for (const auto& underline_lora_prefix : underline_lora_prefix_vec) { + if (starts_with(name, underline_lora_prefix)) { + is_underline = true; + break; + } + } } break; } @@ -969,10 +1008,13 @@ std::string convert_tensor_name(std::string name, SDVersion version) { if (is_lora) { std::map lora_suffix_map = { {".lora_down.weight", ".weight.lora_down"}, + {".lora_mid.weight", ".weight.lora_mid"}, {".lora_up.weight", ".weight.lora_up"}, {".lora.down.weight", ".weight.lora_down"}, + {".lora.mid.weight", ".weight.lora_mid"}, {".lora.up.weight", ".weight.lora_up"}, {"_lora.down.weight", ".weight.lora_down"}, + {"_lora.mid.weight", ".weight.lora_mid"}, {"_lora.up.weight", ".weight.lora_up"}, {".lora_A.weight", ".weight.lora_down"}, {".lora_B.weight", ".weight.lora_up"}, @@ -1020,12 +1062,14 @@ std::string convert_tensor_name(std::string name, SDVersion version) { } } - if (sd_version_is_unet(version) || is_lycoris_underline) { + // LOG_DEBUG("name %s %d", name.c_str(), version); + + if (sd_version_is_unet(version) || is_underline || is_lycoris_underline) { name = convert_sep_to_dot(name); } } - std::vector> prefix_map = { + std::unordered_map prefix_map = { {"diffusion_model.", "model.diffusion_model."}, {"unet.", "model.diffusion_model."}, {"transformer.", "model.diffusion_model."}, // dit @@ -1040,8 +1084,13 @@ std::string convert_tensor_name(std::string name, SDVersion version) { // {"te2.text_model.encoder.layers.", "cond_stage_model.1.model.transformer.resblocks."}, {"te2.", "cond_stage_model.1.transformer."}, {"te1.", "cond_stage_model.transformer."}, + {"te3.", "text_encoders.t5xxl.transformer."}, }; + if (sd_version_is_flux(version)) { + prefix_map["te1."] = "text_encoders.clip_l.transformer."; + } + replace_with_prefix_map(name, prefix_map); // diffusion model diff --git a/name_conversion.h b/src/name_conversion.h similarity index 100% rename from name_conversion.h rename to src/name_conversion.h diff --git a/ordered_map.hpp b/src/ordered_map.hpp similarity index 100% rename from ordered_map.hpp rename to src/ordered_map.hpp diff --git a/pmid.hpp b/src/pmid.hpp similarity index 99% rename from pmid.hpp rename to src/pmid.hpp index d69423ad..8ce78d3a 100644 --- a/pmid.hpp +++ b/src/pmid.hpp @@ -33,7 +33,7 @@ public: x = layer_norm->forward(ctx, x); // x = ggml_add(ctx, ggml_mul_mat(ctx, fc1_w, x), fc1_b); x = fc1->forward(ctx, x); - x = ggml_gelu_inplace(ctx->ggml_ctx, x); + x = ggml_ext_gelu(ctx->ggml_ctx, x, true); x = fc2->forward(ctx, x); // x = ggml_add(ctx, ggml_mul_mat(ctx, fc2_w, x), fc2_b); if (use_residue) @@ -72,7 +72,7 @@ struct PerceiverAttention : public GGMLBlock { int heads; // = heads public: PerceiverAttention(int 
dim, int dim_h = 64, int h = 8) - : scale(powf(dim_h, -0.5)), dim_head(dim_h), heads(h) { + : scale(powf(static_cast(dim_h), -0.5f)), dim_head(dim_h), heads(h) { int inner_dim = dim_head * heads; blocks["norm1"] = std::shared_ptr(new LayerNorm(dim)); blocks["norm2"] = std::shared_ptr(new LayerNorm(dim)); @@ -129,8 +129,8 @@ public: k = reshape_tensor(ctx->ggml_ctx, k, heads); v = reshape_tensor(ctx->ggml_ctx, v, heads); scale = 1.f / sqrt(sqrt((float)dim_head)); - k = ggml_scale_inplace(ctx->ggml_ctx, k, scale); - q = ggml_scale_inplace(ctx->ggml_ctx, q, scale); + k = ggml_ext_scale(ctx->ggml_ctx, k, scale, true); + q = ggml_ext_scale(ctx->ggml_ctx, q, scale, true); // auto weight = ggml_mul_mat(ctx, q, k); auto weight = ggml_mul_mat(ctx->ggml_ctx, k, q); // NOTE order of mul is opposite to pytorch diff --git a/preprocessing.hpp b/src/preprocessing.hpp similarity index 94% rename from preprocessing.hpp rename to src/preprocessing.hpp index 4a1b8514..84e0ed3f 100644 --- a/preprocessing.hpp +++ b/src/preprocessing.hpp @@ -2,7 +2,7 @@ #define __PREPROCESSING_HPP__ #include "ggml_extend.hpp" -#define M_PI_ 3.14159265358979323846 +#define M_PI_ 3.14159265358979323846f void convolve(struct ggml_tensor* input, struct ggml_tensor* output, struct ggml_tensor* kernel, int padding) { struct ggml_init_params params; @@ -20,13 +20,13 @@ void convolve(struct ggml_tensor* input, struct ggml_tensor* output, struct ggml } void gaussian_kernel(struct ggml_tensor* kernel) { - int ks_mid = kernel->ne[0] / 2; + int ks_mid = static_cast(kernel->ne[0] / 2); float sigma = 1.4f; float normal = 1.f / (2.0f * M_PI_ * powf(sigma, 2.0f)); for (int y = 0; y < kernel->ne[0]; y++) { - float gx = -ks_mid + y; + float gx = static_cast(-ks_mid + y); for (int x = 0; x < kernel->ne[1]; x++) { - float gy = -ks_mid + x; + float gy = static_cast(-ks_mid + x); float k_ = expf(-((gx * gx + gy * gy) / (2.0f * powf(sigma, 2.0f)))) * normal; ggml_ext_tensor_set_f32(kernel, k_, x, y); } @@ -46,7 +46,7 @@ void grayscale(struct ggml_tensor* rgb_img, struct ggml_tensor* grayscale) { } void prop_hypot(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor* h) { - int n_elements = ggml_nelements(h); + int n_elements = static_cast(ggml_nelements(h)); float* dx = (float*)x->data; float* dy = (float*)y->data; float* dh = (float*)h->data; @@ -56,7 +56,7 @@ void prop_hypot(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor } void prop_arctan2(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor* h) { - int n_elements = ggml_nelements(h); + int n_elements = static_cast(ggml_nelements(h)); float* dx = (float*)x->data; float* dy = (float*)y->data; float* dh = (float*)h->data; @@ -66,7 +66,7 @@ void prop_arctan2(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tens } void normalize_tensor(struct ggml_tensor* g) { - int n_elements = ggml_nelements(g); + int n_elements = static_cast(ggml_nelements(g)); float* dg = (float*)g->data; float max = -INFINITY; for (int i = 0; i < n_elements; i++) { @@ -118,7 +118,7 @@ void non_max_supression(struct ggml_tensor* result, struct ggml_tensor* G, struc } void threshold_hystersis(struct ggml_tensor* img, float high_threshold, float low_threshold, float weak, float strong) { - int n_elements = ggml_nelements(img); + int n_elements = static_cast(ggml_nelements(img)); float* imd = (float*)img->data; float max = -INFINITY; for (int i = 0; i < n_elements; i++) { @@ -209,8 +209,8 @@ bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold, 
non_max_supression(image_gray, G, tetha); threshold_hystersis(image_gray, high_threshold, low_threshold, weak, strong); // to RGB channels - for (int iy = 0; iy < img.height; iy++) { - for (int ix = 0; ix < img.width; ix++) { + for (uint32_t iy = 0; iy < img.height; iy++) { + for (uint32_t ix = 0; ix < img.width; ix++) { float gray = ggml_ext_tensor_get_f32(image_gray, ix, iy); gray = inverse ? 1.0f - gray : gray; ggml_ext_tensor_set_f32(image, gray, ix, iy); diff --git a/qwen_image.hpp b/src/qwen_image.hpp similarity index 80% rename from qwen_image.hpp rename to src/qwen_image.hpp index eeb823d5..2c70344c 100644 --- a/qwen_image.hpp +++ b/src/qwen_image.hpp @@ -3,9 +3,8 @@ #include -#include "common.hpp" +#include "common_block.hpp" #include "flux.hpp" -#include "ggml_extend.hpp" namespace Qwen { constexpr int QWEN_IMAGE_GRAPH_SIZE = 20480; @@ -162,26 +161,25 @@ namespace Qwen { auto k = ggml_concat(ctx->ggml_ctx, txt_k, img_k, 2); // [N, n_txt_token + n_img_token, n_head, d_head] auto v = ggml_concat(ctx->ggml_ctx, txt_v, img_v, 2); // [N, n_txt_token + n_img_token, n_head, d_head] - auto attn = Rope::attention(ctx, q, k, v, pe, mask, (1.0f / 128.f)); // [N, n_txt_token + n_img_token, n_head*d_head] - attn = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, attn, 0, 2, 1, 3)); // [n_txt_token + n_img_token, N, hidden_size] + auto attn = Rope::attention(ctx, q, k, v, pe, mask, (1.0f / 128.f)); // [N, n_txt_token + n_img_token, n_head*d_head] auto txt_attn_out = ggml_view_3d(ctx->ggml_ctx, attn, attn->ne[0], - attn->ne[1], txt->ne[1], + attn->ne[2], attn->nb[1], attn->nb[2], - 0); // [n_txt_token, N, hidden_size] - txt_attn_out = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, txt_attn_out, 0, 2, 1, 3)); // [N, n_txt_token, hidden_size] + 0); // [N, n_txt_token, n_head*d_head] auto img_attn_out = ggml_view_3d(ctx->ggml_ctx, attn, attn->ne[0], - attn->ne[1], img->ne[1], + attn->ne[2], attn->nb[1], attn->nb[2], - attn->nb[2] * txt->ne[1]); // [n_img_token, N, hidden_size] - img_attn_out = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, img_attn_out, 0, 2, 1, 3)); // [N, n_img_token, hidden_size] + txt->ne[1] * attn->nb[1]); // [N, n_img_token, n_head*d_head] + img_attn_out = ggml_cont(ctx->ggml_ctx, img_attn_out); + txt_attn_out = ggml_cont(ctx->ggml_ctx, txt_attn_out); img_attn_out = to_out_0->forward(ctx, img_attn_out); txt_attn_out = to_add_out->forward(ctx, txt_attn_out); @@ -191,11 +189,16 @@ namespace Qwen { }; class QwenImageTransformerBlock : public GGMLBlock { + protected: + bool zero_cond_t; + public: QwenImageTransformerBlock(int64_t dim, int64_t num_attention_heads, int64_t attention_head_dim, - float eps = 1e-6) { + float eps = 1e-6, + bool zero_cond_t = false) + : zero_cond_t(zero_cond_t) { // img_mod.0 is nn.SiLU() blocks["img_mod.1"] = std::shared_ptr(new Linear(dim, 6 * dim, true)); @@ -208,7 +211,7 @@ namespace Qwen { blocks["txt_norm1"] = std::shared_ptr(new LayerNorm(dim, eps, false)); blocks["txt_norm2"] = std::shared_ptr(new LayerNorm(dim, eps, false)); - blocks["txt_mlp"] = std::shared_ptr(new FeedForward(dim, dim, 4, FeedForward::Activation::GELU)); + blocks["txt_mlp"] = std::shared_ptr(new FeedForward(dim, dim, 4, FeedForward::Activation::GELU, true)); blocks["attn"] = std::shared_ptr(new QwenImageAttention(dim, attention_head_dim, @@ -220,11 +223,37 @@ namespace Qwen { eps)); } + std::vector get_mod_params_vec(ggml_context* ctx, ggml_tensor* mod_params, ggml_tensor* index = nullptr) { + // index: [N, n_img_token] + // mod_params: [N, hidden_size * 12] 
+ if (index == nullptr) { + return ggml_ext_chunk(ctx, mod_params, 6, 0); + } + mod_params = ggml_reshape_1d(ctx, mod_params, ggml_nelements(mod_params)); + auto mod_params_vec = ggml_ext_chunk(ctx, mod_params, 12, 0); + index = ggml_reshape_3d(ctx, index, 1, index->ne[0], index->ne[1]); // [N, n_img_token, 1] + index = ggml_repeat_4d(ctx, index, mod_params_vec[0]->ne[0], index->ne[1], index->ne[2], index->ne[3]); // [N, n_img_token, hidden_size] + std::vector mod_results; + for (int i = 0; i < 6; i++) { + auto mod_0 = mod_params_vec[i]; + auto mod_1 = mod_params_vec[i + 6]; + + // mod_result = torch.where(index == 0, mod_0, mod_1) + // mod_result = (1 - index)*mod_0 + index*mod_1 + mod_0 = ggml_sub(ctx, ggml_repeat(ctx, mod_0, index), ggml_mul(ctx, index, mod_0)); // [N, n_img_token, hidden_size] + mod_1 = ggml_mul(ctx, index, mod_1); // [N, n_img_token, hidden_size] + auto mod_result = ggml_add(ctx, mod_0, mod_1); + mod_results.push_back(mod_result); + } + return mod_results; + } + virtual std::pair forward(GGMLRunnerContext* ctx, struct ggml_tensor* img, struct ggml_tensor* txt, struct ggml_tensor* t_emb, - struct ggml_tensor* pe) { + struct ggml_tensor* pe, + struct ggml_tensor* modulate_index = nullptr) { // img: [N, n_img_token, hidden_size] // txt: [N, n_txt_token, hidden_size] // pe: [n_img_token + n_txt_token, d_head/2, 2, 2] @@ -244,14 +273,18 @@ namespace Qwen { auto img_mod_params = ggml_silu(ctx->ggml_ctx, t_emb); img_mod_params = img_mod_1->forward(ctx, img_mod_params); - auto img_mod_param_vec = ggml_ext_chunk(ctx->ggml_ctx, img_mod_params, 6, 0); + auto img_mod_param_vec = get_mod_params_vec(ctx->ggml_ctx, img_mod_params, modulate_index); + + if (zero_cond_t) { + t_emb = ggml_ext_chunk(ctx->ggml_ctx, t_emb, 2, 1)[0]; + } auto txt_mod_params = ggml_silu(ctx->ggml_ctx, t_emb); txt_mod_params = txt_mod_1->forward(ctx, txt_mod_params); - auto txt_mod_param_vec = ggml_ext_chunk(ctx->ggml_ctx, txt_mod_params, 6, 0); + auto txt_mod_param_vec = get_mod_params_vec(ctx->ggml_ctx, txt_mod_params); auto img_normed = img_norm1->forward(ctx, img); - auto img_modulated = Flux::modulate(ctx->ggml_ctx, img_normed, img_mod_param_vec[0], img_mod_param_vec[1]); + auto img_modulated = Flux::modulate(ctx->ggml_ctx, img_normed, img_mod_param_vec[0], img_mod_param_vec[1], modulate_index != nullptr); auto img_gate1 = img_mod_param_vec[2]; auto txt_normed = txt_norm1->forward(ctx, txt); @@ -264,7 +297,7 @@ namespace Qwen { txt = ggml_add(ctx->ggml_ctx, txt, ggml_mul(ctx->ggml_ctx, txt_attn_output, txt_gate1)); auto img_normed2 = img_norm2->forward(ctx, img); - auto img_modulated2 = Flux::modulate(ctx->ggml_ctx, img_normed2, img_mod_param_vec[3], img_mod_param_vec[4]); + auto img_modulated2 = Flux::modulate(ctx->ggml_ctx, img_normed2, img_mod_param_vec[3], img_mod_param_vec[4], modulate_index != nullptr); auto img_gate2 = img_mod_param_vec[5]; auto txt_normed2 = txt_norm2->forward(ctx, txt); @@ -315,16 +348,17 @@ namespace Qwen { }; struct QwenImageParams { - int64_t patch_size = 2; + int patch_size = 2; int64_t in_channels = 64; int64_t out_channels = 16; - int64_t num_layers = 60; + int num_layers = 60; int64_t attention_head_dim = 128; int64_t num_attention_heads = 24; int64_t joint_attention_dim = 3584; - float theta = 10000; + int theta = 10000; std::vector axes_dim = {16, 56, 56}; - int64_t axes_dim_sum = 128; + int axes_dim_sum = 128; + bool zero_cond_t = false; }; class QwenImageModel : public GGMLBlock { @@ -346,7 +380,8 @@ namespace Qwen { auto block = std::shared_ptr(new 
QwenImageTransformerBlock(inner_dim, params.num_attention_heads, params.attention_head_dim, - 1e-6f)); + 1e-6f, + params.zero_cond_t)); blocks["transformer_blocks." + std::to_string(i)] = block; } @@ -354,74 +389,12 @@ namespace Qwen { blocks["proj_out"] = std::shared_ptr(new Linear(inner_dim, params.patch_size * params.patch_size * params.out_channels)); } - struct ggml_tensor* pad_to_patch_size(struct ggml_context* ctx, - struct ggml_tensor* x) { - int64_t W = x->ne[0]; - int64_t H = x->ne[1]; - - int pad_h = (params.patch_size - H % params.patch_size) % params.patch_size; - int pad_w = (params.patch_size - W % params.patch_size) % params.patch_size; - x = ggml_pad(ctx, x, pad_w, pad_h, 0, 0); // [N, C, H + pad_h, W + pad_w] - return x; - } - - struct ggml_tensor* patchify(struct ggml_context* ctx, - struct ggml_tensor* x) { - // x: [N, C, H, W] - // return: [N, h*w, C * patch_size * patch_size] - int64_t N = x->ne[3]; - int64_t C = x->ne[2]; - int64_t H = x->ne[1]; - int64_t W = x->ne[0]; - int64_t p = params.patch_size; - int64_t h = H / params.patch_size; - int64_t w = W / params.patch_size; - - GGML_ASSERT(h * p == H && w * p == W); - - x = ggml_reshape_4d(ctx, x, p, w, p, h * C * N); // [N*C*h, p, w, p] - x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, w, p, p] - x = ggml_reshape_4d(ctx, x, p * p, w * h, C, N); // [N, C, h*w, p*p] - x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N, h*w, C, p*p] - x = ggml_reshape_3d(ctx, x, p * p * C, w * h, N); // [N, h*w, C*p*p] - return x; - } - - struct ggml_tensor* process_img(struct ggml_context* ctx, - struct ggml_tensor* x) { - x = pad_to_patch_size(ctx, x); - x = patchify(ctx, x); - return x; - } - - struct ggml_tensor* unpatchify(struct ggml_context* ctx, - struct ggml_tensor* x, - int64_t h, - int64_t w) { - // x: [N, h*w, C*patch_size*patch_size] - // return: [N, C, H, W] - int64_t N = x->ne[2]; - int64_t C = x->ne[0] / params.patch_size / params.patch_size; - int64_t H = h * params.patch_size; - int64_t W = w * params.patch_size; - int64_t p = params.patch_size; - - GGML_ASSERT(C * p * p == x->ne[0]); - - x = ggml_reshape_4d(ctx, x, p * p, C, w * h, N); // [N, h*w, C, p*p] - x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N, C, h*w, p*p] - x = ggml_reshape_4d(ctx, x, p, p, w, h * C * N); // [N*C*h, w, p, p] - x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, p, w, p] - x = ggml_reshape_4d(ctx, x, W, H, C, N); // [N, C, h*p, w*p] - - return x; - } - struct ggml_tensor* forward_orig(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* timestep, struct ggml_tensor* context, - struct ggml_tensor* pe) { + struct ggml_tensor* pe, + struct ggml_tensor* modulate_index = nullptr) { auto time_text_embed = std::dynamic_pointer_cast(blocks["time_text_embed"]); auto txt_norm = std::dynamic_pointer_cast(blocks["txt_norm"]); auto img_in = std::dynamic_pointer_cast(blocks["img_in"]); @@ -430,18 +403,26 @@ namespace Qwen { auto proj_out = std::dynamic_pointer_cast(blocks["proj_out"]); auto t_emb = time_text_embed->forward(ctx, timestep); - auto img = img_in->forward(ctx, x); - auto txt = txt_norm->forward(ctx, context); - txt = txt_in->forward(ctx, txt); + if (params.zero_cond_t) { + auto t_emb_0 = time_text_embed->forward(ctx, ggml_ext_zeros_like(ctx->ggml_ctx, timestep)); + t_emb = ggml_concat(ctx->ggml_ctx, t_emb, t_emb_0, 1); + } + auto img = img_in->forward(ctx, x); + auto txt = txt_norm->forward(ctx, context); + txt = txt_in->forward(ctx, txt); for (int i = 0; i < 
params.num_layers; i++) { auto block = std::dynamic_pointer_cast(blocks["transformer_blocks." + std::to_string(i)]); - auto result = block->forward(ctx, img, txt, t_emb, pe); + auto result = block->forward(ctx, img, txt, t_emb, pe, modulate_index); img = result.first; txt = result.second; } + if (params.zero_cond_t) { + t_emb = ggml_ext_chunk(ctx->ggml_ctx, t_emb, 2, 1)[0]; + } + img = norm_out->forward(ctx, img, t_emb); img = proj_out->forward(ctx, img); @@ -453,7 +434,8 @@ namespace Qwen { struct ggml_tensor* timestep, struct ggml_tensor* context, struct ggml_tensor* pe, - std::vector ref_latents = {}) { + std::vector ref_latents = {}, + struct ggml_tensor* modulate_index = nullptr) { // Forward pass of DiT. // x: [N, C, H, W] // timestep: [N,] @@ -466,20 +448,17 @@ namespace Qwen { int64_t C = x->ne[2]; int64_t N = x->ne[3]; - auto img = process_img(ctx->ggml_ctx, x); - uint64_t img_tokens = img->ne[1]; + auto img = DiT::pad_and_patchify(ctx, x, params.patch_size, params.patch_size); + int64_t img_tokens = img->ne[1]; if (ref_latents.size() > 0) { for (ggml_tensor* ref : ref_latents) { - ref = process_img(ctx->ggml_ctx, ref); + ref = DiT::pad_and_patchify(ctx, ref, params.patch_size, params.patch_size); img = ggml_concat(ctx->ggml_ctx, img, ref, 1); } } - int64_t h_len = ((H + (params.patch_size / 2)) / params.patch_size); - int64_t w_len = ((W + (params.patch_size / 2)) / params.patch_size); - - auto out = forward_orig(ctx, img, timestep, context, pe); // [N, h_len*w_len, ph*pw*C] + auto out = forward_orig(ctx, img, timestep, context, pe, modulate_index); // [N, h_len*w_len, ph*pw*C] if (out->ne[1] > img_tokens) { out = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, out, 0, 2, 1, 3)); // [num_tokens, N, C * patch_size * patch_size] @@ -487,11 +466,7 @@ namespace Qwen { out = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, out, 0, 2, 1, 3)); // [N, h*w, C * patch_size * patch_size] } - out = unpatchify(ctx->ggml_ctx, out, h_len, w_len); // [N, C, H + pad_h, W + pad_w] - - // slice - out = ggml_ext_slice(ctx->ggml_ctx, out, 1, 0, H); // [N, C, H, W + pad_w] - out = ggml_ext_slice(ctx->ggml_ctx, out, 0, 0, W); // [N, C, H, W] + out = DiT::unpatchify_and_crop(ctx->ggml_ctx, out, H, W, params.patch_size, params.patch_size); // [N, C, H, W] return out; } @@ -502,19 +477,25 @@ namespace Qwen { QwenImageParams qwen_image_params; QwenImageModel qwen_image; std::vector pe_vec; + std::vector modulate_index_vec; SDVersion version; QwenImageRunner(ggml_backend_t backend, bool offload_params_to_cpu, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "", - SDVersion version = VERSION_QWEN_IMAGE) + SDVersion version = VERSION_QWEN_IMAGE, + bool zero_cond_t = false) : GGMLRunner(backend, offload_params_to_cpu) { - qwen_image_params.num_layers = 0; + qwen_image_params.num_layers = 0; + qwen_image_params.zero_cond_t = zero_cond_t; for (auto pair : tensor_storage_map) { std::string tensor_name = pair.first; if (tensor_name.find(prefix) == std::string::npos) continue; + if (tensor_name.find("__index_timestep_zero__") != std::string::npos) { + qwen_image_params.zero_cond_t = true; + } size_t pos = tensor_name.find("transformer_blocks."); if (pos != std::string::npos) { tensor_name = tensor_name.substr(pos); // remove prefix @@ -529,6 +510,9 @@ namespace Qwen { } } LOG_INFO("qwen_image_params.num_layers: %ld", qwen_image_params.num_layers); + if (qwen_image_params.zero_cond_t) { + LOG_INFO("use zero_cond_t"); + } qwen_image = QwenImageModel(qwen_image_params); 
qwen_image.init(params_ctx, tensor_storage_map, prefix); } @@ -557,16 +541,18 @@ namespace Qwen { ref_latents[i] = to_backend(ref_latents[i]); } - pe_vec = Rope::gen_qwen_image_pe(x->ne[1], - x->ne[0], + pe_vec = Rope::gen_qwen_image_pe(static_cast(x->ne[1]), + static_cast(x->ne[0]), qwen_image_params.patch_size, - x->ne[3], - context->ne[1], + static_cast(x->ne[3]), + static_cast(context->ne[1]), ref_latents, increase_ref_index, qwen_image_params.theta, + circular_y_enabled, + circular_x_enabled, qwen_image_params.axes_dim); - int pos_len = pe_vec.size() / qwen_image_params.axes_dim_sum / 2; + int pos_len = static_cast(pe_vec.size() / qwen_image_params.axes_dim_sum / 2); // LOG_DEBUG("pos_len %d", pos_len); auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, qwen_image_params.axes_dim_sum / 2, pos_len); // pe->data = pe_vec.data(); @@ -574,6 +560,31 @@ namespace Qwen { // pe->data = nullptr; set_backend_tensor_data(pe, pe_vec.data()); + ggml_tensor* modulate_index = nullptr; + if (qwen_image_params.zero_cond_t) { + modulate_index_vec.clear(); + + int64_t h_len = ((x->ne[1] + (qwen_image_params.patch_size / 2)) / qwen_image_params.patch_size); + int64_t w_len = ((x->ne[0] + (qwen_image_params.patch_size / 2)) / qwen_image_params.patch_size); + int64_t num_img_tokens = h_len * w_len; + + modulate_index_vec.insert(modulate_index_vec.end(), num_img_tokens, 0.f); + int64_t num_ref_img_tokens = 0; + for (ggml_tensor* ref : ref_latents) { + int64_t h_len = ((ref->ne[1] + (qwen_image_params.patch_size / 2)) / qwen_image_params.patch_size); + int64_t w_len = ((ref->ne[0] + (qwen_image_params.patch_size / 2)) / qwen_image_params.patch_size); + + num_ref_img_tokens += h_len * w_len; + } + + if (num_ref_img_tokens > 0) { + modulate_index_vec.insert(modulate_index_vec.end(), num_ref_img_tokens, 1.f); + } + + modulate_index = ggml_new_tensor_1d(compute_ctx, GGML_TYPE_F32, modulate_index_vec.size()); + set_backend_tensor_data(modulate_index, modulate_index_vec.data()); + } + auto runner_ctx = get_context(); struct ggml_tensor* out = qwen_image.forward(&runner_ctx, @@ -581,7 +592,8 @@ namespace Qwen { timesteps, context, pe, - ref_latents); + ref_latents, + modulate_index); ggml_build_forward_expand(gf, out); @@ -631,12 +643,12 @@ namespace Qwen { struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); compute(8, x, timesteps, context, {}, false, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out); - LOG_DEBUG("qwen_image test done in %dms", t1 - t0); + LOG_DEBUG("qwen_image test done in %lldms", t1 - t0); } } @@ -684,4 +696,4 @@ namespace Qwen { } // namespace name -#endif // __QWEN_IMAGE_HPP__ \ No newline at end of file +#endif // __QWEN_IMAGE_HPP__ diff --git a/rng.hpp b/src/rng.hpp similarity index 100% rename from rng.hpp rename to src/rng.hpp diff --git a/rng_mt19937.hpp b/src/rng_mt19937.hpp similarity index 98% rename from rng_mt19937.hpp rename to src/rng_mt19937.hpp index 7e619988..734554bf 100644 --- a/rng_mt19937.hpp +++ b/src/rng_mt19937.hpp @@ -90,7 +90,7 @@ class MT19937RNG : public RNG { float u1 = 1.0f - data[j]; float u2 = data[j + 8]; float r = std::sqrt(-2.0f * std::log(u1)); - float theta = 2.0f * 3.14159265358979323846 * u2; + float theta = 2.0f * 3.14159265358979323846f * u2; data[j] = r * std::cos(theta) * std + mean; data[j + 8] = r * std::sin(theta) * std + mean; } diff --git a/rng_philox.hpp b/src/rng_philox.hpp similarity index 100% rename from rng_philox.hpp rename to 
src/rng_philox.hpp diff --git a/rope.hpp b/src/rope.hpp similarity index 65% rename from rope.hpp rename to src/rope.hpp index 4abc5146..b26e4fcc 100644 --- a/rope.hpp +++ b/src/rope.hpp @@ -1,6 +1,8 @@ #ifndef __ROPE_HPP__ #define __ROPE_HPP__ +#include +#include #include #include "ggml_extend.hpp" @@ -20,11 +22,11 @@ namespace Rope { } __STATIC_INLINE__ std::vector> transpose(const std::vector>& mat) { - int rows = mat.size(); - int cols = mat[0].size(); + size_t rows = mat.size(); + size_t cols = mat[0].size(); std::vector> transposed(cols, std::vector(rows)); - for (int i = 0; i < rows; ++i) { - for (int j = 0; j < cols; ++j) { + for (size_t i = 0; i < rows; ++i) { + for (size_t j = 0; j < cols; ++j) { transposed[j][i] = mat[i][j]; } } @@ -39,7 +41,10 @@ namespace Rope { return flat_vec; } - __STATIC_INLINE__ std::vector> rope(const std::vector& pos, int dim, int theta) { + __STATIC_INLINE__ std::vector> rope(const std::vector& pos, + int dim, + float theta, + const std::vector& axis_wrap_dims = {}) { assert(dim % 2 == 0); int half_dim = dim / 2; @@ -47,14 +52,31 @@ namespace Rope { std::vector omega(half_dim); for (int i = 0; i < half_dim; ++i) { - omega[i] = 1.0 / std::pow(theta, scale[i]); + omega[i] = 1.0f / ::powf(1.f * theta, scale[i]); } - int pos_size = pos.size(); + size_t pos_size = pos.size(); std::vector> out(pos_size, std::vector(half_dim)); - for (int i = 0; i < pos_size; ++i) { - for (int j = 0; j < half_dim; ++j) { - out[i][j] = pos[i] * omega[j]; + for (size_t i = 0; i < pos_size; ++i) { + for (size_t j = 0; j < half_dim; ++j) { + float angle = pos[i] * omega[j]; + if (!axis_wrap_dims.empty()) { + size_t wrap_size = axis_wrap_dims.size(); + // mod batch size since we only store this for one item in the batch + size_t wrap_idx = wrap_size > 0 ? 
(i % wrap_size) : 0; + int wrap_dim = axis_wrap_dims[wrap_idx]; + if (wrap_dim > 0) { + constexpr float TWO_PI = 6.28318530717958647692f; + float cycles = omega[j] * wrap_dim / TWO_PI; + // closest periodic harmonic, necessary to ensure things neatly tile + // without this round, things don't tile at the boundaries and you end up + // with the model knowing what is "center" + float rounded = std::round(cycles); + angle = pos[i] * TWO_PI * rounded / wrap_dim; + } + } + + out[i][j] = angle; } } @@ -77,7 +99,7 @@ namespace Rope { for (int dim = 0; dim < axes_dim_num; dim++) { if (arange_dims.find(dim) != arange_dims.end()) { for (int i = 0; i < bs * context_len; i++) { - txt_ids[i][dim] = (i % context_len); + txt_ids[i][dim] = 1.f * (i % context_len); } } } @@ -89,20 +111,29 @@ namespace Rope { int patch_size, int bs, int axes_dim_num, - int index = 0, - int h_offset = 0, - int w_offset = 0) { + int index = 0, + int h_offset = 0, + int w_offset = 0, + bool scale_rope = false) { int h_len = (h + (patch_size / 2)) / patch_size; int w_len = (w + (patch_size / 2)) / patch_size; std::vector> img_ids(h_len * w_len, std::vector(axes_dim_num, 0.0)); - std::vector row_ids = linspace(h_offset, h_len - 1 + h_offset, h_len); - std::vector col_ids = linspace(w_offset, w_len - 1 + w_offset, w_len); + int h_start = h_offset; + int w_start = w_offset; + + if (scale_rope) { + h_start -= h_len / 2; + w_start -= w_len / 2; + } + + std::vector row_ids = linspace(1.f * h_start, 1.f * h_start + h_len - 1, h_len); + std::vector col_ids = linspace(1.f * w_start, 1.f * w_start + w_len - 1, w_len); for (int i = 0; i < h_len; ++i) { for (int j = 0; j < w_len; ++j) { - img_ids[i * w_len + j][0] = index; + img_ids[i * w_len + j][0] = 1.f * index; img_ids[i * w_len + j][1] = row_ids[i]; img_ids[i * w_len + j][2] = col_ids[j]; } @@ -136,11 +167,12 @@ namespace Rope { __STATIC_INLINE__ std::vector embed_nd(const std::vector>& ids, int bs, - int theta, - const std::vector& axes_dim) { + const std::vector& axis_thetas, + const std::vector& axes_dim, + const std::vector>& wrap_dims = {}) { std::vector> trans_ids = transpose(ids); size_t pos_len = ids.size() / bs; - int num_axes = axes_dim.size(); + size_t num_axes = axes_dim.size(); // for (int i = 0; i < pos_len; i++) { // std::cout << trans_ids[0][i] << " " << trans_ids[1][i] << " " << trans_ids[2][i] << std::endl; // } @@ -150,9 +182,18 @@ namespace Rope { emb_dim += d / 2; std::vector> emb(bs * pos_len, std::vector(emb_dim * 2 * 2, 0.0)); - int offset = 0; - for (int i = 0; i < num_axes; ++i) { - std::vector> rope_emb = rope(trans_ids[i], axes_dim[i], theta); // [bs*pos_len, axes_dim[i]/2 * 2 * 2] + size_t offset = 0; + for (size_t i = 0; i < num_axes; ++i) { + std::vector axis_wrap_dims; + if (!wrap_dims.empty() && i < (int)wrap_dims.size()) { + axis_wrap_dims = wrap_dims[i]; + } + float axis_theta = 10000.0f; + if (!axis_thetas.empty()) { + axis_theta = axis_thetas[std::min(i, axis_thetas.size() - 1)]; + } + std::vector> rope_emb = + rope(trans_ids[i], axes_dim[i], axis_theta, axis_wrap_dims); // [bs*pos_len, axes_dim[i]/2 * 2 * 2] for (int b = 0; b < bs; ++b) { for (int j = 0; j < pos_len; ++j) { for (int k = 0; k < rope_emb[0].size(); ++k) { @@ -166,43 +207,55 @@ namespace Rope { return flatten(emb); } + __STATIC_INLINE__ std::vector embed_nd(const std::vector>& ids, + int bs, + float theta, + const std::vector& axes_dim, + const std::vector>& wrap_dims = {}) { + std::vector axis_thetas(axes_dim.size(), theta); + return embed_nd(ids, bs, axis_thetas, axes_dim, 
wrap_dims); + } + __STATIC_INLINE__ std::vector> gen_refs_ids(int patch_size, int bs, int axes_dim_num, const std::vector& ref_latents, bool increase_ref_index, - float ref_index_scale) { + float ref_index_scale, + bool scale_rope) { std::vector> ids; - uint64_t curr_h_offset = 0; - uint64_t curr_w_offset = 0; - int index = 1; + int curr_h_offset = 0; + int curr_w_offset = 0; + int index = 1; for (ggml_tensor* ref : ref_latents) { - uint64_t h_offset = 0; - uint64_t w_offset = 0; + int h_offset = 0; + int w_offset = 0; if (!increase_ref_index) { if (ref->ne[1] + curr_h_offset > ref->ne[0] + curr_w_offset) { w_offset = curr_w_offset; } else { h_offset = curr_h_offset; } + scale_rope = false; } - auto ref_ids = gen_flux_img_ids(ref->ne[1], - ref->ne[0], + auto ref_ids = gen_flux_img_ids(static_cast(ref->ne[1]), + static_cast(ref->ne[0]), patch_size, bs, axes_dim_num, static_cast(index * ref_index_scale), h_offset, - w_offset); + w_offset, + scale_rope); ids = concat_ids(ids, ref_ids, bs); if (increase_ref_index) { index++; } - curr_h_offset = std::max(curr_h_offset, ref->ne[1] + h_offset); - curr_w_offset = std::max(curr_w_offset, ref->ne[0] + w_offset); + curr_h_offset = std::max(curr_h_offset, static_cast(ref->ne[1]) + h_offset); + curr_w_offset = std::max(curr_w_offset, static_cast(ref->ne[0]) + w_offset); } return ids; } @@ -222,7 +275,7 @@ namespace Rope { auto ids = concat_ids(txt_ids, img_ids, bs); if (ref_latents.size() > 0) { - auto refs_ids = gen_refs_ids(patch_size, bs, axes_dim_num, ref_latents, increase_ref_index, ref_index_scale); + auto refs_ids = gen_refs_ids(patch_size, bs, axes_dim_num, ref_latents, increase_ref_index, ref_index_scale, false); ids = concat_ids(ids, refs_ids, bs); } return ids; @@ -239,6 +292,8 @@ namespace Rope { bool increase_ref_index, float ref_index_scale, int theta, + bool circular_h, + bool circular_w, const std::vector& axes_dim) { std::vector> ids = gen_flux_ids(h, w, @@ -250,7 +305,47 @@ namespace Rope { ref_latents, increase_ref_index, ref_index_scale); - return embed_nd(ids, bs, theta, axes_dim); + std::vector> wrap_dims; + if ((circular_h || circular_w) && bs > 0 && axes_dim.size() >= 3) { + int h_len = (h + (patch_size / 2)) / patch_size; + int w_len = (w + (patch_size / 2)) / patch_size; + if (h_len > 0 && w_len > 0) { + size_t pos_len = ids.size() / bs; + wrap_dims.assign(axes_dim.size(), std::vector(pos_len, 0)); + size_t cursor = context_len; // text first + const size_t img_tokens = static_cast(h_len) * static_cast(w_len); + for (size_t token_i = 0; token_i < img_tokens; ++token_i) { + if (circular_h) { + wrap_dims[1][cursor + token_i] = h_len; + } + if (circular_w) { + wrap_dims[2][cursor + token_i] = w_len; + } + } + cursor += img_tokens; + // reference latents + for (ggml_tensor* ref : ref_latents) { + if (ref == nullptr) { + continue; + } + int ref_h = static_cast(ref->ne[1]); + int ref_w = static_cast(ref->ne[0]); + int ref_h_l = (ref_h + (patch_size / 2)) / patch_size; + int ref_w_l = (ref_w + (patch_size / 2)) / patch_size; + size_t ref_tokens = static_cast(ref_h_l) * static_cast(ref_w_l); + for (size_t token_i = 0; token_i < ref_tokens; ++token_i) { + if (circular_h) { + wrap_dims[1][cursor + token_i] = ref_h_l; + } + if (circular_w) { + wrap_dims[2][cursor + token_i] = ref_w_l; + } + } + cursor += ref_tokens; + } + } + } + return embed_nd(ids, bs, static_cast(theta), axes_dim, wrap_dims); } __STATIC_INLINE__ std::vector> gen_qwen_image_ids(int h, @@ -263,7 +358,7 @@ namespace Rope { int h_len = (h + (patch_size / 2)) / patch_size; 
int w_len = (w + (patch_size / 2)) / patch_size; int txt_id_start = std::max(h_len, w_len); - auto txt_ids = linspace(txt_id_start, context_len + txt_id_start, context_len); + auto txt_ids = linspace(1.f * txt_id_start, 1.f * context_len + txt_id_start, context_len); std::vector> txt_ids_repeated(bs * context_len, std::vector(3)); for (int i = 0; i < bs; ++i) { for (int j = 0; j < txt_ids.size(); ++j) { @@ -271,10 +366,10 @@ namespace Rope { } } int axes_dim_num = 3; - auto img_ids = gen_flux_img_ids(h, w, patch_size, bs, axes_dim_num); + auto img_ids = gen_flux_img_ids(h, w, patch_size, bs, axes_dim_num, 0, 0, 0, true); auto ids = concat_ids(txt_ids_repeated, img_ids, bs); if (ref_latents.size() > 0) { - auto refs_ids = gen_refs_ids(patch_size, bs, axes_dim_num, ref_latents, increase_ref_index, 1.f); + auto refs_ids = gen_refs_ids(patch_size, bs, axes_dim_num, ref_latents, increase_ref_index, 1.f, true); ids = concat_ids(ids, refs_ids, bs); } return ids; @@ -289,9 +384,57 @@ namespace Rope { const std::vector& ref_latents, bool increase_ref_index, int theta, + bool circular_h, + bool circular_w, const std::vector& axes_dim) { std::vector> ids = gen_qwen_image_ids(h, w, patch_size, bs, context_len, ref_latents, increase_ref_index); - return embed_nd(ids, bs, theta, axes_dim); + std::vector> wrap_dims; + // This logic simply stores the (pad and patch_adjusted) sizes of images so we can make sure rope correctly tiles + if ((circular_h || circular_w) && bs > 0 && axes_dim.size() >= 3) { + int pad_h = (patch_size - (h % patch_size)) % patch_size; + int pad_w = (patch_size - (w % patch_size)) % patch_size; + int h_len = (h + pad_h) / patch_size; + int w_len = (w + pad_w) / patch_size; + if (h_len > 0 && w_len > 0) { + const size_t total_tokens = ids.size(); + // Track per-token wrap lengths for the row/column axes so only spatial tokens become periodic. 
+ wrap_dims.assign(axes_dim.size(), std::vector(total_tokens / bs, 0)); + size_t cursor = context_len; // ignore text tokens + const size_t img_tokens = static_cast(h_len) * static_cast(w_len); + for (size_t token_i = 0; token_i < img_tokens; ++token_i) { + if (circular_h) { + wrap_dims[1][cursor + token_i] = h_len; + } + if (circular_w) { + wrap_dims[2][cursor + token_i] = w_len; + } + } + cursor += img_tokens; + // For each reference image, store wrap sizes as well + for (ggml_tensor* ref : ref_latents) { + if (ref == nullptr) { + continue; + } + int ref_h = static_cast(ref->ne[1]); + int ref_w = static_cast(ref->ne[0]); + int ref_pad_h = (patch_size - (ref_h % patch_size)) % patch_size; + int ref_pad_w = (patch_size - (ref_w % patch_size)) % patch_size; + int ref_h_len = (ref_h + ref_pad_h) / patch_size; + int ref_w_len = (ref_w + ref_pad_w) / patch_size; + size_t ref_n_tokens = static_cast(ref_h_len) * static_cast(ref_w_len); + for (size_t token_i = 0; token_i < ref_n_tokens; ++token_i) { + if (circular_h) { + wrap_dims[1][cursor + token_i] = ref_h_len; + } + if (circular_w) { + wrap_dims[2][cursor + token_i] = ref_w_len; + } + } + cursor += ref_n_tokens; + } + } + } + return embed_nd(ids, bs, static_cast(theta), axes_dim, wrap_dims); } __STATIC_INLINE__ std::vector> gen_vid_ids(int t, @@ -310,9 +453,9 @@ namespace Rope { std::vector> vid_ids(t_len * h_len * w_len, std::vector(3, 0.0)); - std::vector t_ids = linspace(t_offset, t_len - 1 + t_offset, t_len); - std::vector h_ids = linspace(h_offset, h_len - 1 + h_offset, h_len); - std::vector w_ids = linspace(w_offset, w_len - 1 + w_offset, w_len); + std::vector t_ids = linspace(1.f * t_offset, 1.f * t_len - 1 + t_offset, t_len); + std::vector h_ids = linspace(1.f * h_offset, 1.f * h_len - 1 + h_offset, h_len); + std::vector w_ids = linspace(1.f * w_offset, 1.f * w_len - 1 + w_offset, w_len); for (int i = 0; i < t_len; ++i) { for (int j = 0; j < h_len; ++j) { @@ -345,7 +488,7 @@ namespace Rope { int theta, const std::vector& axes_dim) { std::vector> ids = gen_vid_ids(t, h, w, pt, ph, pw, bs); - return embed_nd(ids, bs, theta, axes_dim); + return embed_nd(ids, bs, static_cast(theta), axes_dim); } __STATIC_INLINE__ std::vector> gen_qwen2vl_ids(int grid_h, @@ -363,8 +506,8 @@ namespace Rope { GGML_ASSERT(i < grid_h * grid_w); - ids[i][0] = ih + iy; - ids[i][1] = iw + ix; + ids[i][0] = static_cast(ih + iy); + ids[i][1] = static_cast(iw + ix); index++; } } @@ -381,7 +524,7 @@ namespace Rope { int theta, const std::vector& axes_dim) { std::vector> ids = gen_qwen2vl_ids(grid_h, grid_w, merge_size, window_index); - return embed_nd(ids, 1, theta, axes_dim); + return embed_nd(ids, 1, static_cast(theta), axes_dim); } __STATIC_INLINE__ int bound_mod(int a, int m) { @@ -428,9 +571,33 @@ namespace Rope { const std::vector& ref_latents, bool increase_ref_index, int theta, + bool circular_h, + bool circular_w, const std::vector& axes_dim) { std::vector> ids = gen_z_image_ids(h, w, patch_size, bs, context_len, seq_multi_of, ref_latents, increase_ref_index); - return embed_nd(ids, bs, theta, axes_dim); + std::vector> wrap_dims; + if ((circular_h || circular_w) && bs > 0 && axes_dim.size() >= 3) { + int pad_h = (patch_size - (h % patch_size)) % patch_size; + int pad_w = (patch_size - (w % patch_size)) % patch_size; + int h_len = (h + pad_h) / patch_size; + int w_len = (w + pad_w) / patch_size; + if (h_len > 0 && w_len > 0) { + size_t pos_len = ids.size() / bs; + wrap_dims.assign(axes_dim.size(), std::vector(pos_len, 0)); + size_t cursor = context_len + 
bound_mod(context_len, seq_multi_of); // skip text (and its padding) + size_t img_tokens = static_cast(h_len) * static_cast(w_len); + for (size_t token_i = 0; token_i < img_tokens; ++token_i) { + if (circular_h) { + wrap_dims[1][cursor + token_i] = h_len; + } + if (circular_w) { + wrap_dims[2][cursor + token_i] = w_len; + } + } + } + } + + return embed_nd(ids, bs, static_cast(theta), axes_dim, wrap_dims); } __STATIC_INLINE__ struct ggml_tensor* apply_rope(struct ggml_context* ctx, @@ -488,7 +655,7 @@ namespace Rope { q = apply_rope(ctx->ggml_ctx, q, pe, rope_interleaved); // [N*n_head, L, d_head] k = apply_rope(ctx->ggml_ctx, k, pe, rope_interleaved); // [N*n_head, L, d_head] - auto x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, v->ne[1], mask, false, true, ctx->flash_attn_enabled, kv_scale); // [N, L, n_head*d_head] + auto x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, v->ne[1], mask, true, ctx->flash_attn_enabled, kv_scale); // [N, L, n_head*d_head] return x; } }; // namespace Rope diff --git a/stable-diffusion.cpp b/src/stable-diffusion.cpp similarity index 82% rename from stable-diffusion.cpp rename to src/stable-diffusion.cpp index a2eeb7f2..d18db6ed 100644 --- a/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -7,6 +7,7 @@ #include "stable-diffusion.h" #include "util.h" +#include "cache_dit.hpp" #include "conditioner.hpp" #include "control.hpp" #include "denoiser.hpp" @@ -16,6 +17,7 @@ #include "lora.hpp" #include "pmid.hpp" #include "tae.hpp" +#include "ucache.hpp" #include "vae.hpp" #include "latent-preview.h" @@ -29,9 +31,11 @@ const char* model_version_to_str[] = { "SD 2.x", "SD 2.x Inpaint", "SD 2.x Tiny UNet", + "SDXS", "SDXL", "SDXL Inpaint", "SDXL Instruct-Pix2Pix", + "SDXL (Vega)", "SDXL (SSD1B)", "SVD", "SD3.x", @@ -44,7 +48,9 @@ const char* model_version_to_str[] = { "Wan 2.2 I2V", "Wan 2.2 TI2V", "Qwen Image", + "Anima", "Flux.2", + "Flux.2 klein", "Z-Image", "Ovis Image", }; @@ -62,6 +68,8 @@ const char* sampling_methods_str[] = { "LCM", "DDIM \"trailing\"", "TCD", + "Res Multistep", + "Res 2s", }; /*================================================== Helper Functions ================================================*/ @@ -100,6 +108,7 @@ public: SDVersion version; bool vae_decode_only = false; + bool external_vae_is_invalid = false; bool free_params_immediately = false; std::shared_ptr rng = std::make_shared(); @@ -107,6 +116,7 @@ public: int n_threads = -1; float scale_factor = 0.18215f; float shift_factor = 0.f; + float default_flow_shift = INFINITY; std::shared_ptr cond_stage_model; std::shared_ptr clip_vision; // for svd or wan2.1 i2v @@ -127,7 +137,7 @@ public: bool use_tiny_autoencoder = false; sd_tiling_params_t vae_tiling_params = {false, 0, 0, 0.5f, 0, 0}; bool offload_params_to_cpu = false; - bool stacked_id = false; + bool use_pmid = false; bool is_using_v_parameterization = false; bool is_using_edm_v_parameterization = false; @@ -165,7 +175,27 @@ public: #endif #ifdef SD_USE_VULKAN LOG_DEBUG("Using Vulkan backend"); - for (int device = 0; device < ggml_backend_vk_get_device_count(); ++device) { + size_t device = 0; + const int device_count = ggml_backend_vk_get_device_count(); + if (device_count) { + const char* SD_VK_DEVICE = getenv("SD_VK_DEVICE"); + if (SD_VK_DEVICE != nullptr) { + std::string sd_vk_device_str = SD_VK_DEVICE; + try { + device = std::stoull(sd_vk_device_str); + } catch (const std::invalid_argument&) { + LOG_WARN("SD_VK_DEVICE environment variable is not a valid integer (%s). 
Falling back to device 0.", SD_VK_DEVICE); + device = 0; + } catch (const std::out_of_range&) { + LOG_WARN("SD_VK_DEVICE environment variable value is out of range for `unsigned long long` type (%s). Falling back to device 0.", SD_VK_DEVICE); + device = 0; + } + if (device >= device_count) { + LOG_WARN("Cannot find targeted vulkan device (%llu). Falling back to device 0.", device); + device = 0; + } + } + LOG_INFO("Vulkan: Using device %llu", device); backend = ggml_backend_vk_init(device); } if (!backend) { @@ -294,6 +324,7 @@ public: LOG_INFO("loading vae from '%s'", sd_ctx_params->vae_path); if (!model_loader.init_from_file(sd_ctx_params->vae_path, "vae.")) { LOG_WARN("loading vae from '%s' failed", sd_ctx_params->vae_path); + external_vae_is_invalid = true; } } @@ -375,6 +406,7 @@ public: shift_factor = 0.1159f; } else if (sd_version_is_wan(version) || sd_version_is_qwen_image(version) || + sd_version_is_anima(version) || sd_version_is_flux2(version)) { scale_factor = 1.0f; shift_factor = 0.f; @@ -385,6 +417,15 @@ public: vae_decode_only = false; } + bool tae_preview_only = sd_ctx_params->tae_preview_only; + if (version == VERSION_SDXS) { + tae_preview_only = false; + } + + if (sd_ctx_params->circular_x || sd_ctx_params->circular_y) { + LOG_INFO("Using circular padding for convolutions"); + } + bool clip_on_cpu = sd_ctx_params->keep_clip_on_cpu; { @@ -409,7 +450,7 @@ public: } } if (is_chroma) { - if (sd_ctx_params->diffusion_flash_attn && sd_ctx_params->chroma_use_dit_mask) { + if ((sd_ctx_params->flash_attn || sd_ctx_params->diffusion_flash_attn) && sd_ctx_params->chroma_use_dit_mask) { LOG_WARN( "!!!It looks like you are using Chroma with flash attention. " "This is currently unsupported. " @@ -494,7 +535,16 @@ public: offload_params_to_cpu, tensor_storage_map, "model.diffusion_model", - version); + version, + sd_ctx_params->qwen_image_zero_cond_t); + } else if (sd_version_is_anima(version)) { + cond_stage_model = std::make_shared(clip_backend, + offload_params_to_cpu, + tensor_storage_map); + diffusion_model = std::make_shared(backend, + offload_params_to_cpu, + tensor_storage_map, + "model.diffusion_model"); } else if (sd_version_is_z_image(version)) { cond_stage_model = std::make_shared(clip_backend, offload_params_to_cpu, @@ -507,7 +557,7 @@ public: version); } else { // SD1.x SD2.x SDXL std::map embbeding_map; - for (int i = 0; i < sd_ctx_params->embedding_count; i++) { + for (uint32_t i = 0; i < sd_ctx_params->embedding_count; i++) { embbeding_map.emplace(SAFE_STR(sd_ctx_params->embeddings[i].name), SAFE_STR(sd_ctx_params->embeddings[i].path)); } if (strstr(SAFE_STR(sd_ctx_params->photo_maker_path), "v2")) { @@ -534,11 +584,6 @@ public: } } - if (sd_ctx_params->diffusion_flash_attn) { - LOG_INFO("Using flash attention in the diffusion model"); - diffusion_model->set_flash_attn_enabled(true); - } - cond_stage_model->alloc_params_buffer(); cond_stage_model->get_param_tensors(tensors); @@ -561,8 +606,8 @@ public: vae_backend = backend; } - if (sd_version_is_wan(version) || sd_version_is_qwen_image(version)) { - if (!use_tiny_autoencoder) { + if (!(use_tiny_autoencoder || version == VERSION_SDXS) || tae_preview_only) { + if (sd_version_is_wan(version) || sd_version_is_qwen_image(version) || sd_version_is_anima(version)) { first_stage_model = std::make_shared(vae_backend, offload_params_to_cpu, tensor_storage_map, @@ -571,57 +616,59 @@ public: version); first_stage_model->alloc_params_buffer(); first_stage_model->get_param_tensors(tensors, "first_stage_model"); + } else if (version 
== VERSION_CHROMA_RADIANCE) { + first_stage_model = std::make_shared(vae_backend, + offload_params_to_cpu); } else { + first_stage_model = std::make_shared(vae_backend, + offload_params_to_cpu, + tensor_storage_map, + "first_stage_model", + vae_decode_only, + false, + version); + if (sd_ctx_params->vae_conv_direct) { + LOG_INFO("Using Conv2d direct in the vae model"); + first_stage_model->set_conv2d_direct_enabled(true); + } + if (sd_version_is_sdxl(version) && + (strlen(SAFE_STR(sd_ctx_params->vae_path)) == 0 || sd_ctx_params->force_sdxl_vae_conv_scale || external_vae_is_invalid)) { + float vae_conv_2d_scale = 1.f / 32.f; + LOG_WARN( + "No valid VAE specified with --vae or --force-sdxl-vae-conv-scale flag set, " + "using Conv2D scale %.3f", + vae_conv_2d_scale); + first_stage_model->set_conv2d_scale(vae_conv_2d_scale); + } + first_stage_model->alloc_params_buffer(); + first_stage_model->get_param_tensors(tensors, "first_stage_model"); + } + } + if (use_tiny_autoencoder || version == VERSION_SDXS) { + if (sd_version_is_wan(version) || sd_version_is_qwen_image(version) || sd_version_is_anima(version)) { tae_first_stage = std::make_shared(vae_backend, offload_params_to_cpu, tensor_storage_map, "decoder", vae_decode_only, version); - if (sd_ctx_params->vae_conv_direct) { - LOG_INFO("Using Conv2d direct in the tae model"); - tae_first_stage->set_conv2d_direct_enabled(true); + } else { + tae_first_stage = std::make_shared(vae_backend, + offload_params_to_cpu, + tensor_storage_map, + "decoder.layers", + vae_decode_only, + version); + if (version == VERSION_SDXS) { + tae_first_stage->alloc_params_buffer(); + tae_first_stage->get_param_tensors(tensors, "first_stage_model"); } } - } else if (version == VERSION_CHROMA_RADIANCE) { - first_stage_model = std::make_shared(vae_backend, - offload_params_to_cpu); - } else if (!use_tiny_autoencoder || sd_ctx_params->tae_preview_only) { - first_stage_model = std::make_shared(vae_backend, - offload_params_to_cpu, - tensor_storage_map, - "first_stage_model", - vae_decode_only, - false, - version); - if (sd_ctx_params->vae_conv_direct) { - LOG_INFO("Using Conv2d direct in the vae model"); - first_stage_model->set_conv2d_direct_enabled(true); - } - if (version == VERSION_SDXL && - (strlen(SAFE_STR(sd_ctx_params->vae_path)) == 0 || sd_ctx_params->force_sdxl_vae_conv_scale)) { - float vae_conv_2d_scale = 1.f / 32.f; - LOG_WARN( - "No VAE specified with --vae or --force-sdxl-vae-conv-scale flag set, " - "using Conv2D scale %.3f", - vae_conv_2d_scale); - first_stage_model->set_conv2d_scale(vae_conv_2d_scale); - } - first_stage_model->alloc_params_buffer(); - first_stage_model->get_param_tensors(tensors, "first_stage_model"); - } else if (use_tiny_autoencoder) { - tae_first_stage = std::make_shared(vae_backend, - offload_params_to_cpu, - tensor_storage_map, - "decoder.layers", - vae_decode_only, - version); if (sd_ctx_params->vae_conv_direct) { LOG_INFO("Using Conv2d direct in the tae model"); tae_first_stage->set_conv2d_direct_enabled(true); } } - // first_stage_model->get_param_tensors(tensors, "first_stage_model."); if (strlen(SAFE_STR(sd_ctx_params->control_net_path)) > 0) { ggml_backend_t controlnet_backend = nullptr; @@ -672,16 +719,52 @@ public: if (!model_loader.init_from_file_and_convert_name(sd_ctx_params->photo_maker_path, "pmid.")) { LOG_WARN("loading stacked ID embedding from '%s' failed", sd_ctx_params->photo_maker_path); } else { - stacked_id = true; + use_pmid = true; } } - if (stacked_id) { + if (use_pmid) { if (!pmid_model->alloc_params_buffer()) { 
LOG_ERROR(" pmid model params buffer allocation failed"); return false; } pmid_model->get_param_tensors(tensors, "pmid"); } + + if (sd_ctx_params->flash_attn) { + LOG_INFO("Using flash attention"); + cond_stage_model->set_flash_attention_enabled(true); + if (clip_vision) { + clip_vision->set_flash_attention_enabled(true); + } + if (first_stage_model) { + first_stage_model->set_flash_attention_enabled(true); + } + if (tae_first_stage) { + tae_first_stage->set_flash_attention_enabled(true); + } + } + + if (sd_ctx_params->flash_attn || sd_ctx_params->diffusion_flash_attn) { + LOG_INFO("Using flash attention in the diffusion model"); + diffusion_model->set_flash_attention_enabled(true); + if (high_noise_diffusion_model) { + high_noise_diffusion_model->set_flash_attention_enabled(true); + } + } + + diffusion_model->set_circular_axes(sd_ctx_params->circular_x, sd_ctx_params->circular_y); + if (high_noise_diffusion_model) { + high_noise_diffusion_model->set_circular_axes(sd_ctx_params->circular_x, sd_ctx_params->circular_y); + } + if (control_net) { + control_net->set_circular_axes(sd_ctx_params->circular_x, sd_ctx_params->circular_y); + } + if (first_stage_model) { + first_stage_model->set_circular_axes(sd_ctx_params->circular_x, sd_ctx_params->circular_y); + } + if (tae_first_stage) { + tae_first_stage->set_circular_axes(sd_ctx_params->circular_x, sd_ctx_params->circular_y); + } } struct ggml_init_params params; @@ -702,9 +785,12 @@ public: if (use_tiny_autoencoder) { ignore_tensors.insert("first_stage_model."); } - if (stacked_id) { + if (use_pmid) { ignore_tensors.insert("pmid.unet."); } + ignore_tensors.insert("model.diffusion_model.__x0__"); + ignore_tensors.insert("model.diffusion_model.__32x32__"); + ignore_tensors.insert("model.diffusion_model.__index_timestep_zero__"); if (vae_decode_only) { ignore_tensors.insert("first_stage_model.encoder"); @@ -720,7 +806,7 @@ public: if (version == VERSION_SVD) { ignore_tensors.insert("conditioner.embedders.3"); } - bool success = model_loader.load_tensors(tensors, ignore_tensors, n_threads); + bool success = model_loader.load_tensors(tensors, ignore_tensors, n_threads, sd_ctx_params->enable_mmap); if (!success) { LOG_ERROR("load tensors from model loader failed"); ggml_free(ctx); @@ -736,14 +822,15 @@ public: unet_params_mem_size += high_noise_diffusion_model->get_params_buffer_size(); } size_t vae_params_mem_size = 0; - if (!use_tiny_autoencoder || sd_ctx_params->tae_preview_only) { + if (!(use_tiny_autoencoder || version == VERSION_SDXS) || tae_preview_only) { vae_params_mem_size = first_stage_model->get_params_buffer_size(); } - if (use_tiny_autoencoder) { - if (!tae_first_stage->load_from_file(taesd_path, n_threads)) { + if (use_tiny_autoencoder || version == VERSION_SDXS) { + if (use_tiny_autoencoder && !tae_first_stage->load_from_file(taesd_path, n_threads)) { return false; } - vae_params_mem_size = tae_first_stage->get_params_buffer_size(); + use_tiny_autoencoder = true; // now the processing is identical for VERSION_SDXS + vae_params_mem_size = tae_first_stage->get_params_buffer_size(); } size_t control_net_params_mem_size = 0; if (control_net) { @@ -753,7 +840,7 @@ public: control_net_params_mem_size = control_net->get_params_buffer_size(); } size_t pmid_params_mem_size = 0; - if (stacked_id) { + if (use_pmid) { pmid_params_mem_size = pmid_model->get_params_buffer_size(); } @@ -805,7 +892,6 @@ public: // init denoiser { prediction_t pred_type = sd_ctx_params->prediction; - float flow_shift = sd_ctx_params->flow_shift; if (pred_type == 
PREDICTION_COUNT) { if (sd_version_is_sd2(version)) { @@ -828,23 +914,22 @@ public: } else if (sd_version_is_sd3(version) || sd_version_is_wan(version) || sd_version_is_qwen_image(version) || + sd_version_is_anima(version) || sd_version_is_z_image(version)) { pred_type = FLOW_PRED; - if (flow_shift == INFINITY) { - if (sd_version_is_wan(version)) { - flow_shift = 5.f; - } else { - flow_shift = 3.f; - } + if (sd_version_is_wan(version)) { + default_flow_shift = 5.f; + } else { + default_flow_shift = 3.f; } } else if (sd_version_is_flux(version)) { pred_type = FLUX_FLOW_PRED; - if (flow_shift == INFINITY) { - flow_shift = 1.0f; // TODO: validate - for (const auto& [name, tensor_storage] : tensor_storage_map) { - if (starts_with(name, "model.diffusion_model.guidance_in.in_layer.weight")) { - flow_shift = 1.15f; - } + + default_flow_shift = 1.0f; // TODO: validate + for (const auto& [name, tensor_storage] : tensor_storage_map) { + if (starts_with(name, "model.diffusion_model.guidance_in.in_layer.weight")) { + default_flow_shift = 1.15f; + break; } } } else if (sd_version_is_flux2(version)) { @@ -868,12 +953,12 @@ public: break; case FLOW_PRED: { LOG_INFO("running in FLOW mode"); - denoiser = std::make_shared(flow_shift); + denoiser = std::make_shared(); break; } case FLUX_FLOW_PRED: { LOG_INFO("running in Flux FLOW mode"); - denoiser = std::make_shared(flow_shift); + denoiser = std::make_shared(); break; } case FLUX2_FLOW_PRED: { @@ -898,7 +983,7 @@ public: } ggml_free(ctx); - use_tiny_autoencoder = use_tiny_autoencoder && !sd_ctx_params->tae_preview_only; + use_tiny_autoencoder = use_tiny_autoencoder && !tae_preview_only; return true; } @@ -1013,6 +1098,18 @@ public: cond_stage_lora_models.clear(); diffusion_lora_models.clear(); first_stage_lora_models.clear(); + if (cond_stage_model) { + cond_stage_model->set_weight_adapter(nullptr); + } + if (diffusion_model) { + diffusion_model->set_weight_adapter(nullptr); + } + if (high_noise_diffusion_model) { + high_noise_diffusion_model->set_weight_adapter(nullptr); + } + if (first_stage_model) { + first_stage_model->set_weight_adapter(nullptr); + } if (lora_state.empty()) { return; } @@ -1144,7 +1241,7 @@ public: void apply_loras(const sd_lora_t* loras, uint32_t lora_count) { std::unordered_map lora_f2m; - for (int i = 0; i < lora_count; i++) { + for (uint32_t i = 0; i < lora_count; i++) { std::string lora_id = SAFE_STR(loras[i].path); if (loras[i].is_high_noise) { lora_id = "|high_noise|" + lora_id; @@ -1164,14 +1261,89 @@ public: } } - ggml_tensor* id_encoder(ggml_context* work_ctx, - ggml_tensor* init_img, - ggml_tensor* prompts_embeds, - ggml_tensor* id_embeds, - std::vector& class_tokens_mask) { - ggml_tensor* res = nullptr; - pmid_model->compute(n_threads, init_img, prompts_embeds, id_embeds, class_tokens_mask, &res, work_ctx); - return res; + SDCondition get_pmid_conditon(ggml_context* work_ctx, + sd_pm_params_t pm_params, + ConditionerParams& condition_params) { + SDCondition id_cond; + if (use_pmid) { + if (!pmid_lora->applied) { + int64_t t0 = ggml_time_ms(); + pmid_lora->apply(tensors, version, n_threads); + int64_t t1 = ggml_time_ms(); + pmid_lora->applied = true; + LOG_INFO("pmid_lora apply completed, taking %.2fs", (t1 - t0) * 1.0f / 1000); + if (free_params_immediately) { + pmid_lora->free_params_buffer(); + } + } + // preprocess input id images + bool pmv2 = pmid_model->get_version() == PM_VERSION_2; + if (pm_params.id_images_count > 0) { + int clip_image_size = 224; + pmid_model->style_strength = pm_params.style_strength; + + auto 
id_image_tensor = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, clip_image_size, clip_image_size, 3, pm_params.id_images_count); + + std::vector processed_id_images; + for (int i = 0; i < pm_params.id_images_count; i++) { + sd_image_f32_t id_image = sd_image_t_to_sd_image_f32_t(pm_params.id_images[i]); + sd_image_f32_t processed_id_image = clip_preprocess(id_image, clip_image_size, clip_image_size); + free(id_image.data); + id_image.data = nullptr; + processed_id_images.push_back(processed_id_image); + } + + ggml_ext_tensor_iter(id_image_tensor, [&](ggml_tensor* id_image_tensor, int64_t i0, int64_t i1, int64_t i2, int64_t i3) { + float value = sd_image_get_f32(processed_id_images[i3], i0, i1, i2, false); + ggml_ext_tensor_set_f32(id_image_tensor, value, i0, i1, i2, i3); + }); + + for (auto& image : processed_id_images) { + free(image.data); + image.data = nullptr; + } + processed_id_images.clear(); + + int64_t t0 = ggml_time_ms(); + condition_params.num_input_imgs = pm_params.id_images_count; + auto cond_tup = cond_stage_model->get_learned_condition_with_trigger(work_ctx, + n_threads, + condition_params); + id_cond = std::get<0>(cond_tup); + auto class_tokens_mask = std::get<1>(cond_tup); + struct ggml_tensor* id_embeds = nullptr; + if (pmv2 && pm_params.id_embed_path != nullptr) { + id_embeds = load_tensor_from_file(work_ctx, pm_params.id_embed_path); + } + if (pmv2 && id_embeds == nullptr) { + LOG_WARN("Provided PhotoMaker images, but NO valid ID embeds file for PM v2"); + LOG_WARN("Turn off PhotoMaker"); + use_pmid = false; + } else { + if (pmv2 && pm_params.id_images_count != id_embeds->ne[1]) { + LOG_WARN("PhotoMaker image count (%d) does NOT match ID embeds (%d). You should run face_detect.py again.", pm_params.id_images_count, id_embeds->ne[1]); + LOG_WARN("Turn off PhotoMaker"); + use_pmid = false; + } else { + ggml_tensor* res = nullptr; + pmid_model->compute(n_threads, id_image_tensor, id_cond.c_crossattn, id_embeds, class_tokens_mask, &res, work_ctx); + id_cond.c_crossattn = res; + int64_t t1 = ggml_time_ms(); + LOG_INFO("Photomaker ID Stacking, taking %" PRId64 " ms", t1 - t0); + if (free_params_immediately) { + pmid_model->free_params_buffer(); + } + // Encode input prompt without the trigger word for delayed conditioning + condition_params.text = cond_stage_model->remove_trigger_from_prompt(work_ctx, condition_params.text); + } + } + } else { + LOG_WARN("Provided PhotoMaker model file, but NO input ID images"); + LOG_WARN("Turn off PhotoMaker"); + use_pmid = false; + } + } + return id_cond; } ggml_tensor* get_clip_vision_output(ggml_context* work_ctx, @@ -1321,12 +1493,12 @@ public: void* step_callback_data, bool is_noisy) { const uint32_t channel = 3; - uint32_t width = latents->ne[0]; - uint32_t height = latents->ne[1]; - uint32_t dim = latents->ne[ggml_n_dims(latents) - 1]; + uint32_t width = static_cast(latents->ne[0]); + uint32_t height = static_cast(latents->ne[1]); + uint32_t dim = static_cast(latents->ne[ggml_n_dims(latents) - 1]); if (preview_mode == PREVIEW_PROJ) { - int64_t patch_sz = 1; + int patch_sz = 1; const float(*latent_rgb_proj)[channel] = nullptr; float* latent_rgb_bias = nullptr; @@ -1354,7 +1526,7 @@ public: } else if (sd_version_is_flux(version) || sd_version_is_z_image(version)) { latent_rgb_proj = flux_latent_rgb_proj; latent_rgb_bias = flux_latent_rgb_bias; - } else if (sd_version_is_wan(version) || sd_version_is_qwen_image(version)) { + } else if (sd_version_is_wan(version) || sd_version_is_qwen_image(version) || sd_version_is_anima(version)) { 
latent_rgb_proj = wan_21_latent_rgb_proj; latent_rgb_bias = wan_21_latent_rgb_bias; } else { @@ -1386,7 +1558,7 @@ public: uint32_t frames = 1; if (ggml_n_dims(latents) == 4) { - frames = latents->ne[2]; + frames = static_cast(latents->ne[2]); } uint32_t img_width = width * patch_sz; @@ -1396,7 +1568,7 @@ public: preview_latent_video(data, latents, latent_rgb_proj, latent_rgb_bias, patch_sz); sd_image_t* images = (sd_image_t*)malloc(frames * sizeof(sd_image_t)); - for (int i = 0; i < frames; i++) { + for (uint32_t i = 0; i < frames; i++) { images[i] = {img_width, img_height, channel, data + i * img_width * img_height * channel}; } step_callback(step, frames, images, is_noisy, step_callback_data); @@ -1408,7 +1580,7 @@ public: if (vae_tiling_params.enabled) { // split latent in 32x32 tiles and compute in several steps auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) { - first_stage_model->compute(n_threads, in, true, &out, nullptr); + return first_stage_model->compute(n_threads, in, true, &out, nullptr); }; silent_tiling(latents, result, get_vae_scale_factor(), 32, 0.5f, on_tiling); @@ -1427,7 +1599,7 @@ public: if (vae_tiling_params.enabled) { // split latent in 64x64 tiles and compute in several steps auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) { - tae_first_stage->compute(n_threads, in, true, &out, nullptr); + return tae_first_stage->compute(n_threads, in, true, &out, nullptr); }; silent_tiling(latents, result, get_vae_scale_factor(), 64, 0.5f, on_tiling); } else { @@ -1441,22 +1613,22 @@ public: ggml_ext_tensor_clamp_inplace(result, 0.0f, 1.0f); uint32_t frames = 1; if (ggml_n_dims(latents) == 4) { - frames = result->ne[2]; + frames = static_cast(result->ne[2]); } sd_image_t* images = (sd_image_t*)malloc(frames * sizeof(sd_image_t)); // print_ggml_tensor(result,true); for (size_t i = 0; i < frames; i++) { - images[i].width = result->ne[0]; - images[i].height = result->ne[1]; + images[i].width = static_cast(result->ne[0]); + images[i].height = static_cast(result->ne[1]); images[i].channel = 3; - images[i].data = ggml_tensor_to_sd_image(result, i, ggml_n_dims(latents) == 4); + images[i].data = ggml_tensor_to_sd_image(result, static_cast(i), ggml_n_dims(latents) == 4); } step_callback(step, frames, images, is_noisy, step_callback_data); ggml_ext_tensor_scale_inplace(result, 0); - for (int i = 0; i < frames; i++) { + for (uint32_t i = 0; i < frames; i++) { free(images[i].data); } @@ -1481,19 +1653,30 @@ public: const std::vector& sigmas, int start_merge_step, SDCondition id_cond, - std::vector ref_latents = {}, - bool increase_ref_index = false, - ggml_tensor* denoise_mask = nullptr, - ggml_tensor* vace_context = nullptr, - float vace_strength = 1.f, - const sd_easycache_params_t* easycache_params = nullptr) { + std::vector ref_latents = {}, + bool increase_ref_index = false, + ggml_tensor* denoise_mask = nullptr, + ggml_tensor* vace_context = nullptr, + float vace_strength = 1.f, + const sd_cache_params_t* cache_params = nullptr) { if (shifted_timestep > 0 && !sd_version_is_sdxl(version)) { LOG_WARN("timestep shifting is only supported for SDXL models!"); shifted_timestep = 0; } std::vector skip_layers(guidance.slg.layers, guidance.slg.layers + guidance.slg.layer_count); - float cfg_scale = guidance.txt_cfg; + float cfg_scale = guidance.txt_cfg; + if (cfg_scale < 1.f) { + if (cfg_scale == 0.f) { + // Diffusers follow the convention from the original paper + // (https://arxiv.org/abs/2207.12598v1), so many distilled model docs + // recommend 0 as 
guidance; warn the user that it'll disable prompt following + LOG_WARN("unconditioned mode, images won't follow the prompt (use cfg-scale=1 for distilled models)"); + } else { + LOG_WARN("cfg value out of expected range may produce unexpected results"); + } + } + float img_cfg_scale = std::isfinite(guidance.img_cfg) ? guidance.img_cfg : guidance.txt_cfg; float slg_scale = guidance.slg.scale; @@ -1503,31 +1686,40 @@ public: } EasyCacheState easycache_state; + UCacheState ucache_state; + CacheDitConditionState cachedit_state; bool easycache_enabled = false; - if (easycache_params != nullptr && easycache_params->enabled) { - bool easycache_supported = sd_version_is_dit(version); - if (!easycache_supported) { - LOG_WARN("EasyCache requested but not supported for this model type"); - } else { - EasyCacheConfig easycache_config; - easycache_config.enabled = true; - easycache_config.reuse_threshold = std::max(0.0f, easycache_params->reuse_threshold); - easycache_config.start_percent = easycache_params->start_percent; - easycache_config.end_percent = easycache_params->end_percent; - bool percent_valid = easycache_config.start_percent >= 0.0f && - easycache_config.start_percent < 1.0f && - easycache_config.end_percent > 0.0f && - easycache_config.end_percent <= 1.0f && - easycache_config.start_percent < easycache_config.end_percent; - if (!percent_valid) { - LOG_WARN("EasyCache disabled due to invalid percent range (start=%.3f, end=%.3f)", - easycache_config.start_percent, - easycache_config.end_percent); + bool ucache_enabled = false; + bool cachedit_enabled = false; + + if (cache_params != nullptr && cache_params->mode != SD_CACHE_DISABLED) { + bool percent_valid = true; + if (cache_params->mode == SD_CACHE_EASYCACHE || cache_params->mode == SD_CACHE_UCACHE) { + percent_valid = cache_params->start_percent >= 0.0f && + cache_params->start_percent < 1.0f && + cache_params->end_percent > 0.0f && + cache_params->end_percent <= 1.0f && + cache_params->start_percent < cache_params->end_percent; + } + + if (!percent_valid) { + LOG_WARN("Cache disabled due to invalid percent range (start=%.3f, end=%.3f)", + cache_params->start_percent, + cache_params->end_percent); + } else if (cache_params->mode == SD_CACHE_EASYCACHE) { + bool easycache_supported = sd_version_is_dit(version); + if (!easycache_supported) { + LOG_WARN("EasyCache requested but not supported for this model type"); } else { + EasyCacheConfig easycache_config; + easycache_config.enabled = true; + easycache_config.reuse_threshold = std::max(0.0f, cache_params->reuse_threshold); + easycache_config.start_percent = cache_params->start_percent; + easycache_config.end_percent = cache_params->end_percent; easycache_state.init(easycache_config, denoiser.get()); if (easycache_state.enabled()) { easycache_enabled = true; - LOG_INFO("EasyCache enabled - threshold: %.3f, start_percent: %.2f, end_percent: %.2f", + LOG_INFO("EasyCache enabled - threshold: %.3f, start: %.2f, end: %.2f", easycache_config.reuse_threshold, easycache_config.start_percent, easycache_config.end_percent); @@ -1535,9 +1727,84 @@ public: LOG_WARN("EasyCache requested but could not be initialized for this run"); } } + } else if (cache_params->mode == SD_CACHE_UCACHE) { + bool ucache_supported = sd_version_is_unet(version); + if (!ucache_supported) { + LOG_WARN("UCache requested but not supported for this model type (only UNET models)"); + } else { + UCacheConfig ucache_config; + ucache_config.enabled = true; + ucache_config.reuse_threshold = std::max(0.0f,
cache_params->reuse_threshold); + ucache_config.start_percent = cache_params->start_percent; + ucache_config.end_percent = cache_params->end_percent; + ucache_config.error_decay_rate = std::max(0.0f, std::min(1.0f, cache_params->error_decay_rate)); + ucache_config.use_relative_threshold = cache_params->use_relative_threshold; + ucache_config.reset_error_on_compute = cache_params->reset_error_on_compute; + ucache_state.init(ucache_config, denoiser.get()); + if (ucache_state.enabled()) { + ucache_enabled = true; + LOG_INFO("UCache enabled - threshold: %.3f, start: %.2f, end: %.2f, decay: %.2f, relative: %s, reset: %s", + ucache_config.reuse_threshold, + ucache_config.start_percent, + ucache_config.end_percent, + ucache_config.error_decay_rate, + ucache_config.use_relative_threshold ? "true" : "false", + ucache_config.reset_error_on_compute ? "true" : "false"); + } else { + LOG_WARN("UCache requested but could not be initialized for this run"); + } + } + } else if (cache_params->mode == SD_CACHE_DBCACHE || + cache_params->mode == SD_CACHE_TAYLORSEER || + cache_params->mode == SD_CACHE_CACHE_DIT) { + bool cachedit_supported = sd_version_is_dit(version); + if (!cachedit_supported) { + LOG_WARN("CacheDIT requested but not supported for this model type (only DiT models)"); + } else { + DBCacheConfig dbcfg; + dbcfg.enabled = (cache_params->mode == SD_CACHE_DBCACHE || + cache_params->mode == SD_CACHE_CACHE_DIT); + dbcfg.Fn_compute_blocks = cache_params->Fn_compute_blocks; + dbcfg.Bn_compute_blocks = cache_params->Bn_compute_blocks; + dbcfg.residual_diff_threshold = cache_params->residual_diff_threshold; + dbcfg.max_warmup_steps = cache_params->max_warmup_steps; + dbcfg.max_cached_steps = cache_params->max_cached_steps; + dbcfg.max_continuous_cached_steps = cache_params->max_continuous_cached_steps; + if (cache_params->scm_mask != nullptr && strlen(cache_params->scm_mask) > 0) { + dbcfg.steps_computation_mask = parse_scm_mask(cache_params->scm_mask); + } + dbcfg.scm_policy_dynamic = cache_params->scm_policy_dynamic; + + TaylorSeerConfig tcfg; + tcfg.enabled = (cache_params->mode == SD_CACHE_TAYLORSEER || + cache_params->mode == SD_CACHE_CACHE_DIT); + tcfg.n_derivatives = cache_params->taylorseer_n_derivatives; + tcfg.skip_interval_steps = cache_params->taylorseer_skip_interval; + + cachedit_state.init(dbcfg, tcfg); + if (cachedit_state.enabled()) { + cachedit_enabled = true; + LOG_INFO("CacheDIT enabled - mode: %s, Fn: %d, Bn: %d, threshold: %.3f, warmup: %d", + cache_params->mode == SD_CACHE_CACHE_DIT ? "DBCache+TaylorSeer" : (cache_params->mode == SD_CACHE_DBCACHE ? 
"DBCache" : "TaylorSeer"), + dbcfg.Fn_compute_blocks, + dbcfg.Bn_compute_blocks, + dbcfg.residual_diff_threshold, + dbcfg.max_warmup_steps); + } else { + LOG_WARN("CacheDIT requested but could not be initialized for this run"); + } + } } } + if (ucache_enabled) { + ucache_state.set_sigmas(sigmas); + } + + if (cachedit_enabled) { + cachedit_state.set_sigmas(sigmas); + } + size_t steps = sigmas.size() - 1; struct ggml_tensor* x = ggml_dup_tensor(work_ctx, init_latent); copy_ggml_tensor(x, init_latent); @@ -1583,7 +1850,7 @@ public: int64_t H = x->ne[1] * get_vae_scale_factor(); if (ggml_n_dims(x) == 4) { // assuming video mode (if batch processing gets implemented this will break) - int T = x->ne[2]; + int64_t T = x->ne[2]; if (sd_version_is_wan(version)) { T = ((T - 1) * 4) + 1; } @@ -1641,6 +1908,91 @@ public: return easycache_step_active && easycache_state.is_step_skipped(); }; + const bool ucache_step_active = ucache_enabled && step > 0; + int ucache_step_index = ucache_step_active ? (step - 1) : -1; + if (ucache_step_active) { + ucache_state.begin_step(ucache_step_index, sigma); + } + + auto ucache_before_condition = [&](const SDCondition* condition, struct ggml_tensor* output_tensor) -> bool { + if (!ucache_step_active || condition == nullptr || output_tensor == nullptr) { + return false; + } + return ucache_state.before_condition(condition, + diffusion_params.x, + output_tensor, + sigma, + ucache_step_index); + }; + + auto ucache_after_condition = [&](const SDCondition* condition, struct ggml_tensor* output_tensor) { + if (!ucache_step_active || condition == nullptr || output_tensor == nullptr) { + return; + } + ucache_state.after_condition(condition, + diffusion_params.x, + output_tensor); + }; + + auto ucache_step_is_skipped = [&]() { + return ucache_step_active && ucache_state.is_step_skipped(); + }; + + const bool cachedit_step_active = cachedit_enabled && step > 0; + int cachedit_step_index = cachedit_step_active ? 
(step - 1) : -1; + if (cachedit_step_active) { + cachedit_state.begin_step(cachedit_step_index, sigma); + } + + auto cachedit_before_condition = [&](const SDCondition* condition, struct ggml_tensor* output_tensor) -> bool { + if (!cachedit_step_active || condition == nullptr || output_tensor == nullptr) { + return false; + } + return cachedit_state.before_condition(condition, + diffusion_params.x, + output_tensor, + sigma, + cachedit_step_index); + }; + + auto cachedit_after_condition = [&](const SDCondition* condition, struct ggml_tensor* output_tensor) { + if (!cachedit_step_active || condition == nullptr || output_tensor == nullptr) { + return; + } + cachedit_state.after_condition(condition, + diffusion_params.x, + output_tensor); + }; + + auto cachedit_step_is_skipped = [&]() { + return cachedit_step_active && cachedit_state.is_step_skipped(); + }; + + auto cache_before_condition = [&](const SDCondition* condition, struct ggml_tensor* output_tensor) -> bool { + if (easycache_step_active) { + return easycache_before_condition(condition, output_tensor); + } else if (ucache_step_active) { + return ucache_before_condition(condition, output_tensor); + } else if (cachedit_step_active) { + return cachedit_before_condition(condition, output_tensor); + } + return false; + }; + + auto cache_after_condition = [&](const SDCondition* condition, struct ggml_tensor* output_tensor) { + if (easycache_step_active) { + easycache_after_condition(condition, output_tensor); + } else if (ucache_step_active) { + ucache_after_condition(condition, output_tensor); + } else if (cachedit_step_active) { + cachedit_after_condition(condition, output_tensor); + } + }; + + auto cache_step_is_skipped = [&]() { + return easycache_step_is_skipped() || ucache_step_is_skipped() || cachedit_step_is_skipped(); + }; + std::vector scaling = denoiser->get_scalings(sigma); GGML_ASSERT(scaling.size() == 3); float c_skip = scaling[0]; @@ -1655,6 +2007,9 @@ public: shifted_t = std::max((int64_t)0, std::min((int64_t)(TIMESTEPS - 1), shifted_t)); LOG_DEBUG("shifting timestep from %.2f to %" PRId64 " (sigma: %.4f)", t, shifted_t, sigma); timesteps_vec.assign(1, (float)shifted_t); + } else if (sd_version_is_anima(version)) { + // Anima uses normalized flow timesteps. 
+ timesteps_vec.assign(1, t / static_cast(TIMESTEPS)); } else if (sd_version_is_z_image(version)) { timesteps_vec.assign(1, 1000.f - t); } else { @@ -1716,7 +2071,7 @@ public: active_condition = &id_cond; } - bool skip_model = easycache_before_condition(active_condition, *active_output); + bool skip_model = cache_before_condition(active_condition, *active_output); if (!skip_model) { if (!work_diffusion_model->compute(n_threads, diffusion_params, @@ -1724,10 +2079,10 @@ public: LOG_ERROR("diffusion model compute failed"); return nullptr; } - easycache_after_condition(active_condition, *active_output); + cache_after_condition(active_condition, *active_output); } - bool current_step_skipped = easycache_step_is_skipped(); + bool current_step_skipped = cache_step_is_skipped(); float* negative_data = nullptr; if (has_unconditioned) { @@ -1739,12 +2094,12 @@ public: LOG_ERROR("controlnet compute failed"); } } - current_step_skipped = easycache_step_is_skipped(); + current_step_skipped = cache_step_is_skipped(); diffusion_params.controls = controls; diffusion_params.context = uncond.c_crossattn; diffusion_params.c_concat = uncond.c_concat; diffusion_params.y = uncond.c_vector; - bool skip_uncond = easycache_before_condition(&uncond, out_uncond); + bool skip_uncond = cache_before_condition(&uncond, out_uncond); if (!skip_uncond) { if (!work_diffusion_model->compute(n_threads, diffusion_params, @@ -1752,7 +2107,7 @@ public: LOG_ERROR("diffusion model compute failed"); return nullptr; } - easycache_after_condition(&uncond, out_uncond); + cache_after_condition(&uncond, out_uncond); } negative_data = (float*)out_uncond->data; } @@ -1762,7 +2117,7 @@ public: diffusion_params.context = img_cond.c_crossattn; diffusion_params.c_concat = img_cond.c_concat; diffusion_params.y = img_cond.c_vector; - bool skip_img_cond = easycache_before_condition(&img_cond, out_img_cond); + bool skip_img_cond = cache_before_condition(&img_cond, out_img_cond); if (!skip_img_cond) { if (!work_diffusion_model->compute(n_threads, diffusion_params, @@ -1770,17 +2125,17 @@ public: LOG_ERROR("diffusion model compute failed"); return nullptr; } - easycache_after_condition(&img_cond, out_img_cond); + cache_after_condition(&img_cond, out_img_cond); } img_cond_data = (float*)out_img_cond->data; } - int step_count = sigmas.size(); + int step_count = static_cast(sigmas.size()); bool is_skiplayer_step = has_skiplayer && step > (int)(guidance.slg.layer_start * step_count) && step < (int)(guidance.slg.layer_end * step_count); float* skip_layer_data = has_skiplayer ? (float*)out_skip->data : nullptr; if (is_skiplayer_step) { LOG_DEBUG("Skipping layers at step %d\n", step); - if (!easycache_step_is_skipped()) { + if (!cache_step_is_skipped()) { // skip layer (same as conditioned) diffusion_params.context = cond.c_crossattn; diffusion_params.c_concat = cond.c_concat; @@ -1884,6 +2239,48 @@ public: } } + if (ucache_enabled) { + size_t total_steps = sigmas.size() > 0 ? 
sigmas.size() - 1 : 0; + if (ucache_state.total_steps_skipped > 0 && total_steps > 0) { + if (ucache_state.total_steps_skipped < static_cast(total_steps)) { + double speedup = static_cast(total_steps) / + static_cast(total_steps - ucache_state.total_steps_skipped); + LOG_INFO("UCache skipped %d/%zu steps (%.2fx estimated speedup)", + ucache_state.total_steps_skipped, + total_steps, + speedup); + } else { + LOG_INFO("UCache skipped %d/%zu steps", + ucache_state.total_steps_skipped, + total_steps); + } + } else if (total_steps > 0) { + LOG_INFO("UCache completed without skipping steps"); + } + } + + if (cachedit_enabled) { + size_t total_steps = sigmas.size() > 0 ? sigmas.size() - 1 : 0; + if (cachedit_state.total_steps_skipped > 0 && total_steps > 0) { + if (cachedit_state.total_steps_skipped < static_cast(total_steps)) { + double speedup = static_cast(total_steps) / + static_cast(total_steps - cachedit_state.total_steps_skipped); + LOG_INFO("CacheDIT skipped %d/%zu steps (%.2fx estimated speedup), accum_diff: %.4f", + cachedit_state.total_steps_skipped, + total_steps, + speedup, + cachedit_state.accumulated_residual_diff); + } else { + LOG_INFO("CacheDIT skipped %d/%zu steps, accum_diff: %.4f", + cachedit_state.total_steps_skipped, + total_steps, + cachedit_state.accumulated_residual_diff); + } + } else if (total_steps > 0) { + LOG_INFO("CacheDIT completed without skipping steps"); + } + } + if (inverse_noise_scaling) { x = denoiser->inverse_noise_scaling(sigmas[sigmas.size() - 1], x); } @@ -2024,7 +2421,7 @@ public: } void process_latent_in(ggml_tensor* latent) { - if (sd_version_is_wan(version) || sd_version_is_qwen_image(version) || sd_version_is_flux2(version)) { + if (sd_version_is_wan(version) || sd_version_is_qwen_image(version) || sd_version_is_anima(version) || sd_version_is_flux2(version)) { int channel_dim = sd_version_is_flux2(version) ? 2 : 3; std::vector latents_mean_vec; std::vector latents_std_vec; @@ -2063,7 +2460,7 @@ public: } void process_latent_out(ggml_tensor* latent) { - if (sd_version_is_wan(version) || sd_version_is_qwen_image(version) || sd_version_is_flux2(version)) { + if (sd_version_is_wan(version) || sd_version_is_qwen_image(version) || sd_version_is_anima(version) || sd_version_is_flux2(version)) { int channel_dim = sd_version_is_flux2(version) ? 
2 : 3; std::vector latents_mean_vec; std::vector latents_std_vec; @@ -2105,11 +2502,11 @@ public: int& tile_size_y, float& tile_overlap, const sd_tiling_params_t& params, - int latent_x, - int latent_y, + int64_t latent_x, + int64_t latent_y, float encoding_factor = 1.0f) { tile_overlap = std::max(std::min(params.target_overlap, 0.5f), 0.0f); - auto get_tile_size = [&](int requested_size, float factor, int latent_size) { + auto get_tile_size = [&](int requested_size, float factor, int64_t latent_size) { const int default_tile_size = 32; const int min_tile_dimension = 4; int tile_size = default_tile_size; @@ -2118,12 +2515,12 @@ public: if (factor > 0.f) { if (factor > 1.0) factor = 1 / (factor - factor * tile_overlap + tile_overlap); - tile_size = std::round(latent_size * factor); + tile_size = static_cast(std::round(latent_size * factor)); } else if (requested_size >= min_tile_dimension) { tile_size = requested_size; } - tile_size *= encoding_factor; - return std::max(std::min(tile_size, latent_size), min_tile_dimension); + tile_size = static_cast(tile_size * encoding_factor); + return std::max(std::min(tile_size, static_cast(latent_size)), min_tile_dimension); }; tile_size_x = get_tile_size(params.tile_size_x, params.rel_size_x, latent_x); @@ -2134,27 +2531,32 @@ public: int64_t t0 = ggml_time_ms(); ggml_tensor* result = nullptr; const int vae_scale_factor = get_vae_scale_factor(); - int W = x->ne[0] / vae_scale_factor; - int H = x->ne[1] / vae_scale_factor; - int C = get_latent_channel(); + int64_t W = x->ne[0] / vae_scale_factor; + int64_t H = x->ne[1] / vae_scale_factor; + int64_t C = get_latent_channel(); if (vae_tiling_params.enabled && !encode_video) { // TODO wan2.2 vae support? - int ne2; - int ne3; - if (sd_version_is_qwen_image(version)) { + int64_t ne2; + int64_t ne3; + if (sd_version_is_qwen_image(version) || sd_version_is_anima(version)) { ne2 = 1; ne3 = C * x->ne[3]; } else { - if (!use_tiny_autoencoder) { - C *= 2; + int64_t out_channels = C; + bool encode_outputs_mu = use_tiny_autoencoder || + sd_version_is_wan(version) || + sd_version_is_flux2(version) || + version == VERSION_CHROMA_RADIANCE; + if (!encode_outputs_mu) { + out_channels *= 2; } - ne2 = C; + ne2 = out_channels; ne3 = x->ne[3]; } result = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, W, H, ne2, ne3); } - if (sd_version_is_qwen_image(version)) { + if (sd_version_is_qwen_image(version) || sd_version_is_anima(version)) { x = ggml_reshape_4d(work_ctx, x, x->ne[0], x->ne[1], 1, x->ne[2] * x->ne[3]); } @@ -2169,7 +2571,7 @@ public: LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y); auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) { - first_stage_model->compute(n_threads, in, false, &out, work_ctx); + return first_stage_model->compute(n_threads, in, false, &out, work_ctx); }; sd_tiling_non_square(x, result, vae_scale_factor, tile_size_x, tile_size_y, tile_overlap, on_tiling); } else { @@ -2180,7 +2582,7 @@ public: if (vae_tiling_params.enabled && !encode_video) { // split latent in 32x32 tiles and compute in several steps auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) { - tae_first_stage->compute(n_threads, in, false, &out, nullptr); + return tae_first_stage->compute(n_threads, in, false, &out, nullptr); }; sd_tiling(x, result, vae_scale_factor, 64, 0.5f, on_tiling); } else { @@ -2227,6 +2629,7 @@ public: ggml_tensor* latent; if (use_tiny_autoencoder || sd_version_is_qwen_image(version) || + sd_version_is_anima(version) || sd_version_is_wan(version) || 
sd_version_is_flux2(version) || version == VERSION_CHROMA_RADIANCE) { @@ -2246,7 +2649,7 @@ public: if (!use_tiny_autoencoder) { process_latent_in(latent); } - if (sd_version_is_qwen_image(version)) { + if (sd_version_is_qwen_image(version) || sd_version_is_anima(version)) { latent = ggml_reshape_4d(work_ctx, latent, latent->ne[0], latent->ne[1], latent->ne[3], 1); } return latent; @@ -2264,7 +2667,7 @@ public: int64_t C = 3; ggml_tensor* result = nullptr; if (decode_video) { - int T = x->ne[2]; + int64_t T = x->ne[2]; if (sd_version_is_wan(version)) { T = ((T - 1) * 4) + 1; } @@ -2284,12 +2687,12 @@ public: } int64_t t0 = ggml_time_ms(); if (!use_tiny_autoencoder) { - if (sd_version_is_qwen_image(version)) { + if (sd_version_is_qwen_image(version) || sd_version_is_anima(version)) { x = ggml_reshape_4d(work_ctx, x, x->ne[0], x->ne[1], 1, x->ne[2] * x->ne[3]); } process_latent_out(x); // x = load_tensor_from_file(work_ctx, "wan_vae_z.bin"); - if (vae_tiling_params.enabled && !decode_video) { + if (vae_tiling_params.enabled) { float tile_overlap; int tile_size_x, tile_size_y; get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, vae_tiling_params, x->ne[0], x->ne[1]); @@ -2298,23 +2701,31 @@ public: // split latent in 32x32 tiles and compute in several steps auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) { - first_stage_model->compute(n_threads, in, true, &out, nullptr); + return first_stage_model->compute(n_threads, in, true, &out, nullptr); }; sd_tiling_non_square(x, result, vae_scale_factor, tile_size_x, tile_size_y, tile_overlap, on_tiling); } else { - first_stage_model->compute(n_threads, x, true, &result, work_ctx); + if (!first_stage_model->compute(n_threads, x, true, &result, work_ctx)) { + LOG_ERROR("Failed to decode latents"); + first_stage_model->free_compute_buffer(); + return nullptr; + } } first_stage_model->free_compute_buffer(); process_vae_output_tensor(result); } else { - if (vae_tiling_params.enabled && !decode_video) { + if (vae_tiling_params.enabled) { // split latent in 64x64 tiles and compute in several steps auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) { - tae_first_stage->compute(n_threads, in, true, &out); + return tae_first_stage->compute(n_threads, in, true, &out); }; sd_tiling(x, result, vae_scale_factor, 64, 0.5f, on_tiling); } else { - tae_first_stage->compute(n_threads, x, true, &result); + if (!tae_first_stage->compute(n_threads, x, true, &result)) { + LOG_ERROR("Failed to decode latents"); + tae_first_stage->free_compute_buffer(); + return nullptr; + } } tae_first_stage->free_compute_buffer(); } @@ -2324,6 +2735,16 @@ public: ggml_ext_tensor_clamp_inplace(result, 0.0f, 1.0f); return result; } + + void set_flow_shift(float flow_shift = INFINITY) { + auto flow_denoiser = std::dynamic_pointer_cast(denoiser); + if (flow_denoiser) { + if (flow_shift == INFINITY) { + flow_shift = default_flow_shift; + } + flow_denoiser->set_shift(flow_shift); + } + } }; /*================================================= SD API ==================================================*/ @@ -2382,6 +2803,8 @@ const char* sample_method_to_str[] = { "lcm", "ddim_trailing", "tcd", + "res_multistep", + "res_2s", }; const char* sd_sample_method_name(enum sample_method_t sample_method) { @@ -2409,7 +2832,9 @@ const char* scheduler_to_str[] = { "sgm_uniform", "simple", "smoothstep", + "kl_optimal", "lcm", + "bong_tangent", }; const char* sd_scheduler_name(enum scheduler_t scheduler) { @@ -2498,12 +2923,25 @@ enum lora_apply_mode_t
str_to_lora_apply_mode(const char* str) { return LORA_APPLY_MODE_COUNT; } -void sd_easycache_params_init(sd_easycache_params_t* easycache_params) { - *easycache_params = {}; - easycache_params->enabled = false; - easycache_params->reuse_threshold = 0.2f; - easycache_params->start_percent = 0.15f; - easycache_params->end_percent = 0.95f; +void sd_cache_params_init(sd_cache_params_t* cache_params) { + *cache_params = {}; + cache_params->mode = SD_CACHE_DISABLED; + cache_params->reuse_threshold = 1.0f; + cache_params->start_percent = 0.15f; + cache_params->end_percent = 0.95f; + cache_params->error_decay_rate = 1.0f; + cache_params->use_relative_threshold = true; + cache_params->reset_error_on_compute = true; + cache_params->Fn_compute_blocks = 8; + cache_params->Bn_compute_blocks = 0; + cache_params->residual_diff_threshold = 0.08f; + cache_params->max_warmup_steps = 8; + cache_params->max_cached_steps = -1; + cache_params->max_continuous_cached_steps = -1; + cache_params->taylorseer_n_derivatives = 1; + cache_params->taylorseer_skip_interval = 1; + cache_params->scm_mask = nullptr; + cache_params->scm_policy_dynamic = true; } void sd_ctx_params_init(sd_ctx_params_t* sd_ctx_params) { @@ -2517,14 +2955,16 @@ void sd_ctx_params_init(sd_ctx_params_t* sd_ctx_params) { sd_ctx_params->prediction = PREDICTION_COUNT; sd_ctx_params->lora_apply_mode = LORA_APPLY_AUTO; sd_ctx_params->offload_params_to_cpu = false; + sd_ctx_params->enable_mmap = false; sd_ctx_params->keep_clip_on_cpu = false; sd_ctx_params->keep_control_net_on_cpu = false; sd_ctx_params->keep_vae_on_cpu = false; sd_ctx_params->diffusion_flash_attn = false; + sd_ctx_params->circular_x = false; + sd_ctx_params->circular_y = false; sd_ctx_params->chroma_use_dit_mask = true; sd_ctx_params->chroma_use_t5_mask = false; sd_ctx_params->chroma_t5_mask_pad = 1; - sd_ctx_params->flow_shift = INFINITY; } char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { @@ -2559,7 +2999,10 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { "keep_clip_on_cpu: %s\n" "keep_control_net_on_cpu: %s\n" "keep_vae_on_cpu: %s\n" + "flash_attn: %s\n" "diffusion_flash_attn: %s\n" + "circular_x: %s\n" + "circular_y: %s\n" "chroma_use_dit_mask: %s\n" "chroma_use_t5_mask: %s\n" "chroma_t5_mask_pad: %d\n", @@ -2588,7 +3031,10 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { BOOL_STR(sd_ctx_params->keep_clip_on_cpu), BOOL_STR(sd_ctx_params->keep_control_net_on_cpu), BOOL_STR(sd_ctx_params->keep_vae_on_cpu), + BOOL_STR(sd_ctx_params->flash_attn), BOOL_STR(sd_ctx_params->diffusion_flash_attn), + BOOL_STR(sd_ctx_params->circular_x), + BOOL_STR(sd_ctx_params->circular_y), BOOL_STR(sd_ctx_params->chroma_use_dit_mask), BOOL_STR(sd_ctx_params->chroma_use_t5_mask), sd_ctx_params->chroma_t5_mask_pad); @@ -2610,6 +3056,7 @@ void sd_sample_params_init(sd_sample_params_t* sample_params) { sample_params->sample_steps = 20; sample_params->custom_sigmas = nullptr; sample_params->custom_sigmas_count = 0; + sample_params->flow_shift = INFINITY; } char* sd_sample_params_to_str(const sd_sample_params_t* sample_params) { @@ -2630,7 +3077,8 @@ char* sd_sample_params_to_str(const sd_sample_params_t* sample_params) { "sample_method: %s, " "sample_steps: %d, " "eta: %.2f, " - "shifted_timestep: %d)", + "shifted_timestep: %d, " + "flow_shift: %.2f)", sample_params->guidance.txt_cfg, std::isfinite(sample_params->guidance.img_cfg) ? 
sample_params->guidance.img_cfg @@ -2644,7 +3092,8 @@ char* sd_sample_params_to_str(const sd_sample_params_t* sample_params) { sd_sample_method_name(sample_params->sample_method), sample_params->sample_steps, sample_params->eta, - sample_params->shifted_timestep); + sample_params->shifted_timestep, + sample_params->flow_shift); return buf; } @@ -2662,7 +3111,7 @@ void sd_img_gen_params_init(sd_img_gen_params_t* sd_img_gen_params) { sd_img_gen_params->control_strength = 0.9f; sd_img_gen_params->pm_params = {nullptr, 0, nullptr, 20.f}; sd_img_gen_params->vae_tiling_params = {false, 0, 0, 0.5f, 0.0f, 0.0f}; - sd_easycache_params_init(&sd_img_gen_params->easycache); + sd_cache_params_init(&sd_img_gen_params->cache); } char* sd_img_gen_params_to_str(const sd_img_gen_params_t* sd_img_gen_params) { @@ -2682,6 +3131,7 @@ char* sd_img_gen_params_to_str(const sd_img_gen_params_t* sd_img_gen_params) { "sample_params: %s\n" "strength: %.2f\n" "seed: %" PRId64 + "\n" "batch_count: %d\n" "ref_images_count: %d\n" "auto_resize_ref_image: %s\n" @@ -2706,12 +3156,18 @@ char* sd_img_gen_params_to_str(const sd_img_gen_params_t* sd_img_gen_params) { sd_img_gen_params->pm_params.id_images_count, SAFE_STR(sd_img_gen_params->pm_params.id_embed_path), BOOL_STR(sd_img_gen_params->vae_tiling_params.enabled)); + const char* cache_mode_str = "disabled"; + if (sd_img_gen_params->cache.mode == SD_CACHE_EASYCACHE) { + cache_mode_str = "easycache"; + } else if (sd_img_gen_params->cache.mode == SD_CACHE_UCACHE) { + cache_mode_str = "ucache"; + } snprintf(buf + strlen(buf), 4096 - strlen(buf), - "easycache: %s (threshold=%.3f, start=%.2f, end=%.2f)\n", - sd_img_gen_params->easycache.enabled ? "enabled" : "disabled", - sd_img_gen_params->easycache.reuse_threshold, - sd_img_gen_params->easycache.start_percent, - sd_img_gen_params->easycache.end_percent); + "cache: %s (threshold=%.3f, start=%.2f, end=%.2f)\n", + cache_mode_str, + sd_img_gen_params->cache.reuse_threshold, + sd_img_gen_params->cache.start_percent, + sd_img_gen_params->cache.end_percent); free(sample_params_str); return buf; } @@ -2728,7 +3184,8 @@ void sd_vid_gen_params_init(sd_vid_gen_params_t* sd_vid_gen_params) { sd_vid_gen_params->video_frames = 6; sd_vid_gen_params->moe_boundary = 0.875f; sd_vid_gen_params->vace_strength = 1.f; - sd_easycache_params_init(&sd_vid_gen_params->easycache); + sd_vid_gen_params->vae_tiling_params = {false, 0, 0, 0.5f, 0.0f, 0.0f}; + sd_cache_params_init(&sd_vid_gen_params->cache); } struct sd_ctx_t { @@ -2773,13 +3230,16 @@ enum sample_method_t sd_get_default_sample_method(const sd_ctx_t* sd_ctx) { return EULER_A_SAMPLE_METHOD; } -enum scheduler_t sd_get_default_scheduler(const sd_ctx_t* sd_ctx) { +enum scheduler_t sd_get_default_scheduler(const sd_ctx_t* sd_ctx, enum sample_method_t sample_method) { if (sd_ctx != nullptr && sd_ctx->sd != nullptr) { auto edm_v_denoiser = std::dynamic_pointer_cast(sd_ctx->sd->denoiser); if (edm_v_denoiser) { return EXPONENTIAL_SCHEDULER; } } + if (sample_method == LCM_SAMPLE_METHOD) { + return LCM_SCHEDULER; + } return DISCRETE_SCHEDULER; } @@ -2804,9 +3264,9 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx, std::vector ref_images, std::vector ref_latents, bool increase_ref_index, - ggml_tensor* concat_latent = nullptr, - ggml_tensor* denoise_mask = nullptr, - const sd_easycache_params_t* easycache_params = nullptr) { + ggml_tensor* concat_latent = nullptr, + ggml_tensor* denoise_mask = nullptr, + const sd_cache_params_t* cache_params = nullptr) { if (seed < 0) { // Generally, when using 
the provided command line, the seed is always >0. // However, to prevent potential issues if 'stable-diffusion.cpp' is invoked as a library @@ -2819,114 +3279,22 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx, guidance.img_cfg = guidance.txt_cfg; } - // for (auto v : sigmas) { - // std::cout << v << " "; - // } - // std::cout << std::endl; - - int sample_steps = sigmas.size() - 1; + int sample_steps = static_cast(sigmas.size() - 1); int64_t t0 = ggml_time_ms(); - // Photo Maker - std::string prompt_text_only; - ggml_tensor* init_img = nullptr; - SDCondition id_cond; - std::vector class_tokens_mask; - ConditionerParams condition_params; + condition_params.text = prompt; condition_params.clip_skip = clip_skip; condition_params.width = width; condition_params.height = height; condition_params.ref_images = ref_images; - condition_params.adm_in_channels = sd_ctx->sd->diffusion_model->get_adm_in_channels(); + condition_params.adm_in_channels = static_cast(sd_ctx->sd->diffusion_model->get_adm_in_channels()); - if (sd_ctx->sd->stacked_id) { - if (!sd_ctx->sd->pmid_lora->applied) { - int64_t t0 = ggml_time_ms(); - sd_ctx->sd->pmid_lora->apply(sd_ctx->sd->tensors, sd_ctx->sd->version, sd_ctx->sd->n_threads); - int64_t t1 = ggml_time_ms(); - sd_ctx->sd->pmid_lora->applied = true; - LOG_INFO("pmid_lora apply completed, taking %.2fs", (t1 - t0) * 1.0f / 1000); - if (sd_ctx->sd->free_params_immediately) { - sd_ctx->sd->pmid_lora->free_params_buffer(); - } - } - // preprocess input id images - bool pmv2 = sd_ctx->sd->pmid_model->get_version() == PM_VERSION_2; - if (pm_params.id_images_count > 0) { - int clip_image_size = 224; - sd_ctx->sd->pmid_model->style_strength = pm_params.style_strength; - - init_img = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, clip_image_size, clip_image_size, 3, pm_params.id_images_count); - - std::vector processed_id_images; - for (int i = 0; i < pm_params.id_images_count; i++) { - sd_image_f32_t id_image = sd_image_t_to_sd_image_f32_t(pm_params.id_images[i]); - sd_image_f32_t processed_id_image = clip_preprocess(id_image, clip_image_size, clip_image_size); - free(id_image.data); - id_image.data = nullptr; - processed_id_images.push_back(processed_id_image); - } - - ggml_ext_tensor_iter(init_img, [&](ggml_tensor* init_img, int64_t i0, int64_t i1, int64_t i2, int64_t i3) { - float value = sd_image_get_f32(processed_id_images[i3], i0, i1, i2, false); - ggml_ext_tensor_set_f32(init_img, value, i0, i1, i2, i3); - }); - - for (auto& image : processed_id_images) { - free(image.data); - image.data = nullptr; - } - processed_id_images.clear(); - - int64_t t0 = ggml_time_ms(); - condition_params.text = prompt; - condition_params.num_input_imgs = pm_params.id_images_count; - auto cond_tup = sd_ctx->sd->cond_stage_model->get_learned_condition_with_trigger(work_ctx, - sd_ctx->sd->n_threads, - condition_params); - id_cond = std::get<0>(cond_tup); - class_tokens_mask = std::get<1>(cond_tup); // - struct ggml_tensor* id_embeds = nullptr; - if (pmv2 && pm_params.id_embed_path != nullptr) { - id_embeds = load_tensor_from_file(work_ctx, pm_params.id_embed_path); - // print_ggml_tensor(id_embeds, true, "id_embeds:"); - } - if (pmv2 && id_embeds == nullptr) { - LOG_WARN("Provided PhotoMaker images, but NO valid ID embeds file for PM v2"); - LOG_WARN("Turn off PhotoMaker"); - sd_ctx->sd->stacked_id = false; - } else { - if (pmv2 && pm_params.id_images_count != id_embeds->ne[1]) { - LOG_WARN("PhotoMaker image count (%d) does NOT match ID embeds (%d). 
You should run face_detect.py again.", pm_params.id_images_count, id_embeds->ne[1]); - LOG_WARN("Turn off PhotoMaker"); - sd_ctx->sd->stacked_id = false; - } else { - id_cond.c_crossattn = sd_ctx->sd->id_encoder(work_ctx, init_img, id_cond.c_crossattn, id_embeds, class_tokens_mask); - int64_t t1 = ggml_time_ms(); - LOG_INFO("Photomaker ID Stacking, taking %" PRId64 " ms", t1 - t0); - if (sd_ctx->sd->free_params_immediately) { - sd_ctx->sd->pmid_model->free_params_buffer(); - } - // Encode input prompt without the trigger word for delayed conditioning - prompt_text_only = sd_ctx->sd->cond_stage_model->remove_trigger_from_prompt(work_ctx, prompt); - // printf("%s || %s \n", prompt.c_str(), prompt_text_only.c_str()); - prompt = prompt_text_only; // - if (sample_steps < 50) { - LOG_WARN("It's recommended to use >= 50 steps for photo maker!"); - } - } - } - } else { - LOG_WARN("Provided PhotoMaker model file, but NO input ID images"); - LOG_WARN("Turn off PhotoMaker"); - sd_ctx->sd->stacked_id = false; - } - } + // Photo Maker + SDCondition id_cond = sd_ctx->sd->get_pmid_conditon(work_ctx, pm_params, condition_params); // Get learned condition - condition_params.text = prompt; condition_params.zero_out_masked = false; SDCondition cond = sd_ctx->sd->cond_stage_model->get_learned_condition(work_ctx, sd_ctx->sd->n_threads, @@ -3066,7 +3434,7 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx, ggml_ext_im_set_randn_f32(noise, sd_ctx->sd->rng); int start_merge_step = -1; - if (sd_ctx->sd->stacked_id) { + if (sd_ctx->sd->use_pmid) { start_merge_step = int(sd_ctx->sd->pmid_model->style_strength / 100.f * sample_steps); // if (start_merge_step > 30) // start_merge_step = 30; @@ -3095,7 +3463,7 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx, denoise_mask, nullptr, 1.0f, - easycache_params); + cache_params); int64_t sampling_end = ggml_time_ms(); if (x_0 != nullptr) { // print_ggml_tensor(x_0); @@ -3139,6 +3507,7 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx, ggml_free(work_ctx); return nullptr; } + memset(result_images, 0, batch_count * sizeof(sd_image_t)); for (size_t i = 0; i < decoded_images.size(); i++) { result_images[i].width = width; @@ -3195,6 +3564,8 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* sd_img_g size_t t0 = ggml_time_ms(); + sd_ctx->sd->set_flow_shift(sd_img_gen_params->sample_params.flow_shift); + // Apply lora sd_ctx->sd->apply_loras(sd_img_gen_params->loras, sd_img_gen_params->lora_count); @@ -3214,9 +3585,13 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* sd_img_g LOG_WARN("sample_steps != custom_sigmas_count - 1, set sample_steps to %d", sample_steps); } } else { + scheduler_t scheduler = sd_img_gen_params->sample_params.scheduler; + if (scheduler == SCHEDULER_COUNT) { + scheduler = sd_get_default_scheduler(sd_ctx, sample_method); + } sigmas = sd_ctx->sd->denoiser->get_sigmas(sample_steps, sd_ctx->sd->get_image_seq_len(height, width), - sd_img_gen_params->sample_params.scheduler, + scheduler, sd_ctx->sd->version); } @@ -3429,7 +3804,7 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* sd_img_g sd_img_gen_params->increase_ref_index, concat_latent, denoise_mask, - &sd_img_gen_params->easycache); + &sd_img_gen_params->cache); size_t t2 = ggml_time_ms(); @@ -3442,6 +3817,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s if (sd_ctx == nullptr || sd_vid_gen_params == nullptr) { return nullptr; } + sd_ctx->sd->vae_tiling_params = 
sd_vid_gen_params->vae_tiling_params; std::string prompt = SAFE_STR(sd_vid_gen_params->prompt); std::string negative_prompt = SAFE_STR(sd_vid_gen_params->negative_prompt); @@ -3465,6 +3841,8 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s } LOG_INFO("generate_video %dx%dx%d", width, height, frames); + sd_ctx->sd->set_flow_shift(sd_vid_gen_params->sample_params.flow_shift); + enum sample_method_t sample_method = sd_vid_gen_params->sample_params.sample_method; if (sample_method == SAMPLE_METHOD_COUNT) { sample_method = sd_get_default_sample_method(sd_ctx); @@ -3499,9 +3877,13 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s } } } else { + scheduler_t scheduler = sd_vid_gen_params->sample_params.scheduler; + if (scheduler == SCHEDULER_COUNT) { + scheduler = sd_get_default_scheduler(sd_ctx, sample_method); + } sigmas = sd_ctx->sd->denoiser->get_sigmas(total_steps, 0, - sd_vid_gen_params->sample_params.scheduler, + scheduler, sd_ctx->sd->version); } @@ -3509,7 +3891,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s // timesteps ∝ sigmas for Flow models (like wan2.2 a14b) for (size_t i = 0; i < sigmas.size(); ++i) { if (sigmas[i] < sd_vid_gen_params->moe_boundary) { - high_noise_sample_steps = i; + high_noise_sample_steps = static_cast(i); break; } } @@ -3704,7 +4086,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s int64_t length = inactive->ne[2]; if (ref_image_latent) { length += 1; - frames = (length - 1) * 4 + 1; + frames = static_cast((length - 1) * 4 + 1); ref_image_num = 1; } vace_context = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, inactive->ne[0], inactive->ne[1], length, 96); // [b*96, t, h/vae_scale_factor, w/vae_scale_factor] @@ -3770,7 +4152,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s int W = width / vae_scale_factor; int H = height / vae_scale_factor; - int T = init_latent->ne[2]; + int T = static_cast(init_latent->ne[2]); int C = sd_ctx->sd->get_latent_channel(); struct ggml_tensor* final_latent; @@ -3813,7 +4195,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s denoise_mask, vace_context, sd_vid_gen_params->vace_strength, - &sd_vid_gen_params->easycache); + &sd_vid_gen_params->cache); int64_t sampling_end = ggml_time_ms(); LOG_INFO("sampling(high noise) completed, taking %.2fs", (sampling_end - sampling_start) * 1.0f / 1000); @@ -3850,7 +4232,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s denoise_mask, vace_context, sd_vid_gen_params->vace_strength, - &sd_vid_gen_params->easycache); + &sd_vid_gen_params->cache); int64_t sampling_end = ggml_time_ms(); LOG_INFO("sampling completed, taking %.2fs", (sampling_end - sampling_start) * 1.0f / 1000); @@ -3889,17 +4271,17 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s ggml_free(work_ctx); return nullptr; } - *num_frames_out = vid->ne[2]; + *num_frames_out = static_cast(vid->ne[2]); - for (size_t i = 0; i < vid->ne[2]; i++) { - result_images[i].width = vid->ne[0]; - result_images[i].height = vid->ne[1]; + for (int64_t i = 0; i < vid->ne[2]; i++) { + result_images[i].width = static_cast(vid->ne[0]); + result_images[i].height = static_cast(vid->ne[1]); result_images[i].channel = 3; - result_images[i].data = ggml_tensor_to_sd_image(vid, i, true); + result_images[i].data = ggml_tensor_to_sd_image(vid, static_cast(i), true); } ggml_free(work_ctx); 
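// Note: generate_image and generate_video now take the unified sd_cache_params_t
// (field name `cache`) in place of the old easycache-only struct. A minimal
// caller-side sketch, using only fields that appear in this diff (the numeric
// values are illustrative, not defaults):
//
//   sd_vid_gen_params_t p;
//   sd_vid_gen_params_init(&p);                  // initializes p.cache via sd_cache_params_init()
//   p.cache.mode            = SD_CACHE_UCACHE;   // or SD_CACHE_EASYCACHE
//   p.cache.reuse_threshold = 0.3f;
//   p.cache.start_percent   = 0.15f;
//   p.cache.end_percent     = 0.95f;
//   p.sample_params.flow_shift = 3.0f;           // forwarded to set_flow_shift() before sampling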
LOG_INFO("generate_video completed in %.2fs", (t5 - t0) * 1.0f / 1000); return result_images; -} \ No newline at end of file +} diff --git a/t5.hpp b/src/t5.hpp similarity index 97% rename from t5.hpp rename to src/t5.hpp index 4370a567..d789c5bd 100644 --- a/t5.hpp +++ b/src/t5.hpp @@ -14,6 +14,7 @@ #include "ggml_extend.hpp" #include "json.hpp" #include "model.h" +#include "vocab/vocab.h" // Port from: https://github.com/google/sentencepiece/blob/master/src/unigram_model.h // and https://github.com/google/sentencepiece/blob/master/src/unigram_model.h. @@ -96,7 +97,7 @@ protected: try { data = nlohmann::json::parse(json_str); - } catch (const nlohmann::json::parse_error& e) { + } catch (const nlohmann::json::parse_error&) { status_ = INVLIAD_JSON; return; } @@ -168,9 +169,9 @@ protected: kMaxTrieResultsSize); trie_results_size_ = 0; for (const auto& p : *pieces) { - const int num_nodes = trie_->commonPrefixSearch( + const size_t num_nodes = trie_->commonPrefixSearch( p.first.data(), results.data(), results.size(), p.first.size()); - trie_results_size_ = std::max(trie_results_size_, num_nodes); + trie_results_size_ = std::max(trie_results_size_, static_cast(num_nodes)); } if (trie_results_size_ == 0) @@ -268,7 +269,7 @@ protected: -1; // The starting position (in utf-8) of this node. The entire best // path can be constructed by backtracking along this link. }; - const int size = normalized.size(); + const int size = static_cast(normalized.size()); const float unk_score = min_score() - kUnkPenalty; // The ends are exclusive. std::vector best_path_ends_at(size + 1); @@ -281,7 +282,7 @@ protected: best_path_ends_at[starts_at].best_path_score; bool has_single_node = false; const int mblen = - std::min(OneCharLen(normalized.data() + starts_at), + std::min(static_cast(OneCharLen(normalized.data() + starts_at)), size - starts_at); while (key_pos < size) { const int ret = @@ -302,7 +303,7 @@ protected: score + best_path_score_till_here; if (target_node.starts_at == -1 || candidate_best_path_score > target_node.best_path_score) { - target_node.best_path_score = candidate_best_path_score; + target_node.best_path_score = static_cast(candidate_best_path_score); target_node.starts_at = starts_at; target_node.id = ret; } @@ -341,9 +342,9 @@ protected: public: explicit T5UniGramTokenizer(bool is_umt5 = false) { if (is_umt5) { - InitializePieces(ModelLoader::load_umt5_tokenizer_json()); + InitializePieces(load_umt5_tokenizer_json()); } else { - InitializePieces(ModelLoader::load_t5_tokenizer_json()); + InitializePieces(load_t5_tokenizer_json()); } min_score_ = FLT_MAX; @@ -394,7 +395,7 @@ public: bool padding = false) { if (max_length > 0 && padding) { size_t orig_token_num = tokens.size() - 1; - size_t n = std::ceil(orig_token_num * 1.0 / (max_length - 1)); + size_t n = static_cast(std::ceil(orig_token_num * 1.0 / (max_length - 1))); if (n == 0) { n = 1; } @@ -515,7 +516,7 @@ public: auto wi_1 = std::dynamic_pointer_cast(blocks["wi_1"]); auto wo = std::dynamic_pointer_cast(blocks["wo"]); - auto hidden_gelu = ggml_gelu_inplace(ctx->ggml_ctx, wi_0->forward(ctx, x)); + auto hidden_gelu = ggml_ext_gelu(ctx->ggml_ctx, wi_0->forward(ctx, x), true); auto hidden_linear = wi_1->forward(ctx, x); x = ggml_mul_inplace(ctx->ggml_ctx, hidden_gelu, hidden_linear); x = wo->forward(ctx, x); @@ -608,7 +609,7 @@ public: } } - k = ggml_scale_inplace(ctx->ggml_ctx, k, sqrt(d_head)); + k = ggml_ext_scale(ctx->ggml_ctx, k, ::sqrtf(static_cast(d_head)), true); x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, 
num_heads, mask); // [N, n_token, d_head * n_head] @@ -797,7 +798,7 @@ struct T5Runner : public GGMLRunner { input_ids = to_backend(input_ids); attention_mask = to_backend(attention_mask); - relative_position_bucket_vec = compute_relative_position_bucket(input_ids->ne[0], input_ids->ne[0]); + relative_position_bucket_vec = compute_relative_position_bucket(static_cast(input_ids->ne[0]), static_cast(input_ids->ne[0])); // for (int i = 0; i < relative_position_bucket_vec.size(); i++) { // if (i % 77 == 0) { @@ -984,12 +985,12 @@ struct T5Embedder { auto attention_mask = vector_to_ggml_tensor(work_ctx, masks); struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); model.compute(8, input_ids, attention_mask, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out); - LOG_DEBUG("t5 test done in %dms", t1 - t0); + LOG_DEBUG("t5 test done in %lldms", t1 - t0); } } diff --git a/tae.hpp b/src/tae.hpp similarity index 89% rename from tae.hpp rename to src/tae.hpp index 5da76e69..83152578 100644 --- a/tae.hpp +++ b/src/tae.hpp @@ -17,22 +17,43 @@ class TAEBlock : public UnaryBlock { protected: int n_in; int n_out; + bool use_midblock_gn; public: - TAEBlock(int n_in, int n_out) - : n_in(n_in), n_out(n_out) { + TAEBlock(int n_in, int n_out, bool use_midblock_gn = false) + : n_in(n_in), n_out(n_out), use_midblock_gn(use_midblock_gn) { blocks["conv.0"] = std::shared_ptr(new Conv2d(n_in, n_out, {3, 3}, {1, 1}, {1, 1})); blocks["conv.2"] = std::shared_ptr(new Conv2d(n_out, n_out, {3, 3}, {1, 1}, {1, 1})); blocks["conv.4"] = std::shared_ptr(new Conv2d(n_out, n_out, {3, 3}, {1, 1}, {1, 1})); if (n_in != n_out) { blocks["skip"] = std::shared_ptr(new Conv2d(n_in, n_out, {1, 1}, {1, 1}, {1, 1}, {1, 1}, false)); } + if (use_midblock_gn) { + int n_gn = n_in * 4; + blocks["pool.0"] = std::shared_ptr(new Conv2d(n_in, n_gn, {1, 1}, {1, 1}, {0, 0}, {1, 1}, false)); + blocks["pool.1"] = std::shared_ptr(new GroupNorm(4, n_gn)); + // pool.2 is ReLU, handled in forward + blocks["pool.3"] = std::shared_ptr(new Conv2d(n_gn, n_in, {1, 1}, {1, 1}, {0, 0}, {1, 1}, false)); + } } struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { // x: [n, n_in, h, w] // return: [n, n_out, h, w] + if (use_midblock_gn) { + auto pool_0 = std::dynamic_pointer_cast(blocks["pool.0"]); + auto pool_1 = std::dynamic_pointer_cast(blocks["pool.1"]); + auto pool_3 = std::dynamic_pointer_cast(blocks["pool.3"]); + + auto p = pool_0->forward(ctx, x); + p = pool_1->forward(ctx, p); + p = ggml_relu_inplace(ctx->ggml_ctx, p); + p = pool_3->forward(ctx, p); + + x = ggml_add(ctx->ggml_ctx, x, p); + } + auto conv_0 = std::dynamic_pointer_cast(blocks["conv.0"]); auto conv_2 = std::dynamic_pointer_cast(blocks["conv.2"]); auto conv_4 = std::dynamic_pointer_cast(blocks["conv.4"]); @@ -62,7 +83,7 @@ class TinyEncoder : public UnaryBlock { int num_blocks = 3; public: - TinyEncoder(int z_channels = 4) + TinyEncoder(int z_channels = 4, bool use_midblock_gn = false) : z_channels(z_channels) { int index = 0; blocks[std::to_string(index++)] = std::shared_ptr(new Conv2d(in_channels, channels, {3, 3}, {1, 1}, {1, 1})); @@ -80,7 +101,7 @@ public: blocks[std::to_string(index++)] = std::shared_ptr(new Conv2d(channels, channels, {3, 3}, {2, 2}, {1, 1}, {1, 1}, false)); for (int i = 0; i < num_blocks; i++) { - blocks[std::to_string(index++)] = std::shared_ptr(new TAEBlock(channels, channels)); + blocks[std::to_string(index++)] = std::shared_ptr(new TAEBlock(channels, 
channels, use_midblock_gn)); } blocks[std::to_string(index++)] = std::shared_ptr(new Conv2d(channels, z_channels, {3, 3}, {1, 1}, {1, 1})); @@ -107,7 +128,7 @@ class TinyDecoder : public UnaryBlock { int num_blocks = 3; public: - TinyDecoder(int z_channels = 4) + TinyDecoder(int z_channels = 4, bool use_midblock_gn = false) : z_channels(z_channels) { int index = 0; @@ -115,7 +136,7 @@ public: index++; // nn.ReLU() for (int i = 0; i < num_blocks; i++) { - blocks[std::to_string(index++)] = std::shared_ptr(new TAEBlock(channels, channels)); + blocks[std::to_string(index++)] = std::shared_ptr(new TAEBlock(channels, channels, use_midblock_gn)); } index++; // nn.Upsample() blocks[std::to_string(index++)] = std::shared_ptr(new Conv2d(channels, channels, {3, 3}, {1, 1}, {1, 1}, {1, 1}, false)); @@ -140,9 +161,9 @@ public: // z: [n, z_channels, h, w] // return: [n, out_channels, h*8, w*8] - auto h = ggml_scale(ctx->ggml_ctx, z, 1.0f / 3.0f); + auto h = ggml_ext_scale(ctx->ggml_ctx, z, 1.0f / 3.0f); h = ggml_tanh_inplace(ctx->ggml_ctx, h); - h = ggml_scale(ctx->ggml_ctx, h, 3.0f); + h = ggml_ext_scale(ctx->ggml_ctx, h, 3.0f); for (int i = 0; i < num_blocks * 3 + 10; i++) { if (blocks.find(std::to_string(i)) == blocks.end()) { @@ -379,10 +400,11 @@ public: auto first_conv = std::dynamic_pointer_cast(blocks["1"]); // Clamp() - auto h = ggml_scale_inplace(ctx->ggml_ctx, - ggml_tanh_inplace(ctx->ggml_ctx, - ggml_scale(ctx->ggml_ctx, z, 1.0f / 3.0f)), - 3.0f); + auto h = ggml_ext_scale(ctx->ggml_ctx, + ggml_tanh_inplace(ctx->ggml_ctx, + ggml_ext_scale(ctx->ggml_ctx, z, 1.0f / 3.0f)), + 3.0f, + true); h = first_conv->forward(ctx, h); h = ggml_relu_inplace(ctx->ggml_ctx, h); @@ -470,29 +492,44 @@ public: class TAESD : public GGMLBlock { protected: bool decode_only; + bool taef2 = false; public: TAESD(bool decode_only = true, SDVersion version = VERSION_SD1) : decode_only(decode_only) { - int z_channels = 4; + int z_channels = 4; + bool use_midblock_gn = false; + taef2 = sd_version_is_flux2(version); + if (sd_version_is_dit(version)) { z_channels = 16; } - blocks["decoder.layers"] = std::shared_ptr(new TinyDecoder(z_channels)); + if (taef2) { + z_channels = 32; + use_midblock_gn = true; + } + blocks["decoder.layers"] = std::shared_ptr(new TinyDecoder(z_channels, use_midblock_gn)); if (!decode_only) { - blocks["encoder.layers"] = std::shared_ptr(new TinyEncoder(z_channels)); + blocks["encoder.layers"] = std::shared_ptr(new TinyEncoder(z_channels, use_midblock_gn)); } } struct ggml_tensor* decode(GGMLRunnerContext* ctx, struct ggml_tensor* z) { auto decoder = std::dynamic_pointer_cast(blocks["decoder.layers"]); + if (taef2) { + z = unpatchify(ctx->ggml_ctx, z, 2); + } return decoder->forward(ctx, z); } struct ggml_tensor* encode(GGMLRunnerContext* ctx, struct ggml_tensor* x) { auto encoder = std::dynamic_pointer_cast(blocks["encoder.layers"]); - return encoder->forward(ctx, x); + auto z = encoder->forward(ctx, x); + if (taef2) { + z = patchify(ctx->ggml_ctx, z, 2); + } + return z; } }; @@ -505,7 +542,8 @@ struct TinyAutoEncoder : public GGMLRunner { struct ggml_tensor** output, struct ggml_context* output_ctx = nullptr) = 0; - virtual bool load_from_file(const std::string& file_path, int n_threads) = 0; + virtual bool load_from_file(const std::string& file_path, int n_threads) = 0; + virtual void get_param_tensors(std::map& tensors, const std::string prefix) = 0; }; struct TinyImageAutoEncoder : public TinyAutoEncoder { @@ -555,6 +593,10 @@ struct TinyImageAutoEncoder : public TinyAutoEncoder { return 
success; } + void get_param_tensors(std::map& tensors, const std::string prefix) { + taesd.get_param_tensors(tensors, prefix); + } + struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) { struct ggml_cgraph* gf = ggml_new_graph(compute_ctx); z = to_backend(z); @@ -624,6 +666,10 @@ struct TinyVideoAutoEncoder : public TinyAutoEncoder { return success; } + void get_param_tensors(std::map& tensors, const std::string prefix) { + taehv.get_param_tensors(tensors, prefix); + } + struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) { struct ggml_cgraph* gf = ggml_new_graph(compute_ctx); z = to_backend(z); diff --git a/tokenize_util.cpp b/src/tokenize_util.cpp similarity index 99% rename from tokenize_util.cpp rename to src/tokenize_util.cpp index bc0ff1d5..22cf8ae2 100644 --- a/tokenize_util.cpp +++ b/src/tokenize_util.cpp @@ -919,15 +919,21 @@ std::vector token_split(const std::string& text) { // `\s*[\r\n]+|\s+(?!\S)|\s+` if (is_space(cp)) { - std::string token = codepoint_to_utf8(cp); - ++i; + std::string token; + bool saw_new_line = false; while (i < cps.size() && is_space(cps[i])) { token += codepoint_to_utf8(cps[i]); - ++i; + if (cps[i] == U'\r' || cps[i] == U'\n') { - break; + saw_new_line = true; + } else { + if (saw_new_line) { + break; + } } + + ++i; } tokens.push_back(token); diff --git a/tokenize_util.h b/src/tokenize_util.h similarity index 100% rename from tokenize_util.h rename to src/tokenize_util.h diff --git a/src/ucache.hpp b/src/ucache.hpp new file mode 100644 index 00000000..d3247618 --- /dev/null +++ b/src/ucache.hpp @@ -0,0 +1,434 @@ +#ifndef __UCACHE_HPP__ +#define __UCACHE_HPP__ + +#include +#include +#include +#include + +#include "denoiser.hpp" +#include "ggml_extend.hpp" + +struct UCacheConfig { + bool enabled = false; + float reuse_threshold = 1.0f; + float start_percent = 0.15f; + float end_percent = 0.95f; + float error_decay_rate = 1.0f; + bool use_relative_threshold = true; + bool adaptive_threshold = true; + float early_step_multiplier = 0.5f; + float late_step_multiplier = 1.5f; + float relative_norm_gain = 1.6f; + bool reset_error_on_compute = true; +}; + +struct UCacheCacheEntry { + std::vector diff; +}; + +struct UCacheState { + UCacheConfig config; + Denoiser* denoiser = nullptr; + float start_sigma = std::numeric_limits::max(); + float end_sigma = 0.0f; + bool initialized = false; + bool initial_step = true; + bool skip_current_step = false; + bool step_active = false; + const SDCondition* anchor_condition = nullptr; + std::unordered_map cache_diffs; + std::vector prev_input; + std::vector prev_output; + float output_prev_norm = 0.0f; + bool has_prev_input = false; + bool has_prev_output = false; + bool has_output_prev_norm = false; + bool has_relative_transformation_rate = false; + float relative_transformation_rate = 0.0f; + float last_input_change = 0.0f; + bool has_last_input_change = false; + float output_change_ema = 0.0f; + bool has_output_change_ema = false; + int total_steps_skipped = 0; + int current_step_index = -1; + int steps_computed_since_active = 0; + int expected_total_steps = 0; + int consecutive_skipped_steps = 0; + float accumulated_error = 0.0f; + + struct BlockMetrics { + float sum_transformation_rate = 0.0f; + float sum_output_norm = 0.0f; + int sample_count = 0; + float min_change_rate = std::numeric_limits::max(); + float max_change_rate = 0.0f; + + void reset() { + sum_transformation_rate = 0.0f; + sum_output_norm = 0.0f; + sample_count = 0; + min_change_rate = std::numeric_limits::max(); + 
max_change_rate = 0.0f; + } + + void record(float change_rate, float output_norm) { + if (std::isfinite(change_rate) && change_rate > 0.0f) { + sum_transformation_rate += change_rate; + sum_output_norm += output_norm; + sample_count++; + if (change_rate < min_change_rate) + min_change_rate = change_rate; + if (change_rate > max_change_rate) + max_change_rate = change_rate; + } + } + + float avg_transformation_rate() const { + return (sample_count > 0) ? (sum_transformation_rate / sample_count) : 0.0f; + } + + float avg_output_norm() const { + return (sample_count > 0) ? (sum_output_norm / sample_count) : 0.0f; + } + }; + BlockMetrics block_metrics; + int total_active_steps = 0; + + void reset_runtime() { + initial_step = true; + skip_current_step = false; + step_active = false; + anchor_condition = nullptr; + cache_diffs.clear(); + prev_input.clear(); + prev_output.clear(); + output_prev_norm = 0.0f; + has_prev_input = false; + has_prev_output = false; + has_output_prev_norm = false; + has_relative_transformation_rate = false; + relative_transformation_rate = 0.0f; + last_input_change = 0.0f; + has_last_input_change = false; + output_change_ema = 0.0f; + has_output_change_ema = false; + total_steps_skipped = 0; + current_step_index = -1; + steps_computed_since_active = 0; + expected_total_steps = 0; + consecutive_skipped_steps = 0; + accumulated_error = 0.0f; + block_metrics.reset(); + total_active_steps = 0; + } + + void init(const UCacheConfig& cfg, Denoiser* d) { + config = cfg; + denoiser = d; + initialized = cfg.enabled && d != nullptr; + reset_runtime(); + if (initialized) { + start_sigma = percent_to_sigma(config.start_percent); + end_sigma = percent_to_sigma(config.end_percent); + } + } + + void set_sigmas(const std::vector& sigmas) { + if (!initialized || sigmas.size() < 2) { + return; + } + size_t n_steps = sigmas.size() - 1; + expected_total_steps = static_cast(n_steps); + + size_t start_step = static_cast(config.start_percent * n_steps); + size_t end_step = static_cast(config.end_percent * n_steps); + + if (start_step >= n_steps) + start_step = n_steps - 1; + if (end_step >= n_steps) + end_step = n_steps - 1; + + start_sigma = sigmas[start_step]; + end_sigma = sigmas[end_step]; + + if (start_sigma < end_sigma) { + std::swap(start_sigma, end_sigma); + } + } + + bool enabled() const { + return initialized && config.enabled; + } + + float percent_to_sigma(float percent) const { + if (!denoiser) { + return 0.0f; + } + if (percent <= 0.0f) { + return std::numeric_limits::max(); + } + if (percent >= 1.0f) { + return 0.0f; + } + float t = (1.0f - percent) * (TIMESTEPS - 1); + return denoiser->t_to_sigma(t); + } + + void begin_step(int step_index, float sigma) { + if (!enabled()) { + return; + } + if (step_index == current_step_index) { + return; + } + current_step_index = step_index; + skip_current_step = false; + has_last_input_change = false; + step_active = false; + + if (sigma > start_sigma) { + return; + } + if (!(sigma > end_sigma)) { + return; + } + step_active = true; + total_active_steps++; + } + + bool step_is_active() const { + return enabled() && step_active; + } + + bool is_step_skipped() const { + return enabled() && step_active && skip_current_step; + } + + float get_adaptive_threshold(int estimated_total_steps = 0) const { + float base_threshold = config.reuse_threshold; + + if (!config.adaptive_threshold) { + return base_threshold; + } + + int effective_total = estimated_total_steps; + if (effective_total <= 0) { + effective_total = expected_total_steps; + } + if 
(effective_total <= 0) { + effective_total = std::max(20, steps_computed_since_active * 2); + } + + float progress = (effective_total > 0) ? (static_cast(steps_computed_since_active) / effective_total) : 0.0f; + progress = std::max(0.0f, std::min(1.0f, progress)); + + float multiplier = 1.0f; + if (progress < 0.2f) { + multiplier = config.early_step_multiplier; + } else if (progress > 0.8f) { + multiplier = config.late_step_multiplier; + } + + return base_threshold * multiplier; + } + + bool has_cache(const SDCondition* cond) const { + auto it = cache_diffs.find(cond); + return it != cache_diffs.end() && !it->second.diff.empty(); + } + + void update_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) { + UCacheCacheEntry& entry = cache_diffs[cond]; + size_t ne = static_cast(ggml_nelements(output)); + entry.diff.resize(ne); + float* out_data = (float*)output->data; + float* in_data = (float*)input->data; + + for (size_t i = 0; i < ne; ++i) { + entry.diff[i] = out_data[i] - in_data[i]; + } + } + + void apply_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) { + auto it = cache_diffs.find(cond); + if (it == cache_diffs.end() || it->second.diff.empty()) { + return; + } + + copy_ggml_tensor(output, input); + float* out_data = (float*)output->data; + const std::vector& diff = it->second.diff; + for (size_t i = 0; i < diff.size(); ++i) { + out_data[i] += diff[i]; + } + } + + bool before_condition(const SDCondition* cond, + ggml_tensor* input, + ggml_tensor* output, + float sigma, + int step_index) { + if (!enabled() || step_index < 0) { + return false; + } + if (step_index != current_step_index) { + begin_step(step_index, sigma); + } + if (!step_active) { + return false; + } + + if (initial_step) { + anchor_condition = cond; + initial_step = false; + } + + bool is_anchor = (cond == anchor_condition); + + if (skip_current_step) { + if (has_cache(cond)) { + apply_cache(cond, input, output); + return true; + } + return false; + } + + if (!is_anchor) { + return false; + } + + if (!has_prev_input || !has_prev_output || !has_cache(cond)) { + return false; + } + + size_t ne = static_cast(ggml_nelements(input)); + if (prev_input.size() != ne) { + return false; + } + + float* input_data = (float*)input->data; + last_input_change = 0.0f; + for (size_t i = 0; i < ne; ++i) { + last_input_change += std::fabs(input_data[i] - prev_input[i]); + } + if (ne > 0) { + last_input_change /= static_cast(ne); + } + has_last_input_change = true; + + if (has_output_prev_norm && has_relative_transformation_rate && + last_input_change > 0.0f && output_prev_norm > 0.0f) { + float approx_output_change = relative_transformation_rate * last_input_change; + float approx_output_change_rate; + if (config.use_relative_threshold) { + float base_scale = std::max(output_prev_norm, 1e-6f); + float dyn_scale = has_output_change_ema + ? 
std::max(output_change_ema * std::max(1.0f, config.relative_norm_gain), 1e-6f) + : base_scale; + float scale = std::sqrt(base_scale * dyn_scale); + approx_output_change_rate = approx_output_change / scale; + } else { + approx_output_change_rate = approx_output_change; + } + // Increase estimated error with skip horizon to avoid long extrapolation streaks + approx_output_change_rate *= (1.0f + 0.50f * consecutive_skipped_steps); + accumulated_error = accumulated_error * config.error_decay_rate + approx_output_change_rate; + + float effective_threshold = get_adaptive_threshold(); + if (!config.use_relative_threshold && output_prev_norm > 0.0f) { + effective_threshold = effective_threshold * output_prev_norm; + } + + if (accumulated_error < effective_threshold) { + skip_current_step = true; + total_steps_skipped++; + consecutive_skipped_steps++; + apply_cache(cond, input, output); + return true; + } else if (config.reset_error_on_compute) { + accumulated_error = 0.0f; + } + } + + return false; + } + + void after_condition(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) { + if (!step_is_active()) { + return; + } + + update_cache(cond, input, output); + + if (cond != anchor_condition) { + return; + } + steps_computed_since_active++; + consecutive_skipped_steps = 0; + + size_t ne = static_cast(ggml_nelements(input)); + float* in_data = (float*)input->data; + prev_input.resize(ne); + for (size_t i = 0; i < ne; ++i) { + prev_input[i] = in_data[i]; + } + has_prev_input = true; + + float* out_data = (float*)output->data; + float output_change = 0.0f; + if (has_prev_output && prev_output.size() == ne) { + for (size_t i = 0; i < ne; ++i) { + output_change += std::fabs(out_data[i] - prev_output[i]); + } + if (ne > 0) { + output_change /= static_cast(ne); + } + } + if (std::isfinite(output_change) && output_change > 0.0f) { + if (!has_output_change_ema) { + output_change_ema = output_change; + has_output_change_ema = true; + } else { + output_change_ema = 0.8f * output_change_ema + 0.2f * output_change; + } + } + + prev_output.resize(ne); + for (size_t i = 0; i < ne; ++i) { + prev_output[i] = out_data[i]; + } + has_prev_output = true; + + float mean_abs = 0.0f; + for (size_t i = 0; i < ne; ++i) { + mean_abs += std::fabs(out_data[i]); + } + output_prev_norm = (ne > 0) ? 
(mean_abs / static_cast(ne)) : 0.0f; + has_output_prev_norm = output_prev_norm > 0.0f; + + if (has_last_input_change && last_input_change > 0.0f && output_change > 0.0f) { + float rate = output_change / last_input_change; + if (std::isfinite(rate)) { + relative_transformation_rate = rate; + has_relative_transformation_rate = true; + block_metrics.record(rate, output_prev_norm); + } + } + + has_last_input_change = false; + } + + void log_block_metrics() const { + if (block_metrics.sample_count > 0) { + LOG_INFO("UCacheBlockMetrics: samples=%d, avg_rate=%.4f, min=%.4f, max=%.4f, avg_norm=%.4f", + block_metrics.sample_count, + block_metrics.avg_transformation_rate(), + block_metrics.min_change_rate, + block_metrics.max_change_rate, + block_metrics.avg_output_norm()); + } + } +}; + +#endif // __UCACHE_HPP__ diff --git a/unet.hpp b/src/unet.hpp similarity index 96% rename from unet.hpp rename to src/unet.hpp index ec7578e4..e0fd4c52 100644 --- a/unet.hpp +++ b/src/unet.hpp @@ -1,8 +1,7 @@ #ifndef __UNET_HPP__ #define __UNET_HPP__ -#include "common.hpp" -#include "ggml_extend.hpp" +#include "common_block.hpp" #include "model.h" /*==================================================== UnetModel =====================================================*/ @@ -12,7 +11,7 @@ class SpatialVideoTransformer : public SpatialTransformer { protected: int64_t time_depth; - int64_t max_time_embed_period; + int max_time_embed_period; public: SpatialVideoTransformer(int64_t in_channels, @@ -21,8 +20,8 @@ public: int64_t depth, int64_t context_dim, bool use_linear, - int64_t time_depth = 1, - int64_t max_time_embed_period = 10000) + int64_t time_depth = 1, + int max_time_embed_period = 10000) : SpatialTransformer(in_channels, n_head, d_head, depth, context_dim, use_linear), max_time_embed_period(max_time_embed_period) { // We will convert unet transformer linear to conv2d 1x1 when loading the weights, so use_linear is always False @@ -112,9 +111,9 @@ public: x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 1, 2, 0, 3)); // [N, h, w, inner_dim] x = ggml_reshape_3d(ctx->ggml_ctx, x, inner_dim, w * h, n); // [N, h * w, inner_dim] - auto num_frames = ggml_arange(ctx->ggml_ctx, 0, timesteps, 1); + auto num_frames = ggml_arange(ctx->ggml_ctx, 0.f, static_cast(timesteps), 1.f); // since b is 1, no need to do repeat - auto t_emb = ggml_ext_timestep_embedding(ctx->ggml_ctx, num_frames, in_channels, max_time_embed_period); // [N, in_channels] + auto t_emb = ggml_ext_timestep_embedding(ctx->ggml_ctx, num_frames, static_cast(in_channels), max_time_embed_period); // [N, in_channels] auto emb = time_pos_embed_0->forward(ctx, t_emb); emb = ggml_silu_inplace(ctx->ggml_ctx, emb); @@ -201,6 +200,9 @@ public: num_head_channels = 64; num_heads = -1; use_linear_projection = true; + if (version == VERSION_SDXL_VEGA) { + transformer_depth = {1, 1, 2}; + } } else if (version == VERSION_SVD) { in_channels = 8; out_channels = 4; @@ -215,10 +217,13 @@ public: } else if (sd_version_is_unet_edit(version)) { in_channels = 8; } - if (version == VERSION_SD1_TINY_UNET || version == VERSION_SD2_TINY_UNET) { + if (version == VERSION_SD1_TINY_UNET || version == VERSION_SD2_TINY_UNET || version == VERSION_SDXS) { num_res_blocks = 1; channel_mult = {1, 2, 4}; tiny_unet = true; + if (version == VERSION_SDXS) { + attention_resolutions = {4, 2}; // here just like SDXL + } } // dims is always 2 @@ -316,7 +321,7 @@ public: } if (!tiny_unet) { blocks["middle_block.0"] = std::shared_ptr(get_resblock(ch, time_embed_dim, ch)); - if (version != 
VERSION_SDXL_SSD1B) { + if (version != VERSION_SDXL_SSD1B && version != VERSION_SDXL_VEGA) { blocks["middle_block.1"] = std::shared_ptr(get_attention_layer(ch, n_head, d_head, @@ -517,16 +522,16 @@ public: // middle_block if (!tiny_unet) { h = resblock_forward("middle_block.0", ctx, h, emb, num_video_frames); // [N, 4*model_channels, h/8, w/8] - if (version != VERSION_SDXL_SSD1B) { + if (version != VERSION_SDXL_SSD1B && version != VERSION_SDXL_VEGA) { h = attention_layer_forward("middle_block.1", ctx, h, context, num_video_frames); // [N, 4*model_channels, h/8, w/8] h = resblock_forward("middle_block.2", ctx, h, emb, num_video_frames); // [N, 4*model_channels, h/8, w/8] } } if (controls.size() > 0) { - auto cs = ggml_scale_inplace(ctx->ggml_ctx, controls[controls.size() - 1], control_strength); + auto cs = ggml_ext_scale(ctx->ggml_ctx, controls[controls.size() - 1], control_strength, true); h = ggml_add(ctx->ggml_ctx, h, cs); // middle control } - int control_offset = controls.size() - 2; + int control_offset = static_cast(controls.size() - 2); // output_blocks int output_block_idx = 0; @@ -536,7 +541,7 @@ public: hs.pop_back(); if (controls.size() > 0) { - auto cs = ggml_scale_inplace(ctx->ggml_ctx, controls[control_offset], control_strength); + auto cs = ggml_ext_scale(ctx->ggml_ctx, controls[control_offset], control_strength, true); h_skip = ggml_add(ctx->ggml_ctx, h_skip, cs); // control net condition control_offset--; } @@ -615,7 +620,7 @@ struct UNetModelRunner : public GGMLRunner { struct ggml_cgraph* gf = new_graph_custom(UNET_GRAPH_SIZE); if (num_video_frames == -1) { - num_video_frames = x->ne[3]; + num_video_frames = static_cast(x->ne[3]); } x = to_backend(x); @@ -700,12 +705,12 @@ struct UNetModelRunner : public GGMLRunner { struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); compute(8, x, timesteps, context, nullptr, y, num_video_frames, {}, 0.f, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out); - LOG_DEBUG("unet test done in %dms", t1 - t0); + LOG_DEBUG("unet test done in %lldms", t1 - t0); } } }; diff --git a/upscaler.cpp b/src/upscaler.cpp similarity index 98% rename from upscaler.cpp rename to src/upscaler.cpp index 29ac981e..fd0dc824 100644 --- a/upscaler.cpp +++ b/src/upscaler.cpp @@ -89,7 +89,7 @@ struct UpscalerGGML { ggml_tensor* upscaled = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, output_width, output_height, 3, 1); auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) { - esrgan_upscaler->compute(n_threads, in, &out); + return esrgan_upscaler->compute(n_threads, in, &out); }; int64_t t0 = ggml_time_ms(); sd_tiling(input_image_tensor, upscaled, esrgan_upscaler->scale, esrgan_upscaler->tile_size, 0.25f, on_tiling); diff --git a/util.cpp b/src/util.cpp similarity index 83% rename from util.cpp rename to src/util.cpp index 680ff804..a94cfd98 100644 --- a/util.cpp +++ b/src/util.cpp @@ -95,9 +95,71 @@ bool is_directory(const std::string& path) { return (attributes != INVALID_FILE_ATTRIBUTES && (attributes & FILE_ATTRIBUTE_DIRECTORY)); } +class MmapWrapperImpl : public MmapWrapper { +public: + MmapWrapperImpl(void* data, size_t size, HANDLE hfile, HANDLE hmapping) + : MmapWrapper(data, size), hfile_(hfile), hmapping_(hmapping) {} + + ~MmapWrapperImpl() override { + UnmapViewOfFile(data_); + CloseHandle(hmapping_); + CloseHandle(hfile_); + } + +private: + HANDLE hfile_; + HANDLE hmapping_; +}; + +std::unique_ptr MmapWrapper::create(const std::string& filename) { + 
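// Read-only mapping flow on Windows: CreateFileA -> GetFileSizeEx ->
// CreateFileMapping(PAGE_READONLY) -> MapViewOfFile(FILE_MAP_READ); each
// failure path closes the handles acquired so far. A hedged usage sketch for
// either platform, using only the wrapper API declared in util.h (the file
// name is illustrative):
//
//   auto m = MmapWrapper::create("weights.safetensors");
//   if (m) {
//       uint8_t header[8];
//       if (m->copy_data(header, sizeof(header), /*offset=*/0)) {
//           // header now holds the first 8 bytes; m->data()/m->size() expose the
//           // whole mapping, which is released when m goes out of scope.
//       }
//   }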
void* mapped_data = nullptr; + size_t file_size = 0; + + HANDLE file_handle = CreateFileA( + filename.c_str(), + GENERIC_READ, + FILE_SHARE_READ, + NULL, + OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, + NULL); + + if (file_handle == INVALID_HANDLE_VALUE) { + return nullptr; + } + + LARGE_INTEGER size; + if (!GetFileSizeEx(file_handle, &size)) { + CloseHandle(file_handle); + return nullptr; + } + + file_size = static_cast(size.QuadPart); + + HANDLE mapping_handle = CreateFileMapping(file_handle, NULL, PAGE_READONLY, 0, 0, NULL); + + if (mapping_handle == NULL) { + CloseHandle(file_handle); + return nullptr; + } + + mapped_data = MapViewOfFile(mapping_handle, FILE_MAP_READ, 0, 0, file_size); + + if (mapped_data == NULL) { + CloseHandle(mapping_handle); + CloseHandle(file_handle); + return nullptr; + } + + return std::make_unique(mapped_data, file_size, file_handle, mapping_handle); +} + #else // Unix #include +#include +#include #include +#include bool file_exists(const std::string& filename) { struct stat buffer; @@ -109,8 +171,64 @@ bool is_directory(const std::string& path) { return (stat(path.c_str(), &buffer) == 0 && S_ISDIR(buffer.st_mode)); } +class MmapWrapperImpl : public MmapWrapper { +public: + MmapWrapperImpl(void* data, size_t size) + : MmapWrapper(data, size) {} + + ~MmapWrapperImpl() override { + munmap(data_, size_); + } +}; + +std::unique_ptr MmapWrapper::create(const std::string& filename) { + int file_descriptor = open(filename.c_str(), O_RDONLY); + if (file_descriptor == -1) { + return nullptr; + } + + int mmap_flags = MAP_PRIVATE; + +#ifdef __linux__ + // performance flags used by llama.cpp + // posix_fadvise(file_descriptor, 0, 0, POSIX_FADV_SEQUENTIAL); + // mmap_flags |= MAP_POPULATE; #endif + struct stat sb; + if (fstat(file_descriptor, &sb) == -1) { + close(file_descriptor); + return nullptr; + } + + size_t file_size = sb.st_size; + + void* mapped_data = mmap(NULL, file_size, PROT_READ, mmap_flags, file_descriptor, 0); + + close(file_descriptor); + + if (mapped_data == MAP_FAILED) { + return nullptr; + } + +#ifdef __linux__ + // performance flags used by llama.cpp + // posix_madvise(mapped_data, file_size, POSIX_MADV_WILLNEED); +#endif + + return std::make_unique(mapped_data, file_size); +} + +#endif + +bool MmapWrapper::copy_data(void* buf, size_t n, size_t offset) const { + if (offset >= size_ || n > (size_ - offset)) { + return false; + } + std::memcpy(buf, data() + offset, n); + return true; +} + // get_num_physical_cores is copy from // https://github.com/ggerganov/llama.cpp/blob/master/examples/common.cpp // LICENSE: https://github.com/ggerganov/llama.cpp/blob/master/LICENSE @@ -370,7 +488,7 @@ sd_image_f32_t sd_image_t_to_sd_image_f32_t(sd_image_t image) { // Allocate memory for float data converted_image.data = (float*)malloc(image.width * image.height * image.channel * sizeof(float)); - for (int i = 0; i < image.width * image.height * image.channel; i++) { + for (uint32_t i = 0; i < image.width * image.height * image.channel; i++) { // Convert uint8_t to float converted_image.data[i] = (float)image.data[i]; } @@ -402,7 +520,7 @@ sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int uint32_t x2 = std::min(x1 + 1, image.width - 1); uint32_t y2 = std::min(y1 + 1, image.height - 1); - for (int k = 0; k < image.channel; k++) { + for (uint32_t k = 0; k < image.channel; k++) { float v1 = *(image.data + y1 * image.width * image.channel + x1 * image.channel + k); float v2 = *(image.data + y1 * image.width * image.channel + x2 * image.channel + 
k); float v3 = *(image.data + y2 * image.width * image.channel + x1 * image.channel + k); @@ -422,9 +540,9 @@ sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int } void normalize_sd_image_f32_t(sd_image_f32_t image, float means[3], float stds[3]) { - for (int y = 0; y < image.height; y++) { - for (int x = 0; x < image.width; x++) { - for (int k = 0; k < image.channel; k++) { + for (uint32_t y = 0; y < image.height; y++) { + for (uint32_t x = 0; x < image.width; x++) { + for (uint32_t k = 0; k < image.channel; k++) { int index = (y * image.width + x) * image.channel + k; image.data[index] = (image.data[index] - means[k]) / stds[k]; } @@ -433,8 +551,8 @@ void normalize_sd_image_f32_t(sd_image_f32_t image, float means[3], float stds[3 } // Constants for means and std -float means[3] = {0.48145466, 0.4578275, 0.40821073}; -float stds[3] = {0.26862954, 0.26130258, 0.27577711}; +float means[3] = {0.48145466f, 0.4578275f, 0.40821073f}; +float stds[3] = {0.26862954f, 0.26130258f, 0.27577711f}; // Function to clip and preprocess sd_image_f32_t sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int target_height) { @@ -458,7 +576,7 @@ sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int targe uint32_t x2 = std::min(x1 + 1, image.width - 1); uint32_t y2 = std::min(y1 + 1, image.height - 1); - for (int k = 0; k < image.channel; k++) { + for (uint32_t k = 0; k < image.channel; k++) { float v1 = *(image.data + y1 * image.width * image.channel + x1 * image.channel + k); float v2 = *(image.data + y1 * image.width * image.channel + x2 * image.channel + k); float v3 = *(image.data + y2 * image.width * image.channel + x1 * image.channel + k); @@ -484,11 +602,11 @@ sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int targe result.channel = image.channel; result.data = (float*)malloc(target_height * target_width * image.channel * sizeof(float)); - for (int k = 0; k < image.channel; k++) { - for (int i = 0; i < result.height; i++) { - for (int j = 0; j < result.width; j++) { - int src_y = std::min(i + h_offset, resized_height - 1); - int src_x = std::min(j + w_offset, resized_width - 1); + for (uint32_t k = 0; k < image.channel; k++) { + for (uint32_t i = 0; i < result.height; i++) { + for (uint32_t j = 0; j < result.width; j++) { + int src_y = std::min(static_cast(i + h_offset), resized_height - 1); + int src_x = std::min(static_cast(j + w_offset), resized_width - 1); *(result.data + i * result.width * image.channel + j * image.channel + k) = fmin(fmax(*(resized_data + src_y * resized_width * image.channel + src_x * image.channel + k), 0.0f), 255.0f) / 255.0f; } @@ -499,9 +617,9 @@ sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int targe free(resized_data); // Normalize - for (int k = 0; k < image.channel; k++) { - for (int i = 0; i < result.height; i++) { - for (int j = 0; j < result.width; j++) { + for (uint32_t k = 0; k < image.channel; k++) { + for (uint32_t i = 0; i < result.height; i++) { + for (uint32_t j = 0; j < result.width; j++) { // *(result.data + i * size * image.channel + j * image.channel + k) = 0.5f; int offset = i * result.width * image.channel + j * image.channel + k; float value = *(result.data + offset); diff --git a/util.h b/src/util.h similarity index 77% rename from util.h rename to src/util.h index dd4a0c30..7dee7bf5 100644 --- a/util.h +++ b/src/util.h @@ -2,6 +2,7 @@ #define __UTIL_H__ #include +#include #include #include @@ -43,6 +44,28 @@ sd_image_f32_t 
resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int target_height); +class MmapWrapper { +public: + static std::unique_ptr create(const std::string& filename); + + virtual ~MmapWrapper() = default; + + MmapWrapper(const MmapWrapper&) = delete; + MmapWrapper& operator=(const MmapWrapper&) = delete; + MmapWrapper(MmapWrapper&&) = delete; + MmapWrapper& operator=(MmapWrapper&&) = delete; + + const uint8_t* data() const { return static_cast(data_); } + size_t size() const { return size_; } + bool copy_data(void* buf, size_t n, size_t offset) const; + +protected: + MmapWrapper(void* data, size_t size) + : data_(data), size_(size) {} + void* data_ = nullptr; + size_t size_ = 0; +}; + std::string path_join(const std::string& p1, const std::string& p2); std::vector split_string(const std::string& str, char delimiter); void pretty_progress(int step, int steps, float time); diff --git a/vae.hpp b/src/vae.hpp similarity index 96% rename from vae.hpp rename to src/vae.hpp index ad5db1b5..7ccba6ee 100644 --- a/vae.hpp +++ b/src/vae.hpp @@ -1,8 +1,7 @@ #ifndef __VAE_HPP__ #define __VAE_HPP__ -#include "common.hpp" -#include "ggml_extend.hpp" +#include "common_block.hpp" /*================================================== AutoEncoderKL ===================================================*/ @@ -127,8 +126,6 @@ public: q = q_proj->forward(ctx, h_); // [N, h * w, in_channels] k = k_proj->forward(ctx, h_); // [N, h * w, in_channels] v = v_proj->forward(ctx, h_); // [N, h * w, in_channels] - - v = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, v, 1, 0, 2, 3)); // [N, in_channels, h * w] } else { q = q_proj->forward(ctx, h_); // [N, in_channels, h, w] q = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, q, 1, 2, 0, 3)); // [N, h, w, in_channels] @@ -138,11 +135,12 @@ public: k = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, k, 1, 2, 0, 3)); // [N, h, w, in_channels] k = ggml_reshape_3d(ctx->ggml_ctx, k, c, h * w, n); // [N, h * w, in_channels] - v = v_proj->forward(ctx, h_); // [N, in_channels, h, w] - v = ggml_reshape_3d(ctx->ggml_ctx, v, h * w, c, n); // [N, in_channels, h * w] + v = v_proj->forward(ctx, h_); // [N, in_channels, h, w] + v = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, v, 1, 2, 0, 3)); // [N, h, w, in_channels] + v = ggml_reshape_3d(ctx->ggml_ctx, v, c, h * w, n); // [N, h * w, in_channels] } - h_ = ggml_ext_attention(ctx->ggml_ctx, q, k, v, false); // [N, h * w, in_channels] + h_ = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, 1, nullptr, false, ctx->flash_attn_enabled); if (use_linear) { h_ = proj_out->forward(ctx, h_); // [N, h * w, in_channels] @@ -166,18 +164,18 @@ public: AE3DConv(int64_t in_channels, int64_t out_channels, std::pair kernel_size, - int64_t video_kernel_size = 3, + int video_kernel_size = 3, std::pair stride = {1, 1}, std::pair padding = {0, 0}, std::pair dilation = {1, 1}, bool bias = true) : Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias) { - int64_t kernel_padding = video_kernel_size / 2; - blocks["time_mix_conv"] = std::shared_ptr(new Conv3dnx1x1(out_channels, - out_channels, - video_kernel_size, - 1, - kernel_padding)); + int kernel_padding = video_kernel_size / 2; + blocks["time_mix_conv"] = std::shared_ptr(new Conv3d(out_channels, + out_channels, + {video_kernel_size, 1, 1}, + {1, 1, 1}, + {kernel_padding, 0, 0})); } struct ggml_tensor* forward(GGMLRunnerContext* ctx, @@ -186,7 +184,7 @@ public: // 
skip_video always False // x: [N, IC, IH, IW] // result: [N, OC, OH, OW] - auto time_mix_conv = std::dynamic_pointer_cast(blocks["time_mix_conv"]); + auto time_mix_conv = std::dynamic_pointer_cast(blocks["time_mix_conv"]); x = Conv2d::forward(ctx, x); // timesteps = x.shape[0] @@ -254,8 +252,8 @@ public: float alpha = get_alpha(); x = ggml_add(ctx->ggml_ctx, - ggml_scale(ctx->ggml_ctx, x, alpha), - ggml_scale(ctx->ggml_ctx, x_mix, 1.0f - alpha)); + ggml_ext_scale(ctx->ggml_ctx, x, alpha), + ggml_ext_scale(ctx->ggml_ctx, x_mix, 1.0f - alpha)); x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b c t (h w) -> b t c (h w) x = ggml_reshape_4d(ctx->ggml_ctx, x, W, H, C, T * B); // b t c (h w) -> (b t) c h w @@ -409,8 +407,8 @@ public: z_channels(z_channels), video_decoder(video_decoder), video_kernel_size(video_kernel_size) { - size_t num_resolutions = ch_mult.size(); - int block_in = ch * ch_mult[num_resolutions - 1]; + int num_resolutions = static_cast(ch_mult.size()); + int block_in = ch * ch_mult[num_resolutions - 1]; blocks["conv_in"] = std::shared_ptr(new Conv2d(z_channels, block_in, {3, 3}, {1, 1}, {1, 1})); @@ -461,7 +459,7 @@ public: h = mid_block_2->forward(ctx, h); // [N, block_in, h, w] // upsampling - size_t num_resolutions = ch_mult.size(); + int num_resolutions = static_cast(ch_mult.size()); for (int i = num_resolutions - 1; i >= 0; i--) { for (int j = 0; j < num_res_blocks + 1; j++) { std::string name = "up." + std::to_string(i) + ".block." + std::to_string(j); @@ -745,12 +743,12 @@ struct AutoEncoderKL : public VAE { print_ggml_tensor(x); struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); compute(8, x, false, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out); - LOG_DEBUG("encode test done in %dms", t1 - t0); + LOG_DEBUG("encode test done in %lldms", t1 - t0); } if (false) { @@ -763,12 +761,12 @@ struct AutoEncoderKL : public VAE { print_ggml_tensor(z); struct ggml_tensor* out = nullptr; - int t0 = ggml_time_ms(); + int64_t t0 = ggml_time_ms(); compute(8, z, true, &out, work_ctx); - int t1 = ggml_time_ms(); + int64_t t1 = ggml_time_ms(); print_ggml_tensor(out); - LOG_DEBUG("decode test done in %dms", t1 - t0); + LOG_DEBUG("decode test done in %lldms", t1 - t0); } }; }; diff --git a/version.cpp b/src/version.cpp similarity index 100% rename from version.cpp rename to src/version.cpp diff --git a/vocab.hpp b/src/vocab/clip_t5.hpp similarity index 99% rename from vocab.hpp rename to src/vocab/clip_t5.hpp index 3902045e..8ba40783 100644 --- a/vocab.hpp +++ b/src/vocab/clip_t5.hpp @@ -1,4 +1,4 @@ -static unsigned char merges_utf8_c_str[] = { +static const unsigned char clip_merges_utf8_c_str[] = { 0x23, 0x76, 0x65, @@ -524620,7 +524620,7 @@ static unsigned char merges_utf8_c_str[] = { 0x0a, }; -static unsigned char t5_tokenizer_json_str[] = { +static const unsigned char t5_tokenizer_json_str[] = { 0x7b, 0x0a, 0x20, diff --git a/vocab_mistral.hpp b/src/vocab/mistral.hpp similarity index 99% rename from vocab_mistral.hpp rename to src/vocab/mistral.hpp index 3eb8b259..5bfa873b 100644 --- a/vocab_mistral.hpp +++ b/src/vocab/mistral.hpp @@ -1,4 +1,4 @@ -unsigned char mistral_merges_utf8_c_str[] = { +static const unsigned char mistral_merges_utf8_c_str[] = { 0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0x20, 0x74, 0x0a, 0x65, 0x20, 0x72, 0x0a, 0x69, 0x20, 0x6e, 0x0a, 0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0xc4, 0xa0, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0xc4, 0xa0, 0x20, 0xc4, 0xa0, @@ 
-260614,7 +260614,7 @@ unsigned char mistral_merges_utf8_c_str[] = { 0xc3, 0xa5, 0xc4, 0xb2, 0xc4, 0xb0, 0x20, 0xc3, 0xa6, 0xc2, 0xb1, 0xc4, 0xab, 0xc3, 0xa4, 0xc2, 0xb9, 0xc2, 0xa6, 0x0a, }; -unsigned char mistral_vocab_json_utf8_c_str[] = { +static const unsigned char mistral_vocab_json_utf8_c_str[] = { 0x7b, 0x22, 0x3c, 0x75, 0x6e, 0x6b, 0x3e, 0x22, 0x3a, 0x20, 0x30, 0x2c, 0x20, 0x22, 0x3c, 0x73, 0x3e, 0x22, 0x3a, 0x20, 0x31, 0x2c, 0x20, 0x22, 0x3c, 0x2f, 0x73, 0x3e, 0x22, 0x3a, 0x20, 0x32, 0x2c, 0x20, 0x22, 0x5b, diff --git a/vocab_qwen.hpp b/src/vocab/qwen.hpp similarity index 99% rename from vocab_qwen.hpp rename to src/vocab/qwen.hpp index 2c5c7fe8..9db2339e 100644 --- a/vocab_qwen.hpp +++ b/src/vocab/qwen.hpp @@ -1,4 +1,4 @@ -unsigned char qwen2_merges_utf8_c_str[] = { +static const unsigned char qwen2_merges_utf8_c_str[] = { 0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0xc4, 0xa0, 0x0a, 0x69, 0x20, 0x6e, 0x0a, 0xc4, 0xa0, 0x20, 0x74, 0x0a, 0xc4, 0xa0, 0xc4, 0xa0, 0xc4, 0xa0, 0xc4, 0xa0, 0x20, 0xc4, 0xa0, diff --git a/vocab_umt5.hpp b/src/vocab/umt5.hpp similarity index 99% rename from vocab_umt5.hpp rename to src/vocab/umt5.hpp index 22c581d5..a9f87a20 100644 --- a/vocab_umt5.hpp +++ b/src/vocab/umt5.hpp @@ -1,4 +1,4 @@ -unsigned char umt5_tokenizer_json_str[] = { +static const unsigned char umt5_tokenizer_json_str[] = { 0x7b, 0x22, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x20, 0x22, 0x31, 0x2e, 0x30, 0x22, 0x2c, 0x20, 0x22, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x20, 0x6e, 0x75, 0x6c, diff --git a/src/vocab/vocab.cpp b/src/vocab/vocab.cpp new file mode 100644 index 00000000..63b28686 --- /dev/null +++ b/src/vocab/vocab.cpp @@ -0,0 +1,35 @@ +#include "vocab.h" +#include "clip_t5.hpp" +#include "mistral.hpp" +#include "qwen.hpp" +#include "umt5.hpp" + +std::string load_clip_merges() { + std::string merges_utf8_str(reinterpret_cast(clip_merges_utf8_c_str), sizeof(clip_merges_utf8_c_str)); + return merges_utf8_str; +} + +std::string load_qwen2_merges() { + std::string merges_utf8_str(reinterpret_cast(qwen2_merges_utf8_c_str), sizeof(qwen2_merges_utf8_c_str)); + return merges_utf8_str; +} + +std::string load_mistral_merges() { + std::string merges_utf8_str(reinterpret_cast(mistral_merges_utf8_c_str), sizeof(mistral_merges_utf8_c_str)); + return merges_utf8_str; +} + +std::string load_mistral_vocab_json() { + std::string json_str(reinterpret_cast(mistral_vocab_json_utf8_c_str), sizeof(mistral_vocab_json_utf8_c_str)); + return json_str; +} + +std::string load_t5_tokenizer_json() { + std::string json_str(reinterpret_cast(t5_tokenizer_json_str), sizeof(t5_tokenizer_json_str)); + return json_str; +} + +std::string load_umt5_tokenizer_json() { + std::string json_str(reinterpret_cast(umt5_tokenizer_json_str), sizeof(umt5_tokenizer_json_str)); + return json_str; +} \ No newline at end of file diff --git a/src/vocab/vocab.h b/src/vocab/vocab.h new file mode 100644 index 00000000..cfa033a4 --- /dev/null +++ b/src/vocab/vocab.h @@ -0,0 +1,13 @@ +#ifndef __VOCAB_H__ +#define __VOCAB_H__ + +#include + +std::string load_clip_merges(); +std::string load_qwen2_merges(); +std::string load_mistral_merges(); +std::string load_mistral_vocab_json(); +std::string load_t5_tokenizer_json(); +std::string load_umt5_tokenizer_json(); + +#endif // __VOCAB_H__ \ No newline at end of file diff --git a/wan.hpp b/src/wan.hpp similarity index 96% rename from wan.hpp rename to src/wan.hpp index 75333bfe..d94fbd48 100644 --- a/wan.hpp +++ 
b/src/wan.hpp
@@ -5,9 +5,8 @@
 #include
 #include
-#include "common.hpp"
+#include "common_block.hpp"
 #include "flux.hpp"
-#include "ggml_extend.hpp"
 #include "rope.hpp"
 #include "vae.hpp"
@@ -75,7 +74,7 @@ namespace WAN {
                lp2 -= (int)cache_x->ne[2];
            }
-           x = ggml_pad_ext(ctx->ggml_ctx, x, lp0, rp0, lp1, rp1, lp2, rp2, 0, 0);
+           x = ggml_ext_pad_ext(ctx->ggml_ctx, x, lp0, rp0, lp1, rp1, lp2, rp2, 0, 0, ctx->circular_x_enabled, ctx->circular_y_enabled);
            return ggml_ext_conv_3d(ctx->ggml_ctx, x, w, b, in_channels, std::get<2>(stride), std::get<1>(stride), std::get<0>(stride), 0, 0, 0,
@@ -108,7 +107,7 @@ namespace WAN {
            struct ggml_tensor* w = params["gamma"];
            w = ggml_reshape_1d(ctx->ggml_ctx, w, ggml_nelements(w));
            auto h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, x, 3, 0, 1, 2)); // [ID, IH, IW, N*IC]
-           h = ggml_rms_norm(ctx->ggml_ctx, h, 1e-12);
+           h = ggml_rms_norm(ctx->ggml_ctx, h, 1e-12f);
            h = ggml_mul(ctx->ggml_ctx, h, w);
            h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, h, 1, 2, 3, 0));
@@ -206,9 +205,9 @@ namespace WAN {
            } else if (mode == "upsample3d") {
                x = ggml_upscale(ctx->ggml_ctx, x, 2, GGML_SCALE_MODE_NEAREST);
            } else if (mode == "downsample2d") {
-               x = ggml_pad(ctx->ggml_ctx, x, 1, 1, 0, 0);
+               x = ggml_ext_pad(ctx->ggml_ctx, x, 1, 1, 0, 0, ctx->circular_x_enabled, ctx->circular_y_enabled);
            } else if (mode == "downsample3d") {
-               x = ggml_pad(ctx->ggml_ctx, x, 1, 1, 0, 0);
+               x = ggml_ext_pad(ctx->ggml_ctx, x, 1, 1, 0, 0, ctx->circular_x_enabled, ctx->circular_y_enabled);
            }
            x = resample_1->forward(ctx, x);
            x = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, x, 0, 1, 3, 2)); // (c, t, h, w)
@@ -243,13 +242,13 @@ namespace WAN {
    protected:
        int64_t in_channels;
        int64_t out_channels;
-       int64_t factor_t;
-       int64_t factor_s;
-       int64_t factor;
+       int factor_t;
+       int factor_s;
+       int factor;
        int64_t group_size;
    public:
-       AvgDown3D(int64_t in_channels, int64_t out_channels, int64_t factor_t, int64_t factor_s = 1)
+       AvgDown3D(int64_t in_channels, int64_t out_channels, int factor_t, int factor_s = 1)
            : in_channels(in_channels), out_channels(out_channels), factor_t(factor_t), factor_s(factor_s) {
            factor = factor_t * factor_s * factor_s;
            GGML_ASSERT(in_channels * factor % out_channels == 0);
@@ -266,7 +265,7 @@ namespace WAN {
            int64_t H = x->ne[1];
            int64_t W = x->ne[0];
-           int64_t pad_t = (factor_t - T % factor_t) % factor_t;
+           int pad_t = (factor_t - T % factor_t) % factor_t;
            x = ggml_pad_ext(ctx->ggml_ctx, x, 0, 0, 0, 0, pad_t, 0, 0, 0);
            T = x->ne[2];
@@ -572,9 +571,8 @@ namespace WAN {
            auto v = qkv_vec[2];
            v = ggml_reshape_3d(ctx->ggml_ctx, v, h * w, c, n); // [t, c, h * w]
-           x = ggml_ext_attention(ctx->ggml_ctx, q, k, v, false); // [t, h * w, c]
-           // v = ggml_cont(ctx, ggml_ext_torch_permute(ctx, v, 1, 0, 2, 3)); // [t, h * w, c]
-           // x = ggml_ext_attention_ext(ctx, q, k, v, q->ne[2], nullptr, false, false, true);
+           v = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, v, 1, 0, 2, 3)); // [t, h * w, c]
+           x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, 1, nullptr, false, ctx->flash_attn_enabled); // [t, h * w, c]
            x = ggml_ext_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 1, 0, 2, 3)); // [t, c, h * w]
            x = ggml_reshape_4d(ctx->ggml_ctx, x, w, h, c, n); // [t, c, h, w]
@@ -1071,7 +1069,7 @@ namespace WAN {
            int64_t iter_ = z->ne[2];
            auto x = conv2->forward(ctx, z);
            struct ggml_tensor* out;
-           for (int64_t i = 0; i < iter_; i++) {
+           for (int i = 0; i < iter_; i++) {
                _conv_idx = 0;
                if (i == 0) {
                    auto in = ggml_ext_slice(ctx->ggml_ctx, x, 2, i, i + 1); // [b*c, 1, h, w]
@@ -1091,7 +1089,7 @@ namespace WAN {
        struct ggml_tensor* decode_partial(GGMLRunnerContext* ctx,
                                           struct ggml_tensor* z,
-                                          int64_t i,
+                                          int i,
                                           int64_t b = 1) {
            // z: [b*c, t, h, w]
            GGML_ASSERT(b == 1);
@@ -1146,12 +1144,12 @@ namespace WAN {
            return gf;
        }
-       struct ggml_cgraph* build_graph_partial(struct ggml_tensor* z, bool decode_graph, int64_t i) {
+       struct ggml_cgraph* build_graph_partial(struct ggml_tensor* z, bool decode_graph, int i) {
            struct ggml_cgraph* gf = new_graph_custom(20480);
            ae.clear_cache();
-           for (int64_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
+           for (size_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
                auto feat_cache = get_cache_tensor_by_name("feat_idx:" + std::to_string(feat_idx));
                ae._feat_map[feat_idx] = feat_cache;
            }
@@ -1162,7 +1160,7 @@ namespace WAN {
            struct ggml_tensor* out = decode_graph ? ae.decode_partial(&runner_ctx, z, i) : ae.encode(&runner_ctx, z);
-           for (int64_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
+           for (size_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
                ggml_tensor* feat_cache = ae._feat_map[feat_idx];
                if (feat_cache != nullptr) {
                    cache("feat_idx:" + std::to_string(feat_idx), feat_cache);
@@ -1188,7 +1186,7 @@ namespace WAN {
            } else { // chunk 1 result is weird
                ae.clear_cache();
                int64_t t = z->ne[2];
-               int64_t i = 0;
+               int i = 0;
                auto get_graph = [&]() -> struct ggml_cgraph* {
                    return build_graph_partial(z, decode_graph, i);
                };
@@ -1394,7 +1392,7 @@ namespace WAN {
            k = norm_k->forward(ctx, k);
            auto v = v_proj->forward(ctx, context); // [N, n_context, dim]
-           x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, num_heads, nullptr, false, false, ctx->flash_attn_enabled); // [N, n_token, dim]
+           x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, dim]
            x = o_proj->forward(ctx, x); // [N, n_token, dim]
            return x;
@@ -1443,11 +1441,8 @@ namespace WAN {
            int64_t dim = x->ne[0];
            int64_t context_txt_len = context->ne[1] - context_img_len;
-           context = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, context, 0, 2, 1, 3)); // [context_img_len + context_txt_len, N, dim]
-           auto context_img = ggml_view_3d(ctx->ggml_ctx, context, dim, N, context_img_len, context->nb[1], context->nb[2], 0);
-           auto context_txt = ggml_view_3d(ctx->ggml_ctx, context, dim, N, context_txt_len, context->nb[1], context->nb[2], context_img_len * context->nb[2]);
-           context_img = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, context_img, 0, 2, 1, 3)); // [N, context_img_len, dim]
-           context_txt = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, context_txt, 0, 2, 1, 3)); // [N, context_txt_len, dim]
+           auto context_img = ggml_view_3d(ctx->ggml_ctx, context, dim, context_img_len, N, context->nb[1], context->nb[2], 0); // [N, context_img_len, dim]
+           auto context_txt = ggml_view_3d(ctx->ggml_ctx, context, dim, context_txt_len, N, context->nb[1], context->nb[2], context_img_len * context->nb[1]); // [N, context_txt_len, dim]
            auto q = q_proj->forward(ctx, x);
            q = norm_q->forward(ctx, q);
@@ -1459,8 +1454,8 @@ namespace WAN {
            k_img = norm_k_img->forward(ctx, k_img);
            auto v_img = v_img_proj->forward(ctx, context_img); // [N, context_img_len, dim]
-           auto img_x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k_img, v_img, num_heads, nullptr, false, false, ctx->flash_attn_enabled); // [N, n_token, dim]
-           x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, num_heads, nullptr, false, false, ctx->flash_attn_enabled); // [N, n_token, dim]
+           auto img_x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k_img, v_img, num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, dim]
+           x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, dim]
            x = ggml_add(ctx->ggml_ctx, x, img_x);
@@ -1499,7 +1494,7 @@ namespace WAN {
    class WanAttentionBlock : public GGMLBlock {
    protected:
-       int dim;
+       int64_t dim;
        void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
            enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
@@ -1577,7 +1572,7 @@ namespace WAN {
            y = modulate_add(ctx->ggml_ctx, y, es[3]);
            y = ffn_0->forward(ctx, y);
-           y = ggml_gelu_inplace(ctx->ggml_ctx, y);
+           y = ggml_ext_gelu(ctx->ggml_ctx, y, true);
            y = ffn_2->forward(ctx, y);
            x = ggml_add(ctx->ggml_ctx, x, modulate_mul(ctx->ggml_ctx, y, es[5]));
@@ -1639,7 +1634,7 @@ namespace WAN {
    class Head : public GGMLBlock {
    protected:
-       int dim;
+       int64_t dim;
        void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
            enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
@@ -1685,8 +1680,8 @@ namespace WAN {
    class MLPProj : public GGMLBlock {
    protected:
-       int in_dim;
-       int flf_pos_embed_token_number;
+       int64_t in_dim;
+       int64_t flf_pos_embed_token_number;
        void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
            if (flf_pos_embed_token_number > 0) {
@@ -1724,7 +1719,7 @@ namespace WAN {
            auto x = proj_0->forward(ctx, image_embeds);
            x = proj_1->forward(ctx, x);
-           x = ggml_gelu_inplace(ctx->ggml_ctx, x);
+           x = ggml_ext_gelu(ctx->ggml_ctx, x, true);
            x = proj_3->forward(ctx, x);
            x = proj_4->forward(ctx, x);
@@ -1739,17 +1734,17 @@ namespace WAN {
        int64_t in_dim = 16;
        int64_t dim = 2048;
        int64_t ffn_dim = 8192;
-       int64_t freq_dim = 256;
+       int freq_dim = 256;
        int64_t text_dim = 4096;
        int64_t out_dim = 16;
        int64_t num_heads = 16;
-       int64_t num_layers = 32;
-       int64_t vace_layers = 0;
+       int num_layers = 32;
+       int vace_layers = 0;
        int64_t vace_in_dim = 96;
        std::map vace_layers_mapping = {};
        bool qk_norm = true;
        bool cross_attn_norm = true;
-       float eps = 1e-6;
+       float eps = 1e-6f;
        int64_t flf_pos_embed_token_number = 0;
        int theta = 10000;
        // wan2.1 1.3B: 1536/12, wan2.1/2.2 14B: 5120/40, wan2.2 5B: 3074/24
@@ -1826,7 +1821,7 @@ namespace WAN {
            }
        }
-       struct ggml_tensor* pad_to_patch_size(struct ggml_context* ctx,
+       struct ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx,
                                              struct ggml_tensor* x) {
            int64_t W = x->ne[0];
            int64_t H = x->ne[1];
@@ -1835,8 +1830,7 @@ namespace WAN {
            int pad_t = (std::get<0>(params.patch_size) - T % std::get<0>(params.patch_size)) % std::get<0>(params.patch_size);
            int pad_h = (std::get<1>(params.patch_size) - H % std::get<1>(params.patch_size)) % std::get<1>(params.patch_size);
            int pad_w = (std::get<2>(params.patch_size) - W % std::get<2>(params.patch_size)) % std::get<2>(params.patch_size);
-           x = ggml_pad(ctx, x, pad_w, pad_h, pad_t, 0); // [N*C, T + pad_t, H + pad_h, W + pad_w]
-
+           ggml_ext_pad(ctx->ggml_ctx, x, pad_w, pad_h, pad_t, 0, ctx->circular_x_enabled, ctx->circular_y_enabled);
            return x;
        }
@@ -1912,7 +1906,7 @@ namespace WAN {
            e0 = ggml_reshape_4d(ctx->ggml_ctx, e0, e0->ne[0] / 6, 6, e0->ne[1], e0->ne[2]); // [N, 6, dim] or [N, T, 6, dim]
            context = text_embedding_0->forward(ctx, context);
-           context = ggml_gelu(ctx->ggml_ctx, context);
+           context = ggml_ext_gelu(ctx->ggml_ctx, context);
            context = text_embedding_2->forward(ctx, context); // [N, context_txt_len, dim]
            int64_t context_img_len = 0;
@@ -1951,7 +1945,7 @@ namespace WAN {
                    auto result = vace_block->forward(ctx, c, x_orig, e0, pe, context, context_img_len);
                    auto c_skip = result.first;
                    c = result.second;
-                   c_skip = ggml_scale(ctx->ggml_ctx, c_skip, vace_strength);
+                   c_skip = ggml_ext_scale(ctx->ggml_ctx, c_skip, vace_strength);
                    x = ggml_add(ctx->ggml_ctx, x, c_skip);
                }
            }
@@ -1986,14 +1980,14 @@ namespace WAN {
            int64_t T = x->ne[2];
            int64_t C = x->ne[3];
-           x = pad_to_patch_size(ctx->ggml_ctx, x);
+           x = pad_to_patch_size(ctx, x);
            int64_t t_len = ((T + (std::get<0>(params.patch_size) / 2)) / std::get<0>(params.patch_size));
            int64_t h_len = ((H + (std::get<1>(params.patch_size) / 2)) / std::get<1>(params.patch_size));
            int64_t w_len = ((W + (std::get<2>(params.patch_size) / 2)) / std::get<2>(params.patch_size));
            if (time_dim_concat != nullptr) {
-               time_dim_concat = pad_to_patch_size(ctx->ggml_ctx, time_dim_concat);
+               time_dim_concat = pad_to_patch_size(ctx, time_dim_concat);
                x = ggml_concat(ctx->ggml_ctx, x, time_dim_concat, 2); // [N*C, (T+pad_t) + (T2+pad_t2), H + pad_h, W + pad_w]
                t_len = ((x->ne[2] + (std::get<0>(params.patch_size) / 2)) / std::get<0>(params.patch_size));
            }
@@ -2067,7 +2061,7 @@ namespace WAN {
        if (version == VERSION_WAN2_2_TI2V) {
            desc = "Wan2.2-TI2V-5B";
            wan_params.dim = 3072;
-           wan_params.eps = 1e-06;
+           wan_params.eps = 1e-06f;
            wan_params.ffn_dim = 14336;
            wan_params.freq_dim = 256;
            wan_params.in_dim = 48;
@@ -2086,7 +2080,7 @@ namespace WAN {
                wan_params.in_dim = 16;
            }
            wan_params.dim = 1536;
-           wan_params.eps = 1e-06;
+           wan_params.eps = 1e-06f;
            wan_params.ffn_dim = 8960;
            wan_params.freq_dim = 256;
            wan_params.num_heads = 12;
@@ -2115,14 +2109,14 @@ namespace WAN {
                }
            }
            wan_params.dim = 5120;
-           wan_params.eps = 1e-06;
+           wan_params.eps = 1e-06f;
            wan_params.ffn_dim = 13824;
            wan_params.freq_dim = 256;
            wan_params.num_heads = 40;
            wan_params.out_dim = 16;
            wan_params.text_len = 512;
        } else {
-           GGML_ABORT("invalid num_layers(%ld) of wan", wan_params.num_layers);
+           GGML_ABORT("invalid num_layers(%d) of wan", wan_params.num_layers);
        }
        LOG_INFO("%s", desc.c_str());
@@ -2157,16 +2151,16 @@ namespace WAN {
            time_dim_concat = to_backend(time_dim_concat);
            vace_context = to_backend(vace_context);
-           pe_vec = Rope::gen_wan_pe(x->ne[2],
-                                     x->ne[1],
-                                     x->ne[0],
+           pe_vec = Rope::gen_wan_pe(static_cast(x->ne[2]),
+                                     static_cast(x->ne[1]),
+                                     static_cast(x->ne[0]),
                                      std::get<0>(wan_params.patch_size),
                                      std::get<1>(wan_params.patch_size),
                                      std::get<2>(wan_params.patch_size),
                                      1,
                                      wan_params.theta,
                                      wan_params.axes_dim);
-           int pos_len = pe_vec.size() / wan_params.axes_dim_sum / 2;
+           int pos_len = static_cast(pe_vec.size() / wan_params.axes_dim_sum / 2);
            // LOG_DEBUG("pos_len %d", pos_len);
            auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, wan_params.axes_dim_sum / 2, pos_len);
            // pe->data = pe_vec.data();
@@ -2244,12 +2238,12 @@ namespace WAN {
        struct ggml_tensor* out = nullptr;
-       int t0 = ggml_time_ms();
+       int64_t t0 = ggml_time_ms();
        compute(8, x, timesteps, context, nullptr, nullptr, nullptr, nullptr, 1.f, &out, work_ctx);
-       int t1 = ggml_time_ms();
+       int64_t t1 = ggml_time_ms();
        print_ggml_tensor(out);
-       LOG_DEBUG("wan test done in %dms", t1 - t0);
+       LOG_DEBUG("wan test done in %lldms", t1 - t0);
    }
}
diff --git a/z_image.hpp b/src/z_image.hpp
similarity index 82%
rename from z_image.hpp
rename to src/z_image.hpp
index bc554f17..8f405a59 100644
--- a/z_image.hpp
+++ b/src/z_image.hpp
@@ -54,15 +54,37 @@ namespace ZImage {
            auto qkv = qkv_proj->forward(ctx, x); // [N, n_token, (num_heads + num_kv_heads*2)*head_dim]
            qkv = ggml_reshape_4d(ctx->ggml_ctx, qkv, head_dim, num_heads + num_kv_heads * 2, qkv->ne[1], qkv->ne[2]); // [N, n_token, num_heads + num_kv_heads*2, head_dim]
-           qkv = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, qkv, 0, 2, 3, 1)); // [num_heads + num_kv_heads*2, N, n_token, head_dim]
-           auto q = ggml_view_4d(ctx->ggml_ctx, qkv, qkv->ne[0], qkv->ne[1], qkv->ne[2], num_heads, qkv->nb[1], qkv->nb[2], qkv->nb[3], 0); // [num_heads, N, n_token, head_dim]
-           auto k = ggml_view_4d(ctx->ggml_ctx, qkv, qkv->ne[0], qkv->ne[1], qkv->ne[2], num_kv_heads, qkv->nb[1], qkv->nb[2], qkv->nb[3], qkv->nb[3] * num_heads); // [num_kv_heads, N, n_token, head_dim]
-           auto v = ggml_view_4d(ctx->ggml_ctx, qkv, qkv->ne[0], qkv->ne[1], qkv->ne[2], num_kv_heads, qkv->nb[1], qkv->nb[2], qkv->nb[3], qkv->nb[3] * (num_heads + num_kv_heads)); // [num_kv_heads, N, n_token, head_dim]
-
-           q = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, q, 0, 3, 1, 2)); // [N, n_token, num_heads, head_dim]
-           k = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, k, 0, 3, 1, 2)); // [N, n_token, num_kv_heads, head_dim]
-           v = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, v, 0, 3, 1, 2)); // [N, n_token, num_kv_heads, head_dim]
+           auto q = ggml_view_4d(ctx->ggml_ctx,
+                                 qkv,
+                                 qkv->ne[0],
+                                 num_heads,
+                                 qkv->ne[2],
+                                 qkv->ne[3],
+                                 qkv->nb[1],
+                                 qkv->nb[2],
+                                 qkv->nb[3],
+                                 0); // [N, n_token, num_heads, head_dim]
+           auto k = ggml_view_4d(ctx->ggml_ctx,
+                                 qkv,
+                                 qkv->ne[0],
+                                 num_kv_heads,
+                                 qkv->ne[2],
+                                 qkv->ne[3],
+                                 qkv->nb[1],
+                                 qkv->nb[2],
+                                 qkv->nb[3],
+                                 num_heads * qkv->nb[1]); // [N, n_token, num_kv_heads, head_dim]
+           auto v = ggml_view_4d(ctx->ggml_ctx,
+                                 qkv,
+                                 qkv->ne[0],
+                                 num_kv_heads,
+                                 qkv->ne[2],
+                                 qkv->ne[3],
+                                 qkv->nb[1],
+                                 qkv->nb[2],
+                                 qkv->nb[3],
+                                 (num_heads + num_kv_heads) * qkv->nb[1]); // [N, n_token, num_kv_heads, head_dim]
            if (qk_norm) {
                auto q_norm = std::dynamic_pointer_cast(blocks["q_norm"]);
@@ -239,7 +261,7 @@ namespace ZImage {
    };
    struct ZImageParams {
-       int64_t patch_size = 2;
+       int patch_size = 2;
        int64_t hidden_size = 3840;
        int64_t in_channels = 16;
        int64_t out_channels = 16;
@@ -249,11 +271,11 @@ namespace ZImage {
        int64_t num_heads = 30;
        int64_t num_kv_heads = 30;
        int64_t multiple_of = 256;
-       float ffn_dim_multiplier = 8.0 / 3.0f;
+       float ffn_dim_multiplier = 8.0f / 3.0f;
        float norm_eps = 1e-5f;
        bool qk_norm = true;
        int64_t cap_feat_dim = 2560;
-       float theta = 256.f;
+       int theta = 256;
        std::vector axes_dim = {32, 48, 48};
        int64_t axes_dim_sum = 128;
    };
@@ -324,69 +346,6 @@ namespace ZImage {
            blocks["final_layer"] = std::make_shared(z_image_params.hidden_size, z_image_params.patch_size, z_image_params.out_channels);
        }
-       struct ggml_tensor* pad_to_patch_size(struct ggml_context* ctx,
-                                             struct ggml_tensor* x) {
-           int64_t W = x->ne[0];
-           int64_t H = x->ne[1];
-
-           int pad_h = (z_image_params.patch_size - H % z_image_params.patch_size) % z_image_params.patch_size;
-           int pad_w = (z_image_params.patch_size - W % z_image_params.patch_size) % z_image_params.patch_size;
-           x = ggml_pad(ctx, x, pad_w, pad_h, 0, 0); // [N, C, H + pad_h, W + pad_w]
-           return x;
-       }
-
-       struct ggml_tensor* patchify(struct ggml_context* ctx,
-                                    struct ggml_tensor* x) {
-           // x: [N, C, H, W]
-           // return: [N, h*w, patch_size*patch_size*C]
-           int64_t N = x->ne[3];
-           int64_t C = x->ne[2];
-           int64_t H = x->ne[1];
-           int64_t W = x->ne[0];
-           int64_t p = z_image_params.patch_size;
-           int64_t h = H / z_image_params.patch_size;
-           int64_t w = W / z_image_params.patch_size;
-
-           GGML_ASSERT(h * p == H && w * p == W);
-
-           x = ggml_reshape_4d(ctx, x, p, w, p, h * C * N); // [N*C*h, p, w, p]
-           x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, w, p, p]
-           x = ggml_reshape_4d(ctx, x, p * p, w * h, C, N); // [N, C, h*w, p*p]
-           x = ggml_cont(ctx, ggml_ext_torch_permute(ctx, x, 2, 0, 1, 3)); // [N, h*w, C, p*p]
-           x = ggml_reshape_3d(ctx, x, C * p * p, w * h, N); // [N, h*w, p*p*C]
-           return x;
-       }
-
-       struct ggml_tensor* process_img(struct ggml_context* ctx,
-                                       struct ggml_tensor* x) {
-           x = pad_to_patch_size(ctx, x);
-           x = patchify(ctx, x);
-           return x;
-       }
-
-       struct ggml_tensor* unpatchify(struct ggml_context* ctx,
-                                      struct ggml_tensor* x,
-                                      int64_t h,
-                                      int64_t w) {
-           // x: [N, h*w, patch_size*patch_size*C]
-           // return: [N, C, H, W]
-           int64_t N = x->ne[2];
-           int64_t C = x->ne[0] / z_image_params.patch_size / z_image_params.patch_size;
-           int64_t H = h * z_image_params.patch_size;
-           int64_t W = w * z_image_params.patch_size;
-           int64_t p = z_image_params.patch_size;
-
-           GGML_ASSERT(C * p * p == x->ne[0]);
-
-           x = ggml_reshape_4d(ctx, x, C, p * p, w * h, N); // [N, h*w, p*p, C]
-           x = ggml_cont(ctx, ggml_ext_torch_permute(ctx, x, 1, 2, 0, 3)); // [N, C, h*w, p*p]
-           x = ggml_reshape_4d(ctx, x, p, p, w, h * C * N); // [N*C*h, w, p, p]
-           x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, p, w, p]
-           x = ggml_reshape_4d(ctx, x, W, H, C, N); // [N, C, h*p, w*p]
-
-           return x;
-       }
-
        struct ggml_tensor* forward_core(GGMLRunnerContext* ctx,
                                         struct ggml_tensor* x,
                                         struct ggml_tensor* timestep,
@@ -411,13 +370,13 @@ namespace ZImage {
            auto txt = cap_embedder_1->forward(ctx, cap_embedder_0->forward(ctx, context)); // [N, n_txt_token, hidden_size]
            auto img = x_embedder->forward(ctx, x); // [N, n_img_token, hidden_size]
-           int64_t n_txt_pad_token = Rope::bound_mod(n_txt_token, SEQ_MULTI_OF);
+           int64_t n_txt_pad_token = Rope::bound_mod(static_cast(n_txt_token), SEQ_MULTI_OF);
            if (n_txt_pad_token > 0) {
                auto txt_pad_tokens = ggml_repeat_4d(ctx->ggml_ctx, txt_pad_token, txt_pad_token->ne[0], n_txt_pad_token, N, 1);
                txt = ggml_concat(ctx->ggml_ctx, txt, txt_pad_tokens, 1); // [N, n_txt_token + n_txt_pad_token, hidden_size]
            }
-           int64_t n_img_pad_token = Rope::bound_mod(n_img_token, SEQ_MULTI_OF);
+           int64_t n_img_pad_token = Rope::bound_mod(static_cast(n_img_token), SEQ_MULTI_OF);
            if (n_img_pad_token > 0) {
                auto img_pad_tokens = ggml_repeat_4d(ctx->ggml_ctx, img_pad_token, img_pad_token->ne[0], n_img_pad_token, N, 1);
                img = ggml_concat(ctx->ggml_ctx, img, img_pad_tokens, 1); // [N, n_img_token + n_img_pad_token, hidden_size]
@@ -473,29 +432,24 @@ namespace ZImage {
            int64_t C = x->ne[2];
            int64_t N = x->ne[3];
-           auto img = process_img(ctx->ggml_ctx, x);
+           int patch_size = z_image_params.patch_size;
+
+           auto img = DiT::pad_and_patchify(ctx, x, patch_size, patch_size, false);
            uint64_t n_img_token = img->ne[1];
            if (ref_latents.size() > 0) {
                for (ggml_tensor* ref : ref_latents) {
-                   ref = process_img(ctx->ggml_ctx, ref);
+                   ref = DiT::pad_and_patchify(ctx, ref, patch_size, patch_size, false);
                    img = ggml_concat(ctx->ggml_ctx, img, ref, 1);
                }
            }
-           int64_t h_len = ((H + (z_image_params.patch_size / 2)) / z_image_params.patch_size);
-           int64_t w_len = ((W + (z_image_params.patch_size / 2)) / z_image_params.patch_size);
-
            auto out = forward_core(ctx, img, timestep, context, pe);
-           out = ggml_ext_slice(ctx->ggml_ctx, out, 1, 0, n_img_token); // [N, n_img_token, ph*pw*C]
-           out = unpatchify(ctx->ggml_ctx, out, h_len, w_len); // [N, C, H + pad_h, W + pad_w]
+           out = ggml_ext_slice(ctx->ggml_ctx, out, 1, 0, n_img_token); // [N, n_img_token, ph*pw*C]
+           out = DiT::unpatchify_and_crop(ctx->ggml_ctx, out, H, W, patch_size, patch_size, false); // [N, C, H, W]
-           // slice
-           out = ggml_ext_slice(ctx->ggml_ctx, out, 1, 0, H); // [N, C, H, W + pad_w]
-           out = ggml_ext_slice(ctx->ggml_ctx, out, 0, 0, W); // [N, C, H, W]
-
-           out = ggml_scale(ctx->ggml_ctx, out, -1.f);
+           out = ggml_ext_scale(ctx->ggml_ctx, out, -1.f);
            return out;
        }
@@ -543,17 +497,19 @@ namespace ZImage {
            ref_latents[i] = to_backend(ref_latents[i]);
        }
-       pe_vec = Rope::gen_z_image_pe(x->ne[1],
-                                     x->ne[0],
+       pe_vec = Rope::gen_z_image_pe(static_cast(x->ne[1]),
+                                     static_cast(x->ne[0]),
                                      z_image_params.patch_size,
-                                     x->ne[3],
-                                     context->ne[1],
+                                     static_cast(x->ne[3]),
+                                     static_cast(context->ne[1]),
                                      SEQ_MULTI_OF,
                                      ref_latents,
                                      increase_ref_index,
                                      z_image_params.theta,
+                                     circular_y_enabled,
+                                     circular_x_enabled,
                                      z_image_params.axes_dim);
-       int pos_len = pe_vec.size() / z_image_params.axes_dim_sum / 2;
+       int pos_len = static_cast(pe_vec.size() / z_image_params.axes_dim_sum / 2);
        // LOG_DEBUG("pos_len %d", pos_len);
        auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, z_image_params.axes_dim_sum / 2, pos_len);
        // pe->data = pe_vec.data();
@@ -617,12 +573,12 @@ namespace ZImage {
        struct ggml_tensor* out = nullptr;
-       int t0 = ggml_time_ms();
+       int64_t t0 = ggml_time_ms();
        compute(8, x, timesteps, context, {}, false, &out, work_ctx);
-       int t1 = ggml_time_ms();
+       int64_t t1 = ggml_time_ms();
        print_ggml_tensor(out);
-       LOG_DEBUG("z_image test done in %dms", t1 - t0);
+       LOG_DEBUG("z_image test done in %lldms", t1 - t0);
    }
}
diff --git a/thirdparty/darts.h b/thirdparty/darts.h
index 7c25326a..bd535cd5 100644
--- a/thirdparty/darts.h
+++ b/thirdparty/darts.h
@@ -845,7 +845,7 @@ inline void BitVector::build() {
  num_ones_ = 0;
  for (std::size_t i = 0; i < units_.size(); ++i) {
-   ranks_[i] = num_ones_;
+   ranks_[i] = static_cast(num_ones_);
    num_ones_ += pop_count(units_[i]);
  }
}
@@ -1769,7 +1769,7 @@ id_type DoubleArrayBuilder::arrange_from_keyset(const Keyset &keyset,
inline id_type DoubleArrayBuilder::find_valid_offset(id_type id) const {
  if (extras_head_ >= units_.size()) {
-   return units_.size() | (id & LOWER_MASK);
+   return static_cast(units_.size()) | (id & LOWER_MASK);
  }
  id_type unfixed_id = extras_head_;
@@ -1781,7 +1781,7 @@ inline id_type DoubleArrayBuilder::find_valid_offset(id_type id) const {
    unfixed_id = extras(unfixed_id).next();
  } while (unfixed_id != extras_head_);
-  return units_.size() | (id & LOWER_MASK);
+  return static_cast(units_.size()) | (id & LOWER_MASK);
}
inline bool DoubleArrayBuilder::is_valid_offset(id_type id,
@@ -1812,7 +1812,7 @@ inline void DoubleArrayBuilder::reserve_id(id_type id) {
  if (id == extras_head_) {
    extras_head_ = extras(id).next();
    if (extras_head_ == id) {
-     extras_head_ = units_.size();
+     extras_head_ = static_cast(units_.size());
    }
  }
  extras(extras(id).prev()).set_next(extras(id).next());
@@ -1821,8 +1821,8 @@ inline void DoubleArrayBuilder::expand_units() {
-  id_type src_num_units = units_.size();
-  id_type src_num_blocks = num_blocks();
+  id_type src_num_units = static_cast(units_.size());
+  id_type src_num_blocks = static_cast(num_blocks());
  id_type dest_num_units = src_num_units + BLOCK_SIZE;
  id_type dest_num_blocks = src_num_blocks + 1;
@@ -1834,7 +1834,7 @@ inline void DoubleArrayBuilder::expand_units() {
  units_.resize(dest_num_units);
  if (dest_num_blocks > NUM_EXTRA_BLOCKS) {
-   for (std::size_t id = src_num_units; id < dest_num_units; ++id) {
+   for (id_type id = src_num_units; id < dest_num_units; ++id) {
      extras(id).set_is_used(false);
      extras(id).set_is_fixed(false);
    }
@@ -1858,9 +1858,9 @@ inline void DoubleArrayBuilder::expand_units() {
inline void DoubleArrayBuilder::fix_all_blocks() {
  id_type begin = 0;
  if (num_blocks() > NUM_EXTRA_BLOCKS) {
-   begin = num_blocks() - NUM_EXTRA_BLOCKS;
+   begin = static_cast(num_blocks() - NUM_EXTRA_BLOCKS);
  }
-  id_type end = num_blocks();
+  id_type end = static_cast(num_blocks());
  for (id_type block_id = begin; block_id != end; ++block_id) {
    fix_block(block_id);
diff --git a/thirdparty/stb_image_write.h b/thirdparty/stb_image_write.h
index 55118853..9128a313 100644
--- a/thirdparty/stb_image_write.h
+++ b/thirdparty/stb_image_write.h
@@ -257,6 +257,10 @@ int stbi_write_tga_with_rle = 1;
int stbi_write_force_png_filter = -1;
#endif
+#ifndef STBMIN
+#define STBMIN(a, b) ((a) < (b) ? (a) : (b))
+#endif // STBMIN
+
static int stbi__flip_vertically_on_write = 0;
STBIWDEF void stbi_flip_vertically_on_write(int flag)
@@ -1179,8 +1183,8 @@ STBIWDEF unsigned char *stbi_write_png_to_mem(const unsigned char *pixels, int s
   if (!zlib) return 0;
   if(parameters != NULL) {
-      param_length = strlen(parameters);
-      param_length += strlen("parameters") + 1; // For the name and the null-byte
+      param_length = (int)strlen(parameters);
+      param_length += (int)strlen("parameters") + 1; // For the name and the null-byte
   }
   // each tag requires 12 bytes of overhead
@@ -1526,11 +1530,11 @@ static int stbi_write_jpg_core(stbi__write_context *s, int width, int height, in
   if(parameters != NULL) {
      stbiw__putc(s, 0xFF /* comnent */ );
      stbiw__putc(s, 0xFE /* marker */ );
-     size_t param_length = std::min(2 + strlen("parameters") + 1 + strlen(parameters) + 1, (size_t) 0xFFFF);
+     int param_length = STBMIN(2 + (int)strlen("parameters") + 1 + (int)strlen(parameters) + 1, 0xFFFF);
      stbiw__putc(s, param_length >> 8); // no need to mask, length < 65536
      stbiw__putc(s, param_length & 0xFF);
-     s->func(s->context, (void*)"parameters", strlen("parameters") + 1); // std::string is zero-terminated
-     s->func(s->context, (void*)parameters, std::min(param_length, (size_t) 65534) - 2 - strlen("parameters") - 1);
+     s->func(s->context, (void*)"parameters", (int)strlen("parameters") + 1); // std::string is zero-terminated
+     s->func(s->context, (void*)parameters, STBMIN(param_length, 65534) - 2 - (int)strlen("parameters") - 1);
      if(param_length > 65534) stbiw__putc(s, 0); // always zero-terminate for safety
      if(param_length & 1) stbiw__putc(s, 0xFF); // pad to even length
   }
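The `circular_x_enabled` / `circular_y_enabled` flags threaded through the `ggml_ext_pad` / `ggml_ext_pad_ext` calls in the wan.hpp hunks above presumably switch the padded border from zero fill to wrap-around (circular) padding, which is what makes tileable output possible. The sketch below is illustrative only and is not the ggml implementation; `wrap_index` and `pad_2d` are hypothetical helpers operating on a plain float buffer.

// Illustrative sketch of zero vs. circular padding on an h x w float buffer.
// wrap_index / pad_2d are hypothetical helpers, not part of ggml or this repo.
#include <cstdio>
#include <vector>

static int wrap_index(int i, int n) {
    return ((i % n) + n) % n;  // maps any i into [0, n)
}

// Pads src (h x w) by `pad` on every side. With circular=true the border
// repeats the opposite edge (tileable); otherwise it is filled with zeros.
static std::vector<float> pad_2d(const std::vector<float>& src, int w, int h,
                                 int pad, bool circular) {
    int W = w + 2 * pad, H = h + 2 * pad;
    std::vector<float> dst(static_cast<size_t>(W) * H, 0.0f);
    for (int y = 0; y < H; y++) {
        for (int x = 0; x < W; x++) {
            int sx = x - pad, sy = y - pad;
            if (circular) {
                sx = wrap_index(sx, w);
                sy = wrap_index(sy, h);
            } else if (sx < 0 || sx >= w || sy < 0 || sy >= h) {
                continue;  // keep the zero fill
            }
            dst[static_cast<size_t>(y) * W + x] = src[static_cast<size_t>(sy) * w + sx];
        }
    }
    return dst;
}

int main() {
    std::vector<float> img = {1, 2, 3, 4};  // 2x2 input
    auto zero = pad_2d(img, 2, 2, 1, false);
    auto circ = pad_2d(img, 2, 2, 1, true);
    printf("zero-padded corner: %.0f, circular corner: %.0f\n", zero[0], circ[0]);  // 0 vs 4
    return 0;
}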
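The z_image.hpp change above drops the file-local `pad_to_patch_size` / `patchify` / `unpatchify` helpers in favour of shared `DiT::pad_and_patchify` / `DiT::unpatchify_and_crop` routines. For readers unfamiliar with the layout, the plain-loop sketch below shows how a [C, H, W] image becomes [h*w, p*p*C] patch tokens and back; the within-patch ordering used here is one concrete choice for illustration and is not necessarily the exact ordering produced by the ggml reshape/permute graph.

// Index-math illustration of patchify/unpatchify: [C, H, W] <-> [h*w, p*p*C].
// The real code expresses the equivalent transform as ggml reshapes/permutes.
#include <cassert>
#include <cstddef>
#include <vector>

struct Patchified {
    std::vector<float> tokens;  // h*w tokens, each of length p*p*C
    int h, w;
};

static Patchified patchify(const std::vector<float>& x, int C, int H, int W, int p) {
    assert(H % p == 0 && W % p == 0);
    int h = H / p, w = W / p;
    Patchified out{std::vector<float>(static_cast<std::size_t>(h) * w * p * p * C), h, w};
    for (int c = 0; c < C; c++)
        for (int y = 0; y < H; y++)
            for (int xw = 0; xw < W; xw++) {
                int token = (y / p) * w + (xw / p);            // which patch
                int inner = ((y % p) * p + (xw % p)) * C + c;  // position inside the patch
                out.tokens[static_cast<std::size_t>(token) * p * p * C + inner] =
                    x[(static_cast<std::size_t>(c) * H + y) * W + xw];
            }
    return out;
}

static std::vector<float> unpatchify(const Patchified& t, int C, int p) {
    int H = t.h * p, W = t.w * p;
    std::vector<float> x(static_cast<std::size_t>(C) * H * W);
    for (int c = 0; c < C; c++)
        for (int y = 0; y < H; y++)
            for (int xw = 0; xw < W; xw++) {
                int token = (y / p) * t.w + (xw / p);
                int inner = ((y % p) * p + (xw % p)) * C + c;
                x[(static_cast<std::size_t>(c) * H + y) * W + xw] =
                    t.tokens[static_cast<std::size_t>(token) * p * p * C + inner];
            }
    return x;
}

int main() {
    int C = 3, H = 4, W = 6, p = 2;
    std::vector<float> img(static_cast<std::size_t>(C) * H * W);
    for (std::size_t i = 0; i < img.size(); i++) img[i] = static_cast<float>(i);
    auto round_trip = unpatchify(patchify(img, C, H, W, p), C, p);
    assert(round_trip == img);  // the layout round-trips exactly
    return 0;
}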