Compare commits

...

41 Commits

Author SHA1 Message Date
leejet
f16a110f87
refactor: migrate generation pipeline to sd::Tensor (#1373) 2026-03-30 00:19:25 +08:00
stduhpf
ed88e215a2
refactor: simplify f8_e5m2_to_f16 function a little bit (#1358) 2026-03-30 00:14:33 +08:00
Wagner Bruna
6293ab5aaf
docs: update Spectrum info about DiT models (#1360) 2026-03-30 00:12:57 +08:00
leejet
545fac4f3f
refactor: simplify sample cache flow (#1350) 2026-03-17 00:28:03 +08:00
Tay
5265a5efa1
perf(z-image): switch to fused SwiGLU kernel (#1302) 2026-03-17 00:27:46 +08:00
leejet
84cbd88df1
style: remove redundant struct qualifiers for consistent C/C++ type usage (#1349) 2026-03-16 22:17:22 +08:00
Daniele
997bb11fb6
fix: correct encoder channels for flux2 (#1346) 2026-03-16 22:16:43 +08:00
leejet
862a6586cb
feat: add embedded WebUI (#1207) 2026-03-16 00:26:57 +08:00
leejet
61d8331ef3 ci: avoid cuda docker build timeout by using -j16 2026-03-15 18:39:29 +08:00
leejet
acc3bf1fdc
refactor: optimize the VAE architecture (#1345) 2026-03-15 16:57:42 +08:00
Kevin Nause
83eabd7c01
ci: add CUDA Dockerfile (#1314) 2026-03-15 16:46:01 +08:00
Wagner Bruna
630ee03f23
refactor: move all cache parameter defaults to the library (#1327) 2026-03-15 16:43:46 +08:00
Wagner Bruna
f6968bc589
chore: remove SD_FAST_SOFTMAX build flag (#1338) 2026-03-15 16:42:47 +08:00
rmatif
adfef62900
feat: add generic DiT support to spectrum cache (#1336) 2026-03-15 16:41:05 +08:00
JusteLeo
6fa7ca9317
docs: add Anima2 gguf download link to anima.md (#1335) 2026-03-15 16:40:14 +08:00
leejet
d6dd6d7b55
refactor: remove unused encode_video (#1332) 2026-03-10 00:36:09 +08:00
rmatif
dea4980f4e
feat: add spectrum caching method (#1322) 2026-03-10 00:35:32 +08:00
leejet
c8fb3d2458
fix: resolve SD1 Pix2Pix issue (#1329) 2026-03-08 00:28:05 +08:00
stduhpf
3d33caaef8
fix: make tiling work better when using circular (#1299) 2026-03-08 00:25:07 +08:00
WinkelCode
9b424db0f4
ci: change workflow owner of "actions-commit-hash" from "pr-mpt" to "prompt" (#1323) 2026-03-08 00:23:23 +08:00
rmatif
d95062737e
fix: ucache: normalize reuse error (#1313) 2026-03-04 23:50:45 +08:00
Korsar13
7c880f80c7
fix: avoid sd-server memory leak (#1316) 2026-03-04 23:47:38 +08:00
leejet
aaa8a51bd8 docs: update sd-cli/sd-server docs 2026-03-04 00:41:17 +08:00
leejet
ba35dd734e
refactor: introduce ggml_ext_zeros_like/ggml_ext_ones_like (#1312) 2026-03-04 00:36:52 +08:00
bssrdf
d41f5fff69
perf: improved flux attention qkv unpacking (#1306) 2026-03-04 00:36:32 +08:00
Korsar13
810ef0cf76
fix: reset weight adapter for models if no loras in request (#1307) 2026-03-04 00:34:07 +08:00
leejet
5792c66879
feat: support some non-standard Anima weight names (#1305) 2026-03-01 22:01:29 +08:00
Wagner Bruna
39d54702a6
feat: accept legacy image parameter on v1/images/edits (#1270) 2026-03-01 22:00:50 +08:00
Wagner Bruna
60889bc9a1
fix: correct sdapi LoRA file handling (#1276) 2026-03-01 21:57:06 +08:00
leejet
e64baa3611
refactor: reuse DiT's patchify/unpatchify functions (#1304) 2026-03-01 21:44:51 +08:00
leejet
cec4aedcfd docs: add anima docs 2026-03-01 15:32:25 +08:00
rmatif
4cdfff5ff2
feat: add Anima support (#1296) 2026-03-01 15:23:18 +08:00
leejet
0752cc9d3a
fix: resolve image quality degradation issue (#1297) 2026-02-26 00:26:21 +08:00
Wagner Bruna
b314d80ad0
feat: turn flow_shift into a generation parameter (#1289)
* feat: turn flow_shift into a generation parameter

* format code

* simplify set_shift/set_parameters

* fix sd_sample_params_to_str

* remove unused variable

* update docs

---------

Co-authored-by: leejet <leejet714@gmail.com>
2026-02-26 00:26:04 +08:00
leejet
c9cd49701a
fix: safely handle whitespace and consecutive newlines (#1288) 2026-02-19 20:54:42 +08:00
akleine
c5eb1e4137
fix: avoid black images if using an invalid VAE (for SDXL) (#1273) 2026-02-19 20:54:18 +08:00
leejet
636d3cb6ff
refactor: reorganize the vocab file structure (#1271) 2026-02-11 00:44:17 +08:00
Wagner Bruna
adea272225
feat(server): use image and command-line dimensions by default on server (#1262) 2026-02-11 00:42:50 +08:00
Mario Limonciello
45ce78a3ae
ci: correct rocm artifact of linux (#1269) 2026-02-10 23:19:28 +08:00
leejet
28ef93c0e1
refactor: reorganize the file structure (#1266) 2026-02-10 23:13:35 +08:00
leejet
3296545090
feat: add extra_c_crossattns support for llm embedder (#1265) 2026-02-10 00:00:17 +08:00
80 changed files with 14671 additions and 12381 deletions

View File

@ -21,11 +21,13 @@ on:
"**/*.c", "**/*.c",
"**/*.cpp", "**/*.cpp",
"**/*.cu", "**/*.cu",
"examples/server/frontend/**",
] ]
pull_request: pull_request:
types: [opened, synchronize, reopened] types: [opened, synchronize, reopened]
paths: paths:
[ [
".github/workflows/**",
"**/CMakeLists.txt", "**/CMakeLists.txt",
"**/Makefile", "**/Makefile",
"**/*.h", "**/*.h",
@ -33,6 +35,7 @@ on:
"**/*.c", "**/*.c",
"**/*.cpp", "**/*.cpp",
"**/*.cu", "**/*.cu",
"examples/server/frontend/**",
] ]
env: env:
@ -53,6 +56,16 @@ jobs:
with: with:
submodules: recursive submodules: recursive
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: 20
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9
- name: Dependencies - name: Dependencies
id: depends id: depends
run: | run: |
@ -70,7 +83,7 @@ jobs:
- name: Get commit hash - name: Get commit hash
id: commit id: commit
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
uses: pr-mpt/actions-commit-hash@v2 uses: prompt/actions-commit-hash@v2
- name: Fetch system info - name: Fetch system info
id: system-info id: system-info
@ -106,6 +119,16 @@ jobs:
with: with:
submodules: recursive submodules: recursive
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: 20
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9
- name: Dependencies - name: Dependencies
id: depends id: depends
run: | run: |
@ -123,7 +146,7 @@ jobs:
- name: Get commit hash - name: Get commit hash
id: commit id: commit
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
uses: pr-mpt/actions-commit-hash@v2 uses: prompt/actions-commit-hash@v2
- name: Fetch system info - name: Fetch system info
id: system-info id: system-info
@ -162,7 +185,7 @@ jobs:
strategy: strategy:
matrix: matrix:
variant: [musa, sycl, vulkan] variant: [musa, sycl, vulkan, cuda]
env: env:
REGISTRY: ghcr.io REGISTRY: ghcr.io
@ -174,10 +197,20 @@ jobs:
with: with:
submodules: recursive submodules: recursive
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: 20
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9
- name: Get commit hash - name: Get commit hash
id: commit id: commit
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
uses: pr-mpt/actions-commit-hash@v2 uses: prompt/actions-commit-hash@v2
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3 uses: docker/setup-buildx-action@v3
@ -223,6 +256,16 @@ jobs:
with: with:
submodules: recursive submodules: recursive
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: 20
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9
- name: Dependencies - name: Dependencies
id: depends id: depends
run: | run: |
@ -240,7 +283,7 @@ jobs:
- name: Get commit hash - name: Get commit hash
id: commit id: commit
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
uses: pr-mpt/actions-commit-hash@v2 uses: prompt/actions-commit-hash@v2
- name: Fetch system info - name: Fetch system info
id: system-info id: system-info
@ -294,6 +337,16 @@ jobs:
with: with:
submodules: recursive submodules: recursive
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: 20
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9
- name: Install cuda-toolkit - name: Install cuda-toolkit
id: cuda-toolkit id: cuda-toolkit
if: ${{ matrix.build == 'cuda12' }} if: ${{ matrix.build == 'cuda12' }}
@ -340,7 +393,7 @@ jobs:
- name: Get commit hash - name: Get commit hash
id: commit id: commit
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
uses: pr-mpt/actions-commit-hash@v2 uses: prompt/actions-commit-hash@v2
- name: Pack artifacts - name: Pack artifacts
id: pack_artifacts id: pack_artifacts
@ -399,6 +452,16 @@ jobs:
with: with:
submodules: recursive submodules: recursive
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: 20
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9
- name: Cache ROCm Installation - name: Cache ROCm Installation
id: cache-rocm id: cache-rocm
uses: actions/cache@v4 uses: actions/cache@v4
@ -463,7 +526,7 @@ jobs:
- name: Get commit hash - name: Get commit hash
id: commit id: commit
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
uses: pr-mpt/actions-commit-hash@v2 uses: prompt/actions-commit-hash@v2
- name: Pack artifacts - name: Pack artifacts
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
@ -502,6 +565,16 @@ jobs:
with: with:
submodules: recursive submodules: recursive
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: 20
- name: Setup pnpm
uses: pnpm/action-setup@v4
with:
version: 9
- name: Free disk space - name: Free disk space
run: | run: |
# Remove preinstalled SDKs and caches not needed for this job # Remove preinstalled SDKs and caches not needed for this job
@ -535,31 +608,30 @@ jobs:
# Add ROCm to PATH for current session # Add ROCm to PATH for current session
echo "/opt/rocm/bin" >> $GITHUB_PATH echo "/opt/rocm/bin" >> $GITHUB_PATH
# Build case pattern from GPU_TARGETS # Build regex pattern from ${{ env.GPU_TARGETS }} (match target as substring)
PATTERN=$(printf '%s' "$GPU_TARGETS" | sed 's/;/\*|\*/g') TARGET_REGEX="($(printf '%s' "${{ env.GPU_TARGETS }}" | sed 's/;/|/g'))"
PATTERN="*${PATTERN}*"
# Remove library files for architectures we're not building for to save disk space # Remove library files for architectures we're not building for to save disk space
echo "Cleaning up unneeded architecture files..." echo "Cleaning up unneeded architecture files..."
cd /opt/rocm/lib/rocblas/library cd /opt/rocm/lib/rocblas/library
# Keep only our target architectures # Keep only our target architectures
for file in *; do for file in *; do
case "$file" in if printf '%s' "$file" | grep -q 'gfx'; then
$PATTERN) if ! printf '%s' "$file" | grep -Eq "$TARGET_REGEX"; then
;; echo "Removing $file" &&
*) sudo rm -f "$file";
sudo rm -f "$file" ;; fi
esac; fi
done done
cd /opt/rocm/lib/hipblaslt/library cd /opt/rocm/lib/hipblaslt/library
for file in *; do for file in *; do
case "$file" in if printf '%s' "$file" | grep -q 'gfx'; then
$PATTERN) if ! printf '%s' "$file" | grep -Eq "$TARGET_REGEX"; then
;; echo "Removing $file" &&
*) sudo rm -f "$file";
sudo rm -f "$file" ;; fi
esac; fi
done done
- name: Build - name: Build
@ -582,7 +654,7 @@ jobs:
- name: Get commit hash - name: Get commit hash
id: commit id: commit
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
uses: pr-mpt/actions-commit-hash@v2 uses: prompt/actions-commit-hash@v2
- name: Prepare artifacts - name: Prepare artifacts
id: prepare_artifacts id: prepare_artifacts
@ -592,21 +664,15 @@ jobs:
cp ggml/LICENSE ./build/bin/ggml.txt cp ggml/LICENSE ./build/bin/ggml.txt
cp LICENSE ./build/bin/stable-diffusion.cpp.txt cp LICENSE ./build/bin/stable-diffusion.cpp.txt
# Create directories for ROCm libraries # Move ROCm runtime libraries (to avoid double space consumption)
mkdir -p ./build/bin/rocblas/library sudo mv /opt/rocm/lib/librocsparse.so* ./build/bin/
mkdir -p ./build/bin/hipblaslt/library sudo mv /opt/rocm/lib/libhsa-runtime64.so* ./build/bin/
sudo mv /opt/rocm/lib/libamdhip64.so* ./build/bin/
# Copy ROCm runtime libraries (use || true to continue if files don't exist) sudo mv /opt/rocm/lib/libhipblas.so* ./build/bin/
cp /opt/rocm/lib/librocsparse.so* ./build/bin/ || true sudo mv /opt/rocm/lib/libhipblaslt.so* ./build/bin/
cp /opt/rocm/lib/libhsa-runtime64.so* ./build/bin/ || true sudo mv /opt/rocm/lib/librocblas.so* ./build/bin/
cp /opt/rocm/lib/libamdhip64.so* ./build/bin/ || true sudo mv /opt/rocm/lib/rocblas/ ./build/bin/
cp /opt/rocm/lib/libhipblas.so* ./build/bin/ || true sudo mv /opt/rocm/lib/hipblaslt/ ./build/bin/
cp /opt/rocm/lib/libhipblaslt.so* ./build/bin/ || true
cp /opt/rocm/lib/librocblas.so* ./build/bin/ || true
# Copy library files (already filtered to target architectures)
cp /opt/rocm/lib/rocblas/library/* ./build/bin/rocblas/library/ || true
cp /opt/rocm/lib/hipblaslt/library/* ./build/bin/hipblaslt/library/ || true
- name: Fetch system info - name: Fetch system info
id: system-info id: system-info
@ -622,7 +688,7 @@ jobs:
run: | run: |
cp ggml/LICENSE ./build/bin/ggml.txt cp ggml/LICENSE ./build/bin/ggml.txt
cp LICENSE ./build/bin/stable-diffusion.cpp.txt cp LICENSE ./build/bin/stable-diffusion.cpp.txt
zip -j sd-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-${{ steps.system-info.outputs.OS_TYPE }}-Ubuntu-${{ env.UBUNTU_VERSION }}-${{ steps.system-info.outputs.CPU_ARCH }}-rocm.zip ./build/bin/* zip -y -r sd-${{ env.BRANCH_NAME }}-${{ steps.commit.outputs.short }}-bin-${{ steps.system-info.outputs.OS_TYPE }}-Ubuntu-${{ env.UBUNTU_VERSION }}-${{ steps.system-info.outputs.CPU_ARCH }}-rocm.zip ./build/bin
- name: Upload artifacts - name: Upload artifacts
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
@ -667,7 +733,7 @@ jobs:
- name: Get commit hash - name: Get commit hash
id: commit id: commit
uses: pr-mpt/actions-commit-hash@v2 uses: prompt/actions-commit-hash@v2
- name: Create release - name: Create release
id: create_release id: create_release

3
.gitmodules vendored
View File

@ -1,3 +1,6 @@
[submodule "ggml"] [submodule "ggml"]
path = ggml path = ggml
url = https://github.com/ggml-org/ggml.git url = https://github.com/ggml-org/ggml.git
[submodule "examples/server/frontend"]
path = examples/server/frontend
url = https://github.com/leejet/stable-ui.git

View File

@ -36,7 +36,6 @@ option(SD_VULKAN "sd: vulkan backend" OFF)
option(SD_OPENCL "sd: opencl backend" OFF) option(SD_OPENCL "sd: opencl backend" OFF)
option(SD_SYCL "sd: sycl backend" OFF) option(SD_SYCL "sd: sycl backend" OFF)
option(SD_MUSA "sd: musa backend" OFF) option(SD_MUSA "sd: musa backend" OFF)
option(SD_FAST_SOFTMAX "sd: x1.5 faster softmax, indeterministic (sometimes, same seed don't generate same image), cuda only" OFF)
option(SD_BUILD_SHARED_LIBS "sd: build shared libs" OFF) option(SD_BUILD_SHARED_LIBS "sd: build shared libs" OFF)
option(SD_BUILD_SHARED_GGML_LIB "sd: build ggml as a separate shared lib" OFF) option(SD_BUILD_SHARED_GGML_LIB "sd: build ggml as a separate shared lib" OFF)
option(SD_USE_SYSTEM_GGML "sd: use system-installed GGML library" OFF) option(SD_USE_SYSTEM_GGML "sd: use system-installed GGML library" OFF)
@ -70,26 +69,22 @@ if (SD_HIPBLAS)
message("-- Use HIPBLAS as backend stable-diffusion") message("-- Use HIPBLAS as backend stable-diffusion")
set(GGML_HIP ON) set(GGML_HIP ON)
add_definitions(-DSD_USE_CUDA) add_definitions(-DSD_USE_CUDA)
if(SD_FAST_SOFTMAX)
set(GGML_CUDA_FAST_SOFTMAX ON)
endif()
endif () endif ()
if(SD_MUSA) if(SD_MUSA)
message("-- Use MUSA as backend stable-diffusion") message("-- Use MUSA as backend stable-diffusion")
set(GGML_MUSA ON) set(GGML_MUSA ON)
add_definitions(-DSD_USE_CUDA) add_definitions(-DSD_USE_CUDA)
if(SD_FAST_SOFTMAX)
set(GGML_CUDA_FAST_SOFTMAX ON)
endif()
endif() endif()
set(SD_LIB stable-diffusion) set(SD_LIB stable-diffusion)
file(GLOB SD_LIB_SOURCES file(GLOB SD_LIB_SOURCES
"*.h" "src/*.h"
"*.cpp" "src/*.cpp"
"*.hpp" "src/*.hpp"
"src/vocab/*.h"
"src/vocab/*.cpp"
) )
find_program(GIT_EXE NAMES git git.exe NO_CMAKE_FIND_ROOT_PATH) find_program(GIT_EXE NAMES git git.exe NO_CMAKE_FIND_ROOT_PATH)
@ -119,7 +114,7 @@ endif()
message(STATUS "stable-diffusion.cpp commit ${SDCPP_BUILD_COMMIT}") message(STATUS "stable-diffusion.cpp commit ${SDCPP_BUILD_COMMIT}")
set_property( set_property(
SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/version.cpp SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/src/version.cpp
APPEND PROPERTY COMPILE_DEFINITIONS APPEND PROPERTY COMPILE_DEFINITIONS
SDCPP_BUILD_COMMIT=${SDCPP_BUILD_COMMIT} SDCPP_BUILD_VERSION=${SDCPP_BUILD_VERSION} SDCPP_BUILD_COMMIT=${SDCPP_BUILD_COMMIT} SDCPP_BUILD_VERSION=${SDCPP_BUILD_VERSION}
) )
@ -182,6 +177,7 @@ endif()
add_subdirectory(thirdparty) add_subdirectory(thirdparty)
target_link_libraries(${SD_LIB} PUBLIC ggml zip) target_link_libraries(${SD_LIB} PUBLIC ggml zip)
target_include_directories(${SD_LIB} PUBLIC . include)
target_include_directories(${SD_LIB} PUBLIC . thirdparty) target_include_directories(${SD_LIB} PUBLIC . thirdparty)
target_compile_features(${SD_LIB} PUBLIC c_std_11 cxx_std_17) target_compile_features(${SD_LIB} PUBLIC c_std_11 cxx_std_17)
@ -190,7 +186,7 @@ if (SD_BUILD_EXAMPLES)
add_subdirectory(examples) add_subdirectory(examples)
endif() endif()
set(SD_PUBLIC_HEADERS stable-diffusion.h) set(SD_PUBLIC_HEADERS include/stable-diffusion.h)
set_target_properties(${SD_LIB} PROPERTIES PUBLIC_HEADER "${SD_PUBLIC_HEADERS}") set_target_properties(${SD_LIB} PROPERTIES PUBLIC_HEADER "${SD_PUBLIC_HEADERS}")
install(TARGETS ${SD_LIB} LIBRARY PUBLIC_HEADER) install(TARGETS ${SD_LIB} LIBRARY PUBLIC_HEADER)

25
Dockerfile.cuda Normal file
View File

@ -0,0 +1,25 @@
ARG CUDA_VERSION=12.6.3
ARG UBUNTU_VERSION=24.04
FROM nvidia/cuda:${CUDA_VERSION}-cudnn-devel-ubuntu${UBUNTU_VERSION} AS build
RUN apt-get update && apt-get install -y --no-install-recommends build-essential git ccache cmake
WORKDIR /sd.cpp
COPY . .
ARG CUDACXX=/usr/local/cuda/bin/nvcc
RUN cmake . -B ./build -DSD_CUDA=ON
RUN cmake --build ./build --config Release -j$(nproc)
FROM nvidia/cuda:${CUDA_VERSION}-cudnn-runtime-ubuntu${UBUNTU_VERSION} AS runtime
RUN apt-get update && \
apt-get install --yes --no-install-recommends libgomp1 && \
apt-get clean
COPY --from=build /sd.cpp/build/bin/sd-cli /sd-cli
COPY --from=build /sd.cpp/build/bin/sd-server /sd-server
ENTRYPOINT [ "/sd-cli" ]

View File

@ -53,6 +53,7 @@ API and command-line option may change frequently.***
- [Qwen Image](./docs/qwen_image.md) - [Qwen Image](./docs/qwen_image.md)
- [Z-Image](./docs/z_image.md) - [Z-Image](./docs/z_image.md)
- [Ovis-Image](./docs/ovis_image.md) - [Ovis-Image](./docs/ovis_image.md)
- [Anima](./docs/anima.md)
- Image Edit Models - Image Edit Models
- [FLUX.1-Kontext-dev](./docs/kontext.md) - [FLUX.1-Kontext-dev](./docs/kontext.md)
- [Qwen Image Edit series](./docs/qwen_image_edit.md) - [Qwen Image Edit series](./docs/qwen_image_edit.md)
@ -139,6 +140,7 @@ If you want to improve performance or reduce VRAM/RAM usage, please refer to [pe
- [🔥Wan2.1/Wan2.2](./docs/wan.md) - [🔥Wan2.1/Wan2.2](./docs/wan.md)
- [🔥Z-Image](./docs/z_image.md) - [🔥Z-Image](./docs/z_image.md)
- [Ovis-Image](./docs/ovis_image.md) - [Ovis-Image](./docs/ovis_image.md)
- [Anima](./docs/anima.md)
- [LoRA](./docs/lora.md) - [LoRA](./docs/lora.md)
- [LCM/LCM-LoRA](./docs/lcm.md) - [LCM/LCM-LoRA](./docs/lcm.md)
- [Using PhotoMaker to personalize image generation](./docs/photo_maker.md) - [Using PhotoMaker to personalize image generation](./docs/photo_maker.md)

BIN
assets/anima/example.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 230 KiB

File diff suppressed because it is too large Load Diff

21
docs/anima.md Normal file
View File

@ -0,0 +1,21 @@
# How to Use
## Download weights
- Download Anima
- safetensors: https://huggingface.co/circlestone-labs/Anima/tree/main/split_files/diffusion_models
- gguf: https://huggingface.co/Bedovyy/Anima-GGUF/tree/main
- gguf Anima2: https://huggingface.co/JusteLeo/Anima2-GGUF/tree/main
- Download vae
- safetensors: https://huggingface.co/circlestone-labs/Anima/tree/main/split_files/vae
- Download Qwen3-0.6B-Base
- safetensors: https://huggingface.co/circlestone-labs/Anima/tree/main/split_files/text_encoders
- gguf: https://huggingface.co/mradermacher/Qwen3-0.6B-Base-GGUF/tree/main
## Examples
```sh
.\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\anima-preview.safetensors --vae ..\..\ComfyUI\models\vae\qwen_image_vae.safetensors --llm ..\..\ComfyUI\models\text_encoders\qwen_3_06b_base.safetensors -p "a lovely cat holding a sign says 'anima.cpp'" --cfg-scale 6.0 --sampling-method euler -v --offload-to-cpu --diffusion-fa
```
<img alt="anima image example" src="../assets/anima/example.png" />

View File

@ -11,6 +11,7 @@ Caching methods accelerate diffusion inference by reusing intermediate computati
| `dbcache` | DiT models | Block-level L1 residual threshold | | `dbcache` | DiT models | Block-level L1 residual threshold |
| `taylorseer` | DiT models | Taylor series approximation | | `taylorseer` | DiT models | Taylor series approximation |
| `cache-dit` | DiT models | Combined DBCache + TaylorSeer | | `cache-dit` | DiT models | Combined DBCache + TaylorSeer |
| `spectrum` | UNET and DiT models | Chebyshev + Taylor output forecasting |
### UCache (UNET Models) ### UCache (UNET Models)
@ -79,7 +80,7 @@ Uses Taylor series approximation to predict block outputs:
Combines DBCache and TaylorSeer: Combines DBCache and TaylorSeer:
```bash ```bash
--cache-mode cache-dit --cache-preset fast --cache-mode cache-dit
``` ```
#### Parameters #### Parameters
@ -91,14 +92,6 @@ Combines DBCache and TaylorSeer:
| `threshold` | L1 residual difference threshold | 0.08 | | `threshold` | L1 residual difference threshold | 0.08 |
| `warmup` | Steps before caching starts | 8 | | `warmup` | Steps before caching starts | 8 |
#### Presets
Available presets: `slow`, `medium`, `fast`, `ultra` (or `s`, `m`, `f`, `u`).
```bash
--cache-mode cache-dit --cache-preset fast
```
#### SCM Options #### SCM Options
Steps Computation Mask controls which steps can be cached: Steps Computation Mask controls which steps can be cached:
@ -118,6 +111,28 @@ Mask values: `1` = compute, `0` = can cache.
--scm-policy dynamic --scm-policy dynamic
``` ```
### Spectrum (UNET and DiT Models)
Spectrum uses Chebyshev polynomial fitting blended with Taylor extrapolation to predict denoised outputs, skipping entire forward passes. Based on the paper [Spectrum: Adaptive Spectral Feature Forecasting for Efficient Diffusion Sampling](https://github.com/tingyu215/Spectrum).
```bash
sd-cli -m model.safetensors -p "a cat" --cache-mode spectrum
```
#### Parameters
| Parameter | Description | Default |
|-----------|-------------|---------|
| `w` | Chebyshev vs Taylor blend weight (0=Taylor, 1=Chebyshev) | 0.40 |
| `m` | Chebyshev polynomial degree | 3 |
| `lam` | Ridge regression regularization | 1.0 |
| `window` | Initial window size (compute every N steps) | 2 |
| `flex` | Window growth per computed step after warmup | 0.50 |
| `warmup` | Steps to always compute before caching starts | 4 |
| `stop` | Stop caching at this fraction of total steps | 0.9 |
```
### Performance Tips ### Performance Tips
- Start with default thresholds and adjust based on output quality - Start with default thresholds and adjust based on output quality

View File

@ -4,11 +4,12 @@
usage: ./bin/sd-cli [options] usage: ./bin/sd-cli [options]
CLI Options: CLI Options:
-o, --output <string> path to write result image to. you can use printf-style %d format specifiers for image sequences (default: ./output.png) (eg. output_%03d.png) -o, --output <string> path to write result image to. you can use printf-style %d format specifiers for image sequences (default:
--output-begin-idx <int> starting index for output image sequence, must be non-negative (default 0 if specified %d in output path, 1 otherwise) ./output.png) (eg. output_%03d.png)
--preview-path <string> path to write preview image to (default: ./preview.png) --preview-path <string> path to write preview image to (default: ./preview.png)
--preview-interval <int> interval in denoising steps between consecutive updates of the image preview file (default is 1, meaning updating at --preview-interval <int> interval in denoising steps between consecutive updates of the image preview file (default is 1, meaning updating at
every step) every step)
--output-begin-idx <int> starting index for output image sequence, must be non-negative (default 0 if specified %d in output path, 1 otherwise)
--canny apply canny preprocessor (edge detection) --canny apply canny preprocessor (edge detection)
--convert-name convert tensor name (for convert mode) --convert-name convert tensor name (for convert mode)
-v, --verbose print extra info -v, --verbose print extra info
@ -44,7 +45,6 @@ Context Options:
CPU physical cores CPU physical cores
--chroma-t5-mask-pad <int> t5 mask pad size of chroma --chroma-t5-mask-pad <int> t5 mask pad size of chroma
--vae-tile-overlap <float> tile overlap for vae tiling, in fraction of tile size (default: 0.5) --vae-tile-overlap <float> tile overlap for vae tiling, in fraction of tile size (default: 0.5)
--flow-shift <float> shift value for Flow models like SD3.x or WAN (default: auto)
--vae-tiling process vae in tiles to reduce memory usage --vae-tiling process vae in tiles to reduce memory usage
--force-sdxl-vae-conv-scale force use of conv scale on sdxl vae --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae
--offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed --offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed
@ -60,6 +60,7 @@ Context Options:
--circularx enable circular RoPE wrapping on x-axis (width) only --circularx enable circular RoPE wrapping on x-axis (width) only
--circulary enable circular RoPE wrapping on y-axis (height) only --circulary enable circular RoPE wrapping on y-axis (height) only
--chroma-disable-dit-mask disable dit mask for chroma --chroma-disable-dit-mask disable dit mask for chroma
--qwen-image-zero-cond-t enable zero_cond_t for qwen image
--chroma-enable-t5-mask enable t5 mask for chroma --chroma-enable-t5-mask enable t5 mask for chroma
--type weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K). If not specified, the default is the --type weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K). If not specified, the default is the
type of the weight file type of the weight file
@ -108,14 +109,15 @@ Generation Options:
medium medium
--skip-layer-start <float> SLG enabling point (default: 0.01) --skip-layer-start <float> SLG enabling point (default: 0.01)
--skip-layer-end <float> SLG disabling point (default: 0.2) --skip-layer-end <float> SLG disabling point (default: 0.2)
--eta <float> eta in DDIM, only for DDIM/TCD/res_multistep/res_2s (default: 0) --eta <float> eta in DDIM, only for DDIM and TCD (default: 0)
--flow-shift <float> shift value for Flow models like SD3.x or WAN (default: auto)
--high-noise-cfg-scale <float> (high noise) unconditional guidance scale: (default: 7.0) --high-noise-cfg-scale <float> (high noise) unconditional guidance scale: (default: 7.0)
--high-noise-img-cfg-scale <float> (high noise) image guidance scale for inpaint or instruct-pix2pix models (default: same as --cfg-scale) --high-noise-img-cfg-scale <float> (high noise) image guidance scale for inpaint or instruct-pix2pix models (default: same as --cfg-scale)
--high-noise-guidance <float> (high noise) distilled guidance scale for models with guidance input (default: 3.5) --high-noise-guidance <float> (high noise) distilled guidance scale for models with guidance input (default: 3.5)
--high-noise-slg-scale <float> (high noise) skip layer guidance (SLG) scale, only for DiT models: (default: 0) --high-noise-slg-scale <float> (high noise) skip layer guidance (SLG) scale, only for DiT models: (default: 0)
--high-noise-skip-layer-start <float> (high noise) SLG enabling point (default: 0.01) --high-noise-skip-layer-start <float> (high noise) SLG enabling point (default: 0.01)
--high-noise-skip-layer-end <float> (high noise) SLG disabling point (default: 0.2) --high-noise-skip-layer-end <float> (high noise) SLG disabling point (default: 0.2)
--high-noise-eta <float> (high noise) eta in DDIM, only for DDIM/TCD/res_multistep/res_2s (default: 0) --high-noise-eta <float> (high noise) eta in DDIM, only for DDIM and TCD (default: 0)
--strength <float> strength for noising/unnoising (default: 0.75) --strength <float> strength for noising/unnoising (default: 0.75)
--pm-style-strength <float> --pm-style-strength <float>
--control-strength <float> strength to apply Control Net (default: 0.9). 1.0 corresponds to full destruction of information in init image --control-strength <float> strength to apply Control Net (default: 0.9). 1.0 corresponds to full destruction of information in init image
@ -124,21 +126,24 @@ Generation Options:
--increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1). --increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1).
--disable-auto-resize-ref-image disable auto resize of ref images --disable-auto-resize-ref-image disable auto resize of ref images
-s, --seed RNG seed (default: 42, use random seed for < 0) -s, --seed RNG seed (default: 42, use random seed for < 0)
--sampling-method sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd, --sampling-method sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing,
res_multistep, res_2s] (default: euler for Flux/SD3/Wan, euler_a otherwise) tcd, res_multistep, res_2s] (default: euler for Flux/SD3/Wan, euler_a
--high-noise-sampling-method (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, otherwise)
tcd, res_multistep, res_2s] default: euler for Flux/SD3/Wan, euler_a otherwise --high-noise-sampling-method (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm,
ddim_trailing, tcd, res_multistep, res_2s] default: euler for Flux/SD3/Wan,
euler_a otherwise
--scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple, --scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple,
kl_optimal, lcm, bong_tangent], default: discrete kl_optimal, lcm, bong_tangent], default: discrete
--sigmas custom sigma values for the sampler, comma-separated (e.g., "14.61,7.8,3.5,0.0"). --sigmas custom sigma values for the sampler, comma-separated (e.g., "14.61,7.8,3.5,0.0").
--skip-layers layers to skip for SLG steps (default: [7,8,9]) --skip-layers layers to skip for SLG steps (default: [7,8,9])
--high-noise-skip-layers (high noise) layers to skip for SLG steps (default: [7,8,9]) --high-noise-skip-layers (high noise) layers to skip for SLG steps (default: [7,8,9])
-r, --ref-image reference image for Flux Kontext models (can be used multiple times) -r, --ref-image reference image for Flux Kontext models (can be used multiple times)
--cache-mode caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level) --cache-mode caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level),
'spectrum' (UNET/DiT Chebyshev+Taylor forecasting)
--cache-option named cache params (key=value format, comma-separated). easycache/ucache: --cache-option named cache params (key=value format, comma-separated). easycache/ucache:
threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. Examples: threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=;
"threshold=0.25" or "threshold=1.5,reset=0" spectrum: w=,m=,lam=,window=,flex=,warmup=,stop=. Examples:
--cache-preset cache-dit preset: 'slow'/'s', 'medium'/'m', 'fast'/'f', 'ultra'/'u' "threshold=0.25" or "threshold=1.5,reset=0" or "w=0.4,window=2"
--scm-mask SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache --scm-mask SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache
--scm-policy SCM policy: 'dynamic' (default) or 'static' --scm-policy SCM policy: 'dynamic' (default) or 'static'
``` ```

View File

@ -601,7 +601,7 @@ int main(int argc, const char* argv[]) {
if (gen_params.end_image_path.size() > 0) { if (gen_params.end_image_path.size() > 0) {
vae_decode_only = false; vae_decode_only = false;
if (!load_image_and_update_size(gen_params.init_image_path, end_image)) { if (!load_image_and_update_size(gen_params.end_image_path, end_image)) {
return 1; return 1;
} }
} }

View File

@ -581,10 +581,6 @@ struct SDContextParams {
"--vae-tile-overlap", "--vae-tile-overlap",
"tile overlap for vae tiling, in fraction of tile size (default: 0.5)", "tile overlap for vae tiling, in fraction of tile size (default: 0.5)",
&vae_tiling_params.target_overlap}, &vae_tiling_params.target_overlap},
{"",
"--flow-shift",
"shift value for Flow models like SD3.x or WAN (default: auto)",
&flow_shift},
}; };
options.bool_options = { options.bool_options = {
@ -903,7 +899,6 @@ struct SDContextParams {
<< " photo_maker_path: \"" << photo_maker_path << "\",\n" << " photo_maker_path: \"" << photo_maker_path << "\",\n"
<< " rng_type: " << sd_rng_type_name(rng_type) << ",\n" << " rng_type: " << sd_rng_type_name(rng_type) << ",\n"
<< " sampler_rng_type: " << sd_rng_type_name(sampler_rng_type) << ",\n" << " sampler_rng_type: " << sd_rng_type_name(sampler_rng_type) << ",\n"
<< " flow_shift: " << (std::isinf(flow_shift) ? "INF" : std::to_string(flow_shift)) << "\n"
<< " offload_params_to_cpu: " << (offload_params_to_cpu ? "true" : "false") << ",\n" << " offload_params_to_cpu: " << (offload_params_to_cpu ? "true" : "false") << ",\n"
<< " enable_mmap: " << (enable_mmap ? "true" : "false") << ",\n" << " enable_mmap: " << (enable_mmap ? "true" : "false") << ",\n"
<< " control_net_cpu: " << (control_net_cpu ? "true" : "false") << ",\n" << " control_net_cpu: " << (control_net_cpu ? "true" : "false") << ",\n"
@ -986,7 +981,6 @@ struct SDContextParams {
chroma_use_t5_mask, chroma_use_t5_mask,
chroma_t5_mask_pad, chroma_t5_mask_pad,
qwen_image_zero_cond_t, qwen_image_zero_cond_t,
flow_shift,
}; };
return sd_ctx_params; return sd_ctx_params;
} }
@ -1053,7 +1047,6 @@ struct SDGenerationParams {
std::string cache_mode; std::string cache_mode;
std::string cache_option; std::string cache_option;
std::string cache_preset;
std::string scm_mask; std::string scm_mask;
bool scm_policy_dynamic = true; bool scm_policy_dynamic = true;
sd_cache_params_t cache_params{}; sd_cache_params_t cache_params{};
@ -1206,6 +1199,10 @@ struct SDGenerationParams {
"--eta", "--eta",
"eta in DDIM, only for DDIM and TCD (default: 0)", "eta in DDIM, only for DDIM and TCD (default: 0)",
&sample_params.eta}, &sample_params.eta},
{"",
"--flow-shift",
"shift value for Flow models like SD3.x or WAN (default: auto)",
&sample_params.flow_shift},
{"", {"",
"--high-noise-cfg-scale", "--high-noise-cfg-scale",
"(high noise) unconditional guidance scale: (default: 7.0)", "(high noise) unconditional guidance scale: (default: 7.0)",
@ -1424,8 +1421,8 @@ struct SDGenerationParams {
} }
cache_mode = argv_to_utf8(index, argv); cache_mode = argv_to_utf8(index, argv);
if (cache_mode != "easycache" && cache_mode != "ucache" && if (cache_mode != "easycache" && cache_mode != "ucache" &&
cache_mode != "dbcache" && cache_mode != "taylorseer" && cache_mode != "cache-dit") { cache_mode != "dbcache" && cache_mode != "taylorseer" && cache_mode != "cache-dit" && cache_mode != "spectrum") {
fprintf(stderr, "error: invalid cache mode '%s', must be 'easycache', 'ucache', 'dbcache', 'taylorseer', or 'cache-dit'\n", cache_mode.c_str()); fprintf(stderr, "error: invalid cache mode '%s', must be 'easycache', 'ucache', 'dbcache', 'taylorseer', 'cache-dit', or 'spectrum'\n", cache_mode.c_str());
return -1; return -1;
} }
return 1; return 1;
@ -1463,21 +1460,6 @@ struct SDGenerationParams {
return 1; return 1;
}; };
auto on_cache_preset_arg = [&](int argc, const char** argv, int index) {
if (++index >= argc) {
return -1;
}
cache_preset = argv_to_utf8(index, argv);
if (cache_preset != "slow" && cache_preset != "s" && cache_preset != "S" &&
cache_preset != "medium" && cache_preset != "m" && cache_preset != "M" &&
cache_preset != "fast" && cache_preset != "f" && cache_preset != "F" &&
cache_preset != "ultra" && cache_preset != "u" && cache_preset != "U") {
fprintf(stderr, "error: invalid cache preset '%s', must be 'slow'/'s', 'medium'/'m', 'fast'/'f', or 'ultra'/'u'\n", cache_preset.c_str());
return -1;
}
return 1;
};
options.manual_options = { options.manual_options = {
{"-s", {"-s",
"--seed", "--seed",
@ -1515,16 +1497,12 @@ struct SDGenerationParams {
on_ref_image_arg}, on_ref_image_arg},
{"", {"",
"--cache-mode", "--cache-mode",
"caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level)", "caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level), 'spectrum' (UNET/DiT Chebyshev+Taylor forecasting)",
on_cache_mode_arg}, on_cache_mode_arg},
{"", {"",
"--cache-option", "--cache-option",
"named cache params (key=value format, comma-separated). easycache/ucache: threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. Examples: \"threshold=0.25\" or \"threshold=1.5,reset=0\"", "named cache params (key=value format, comma-separated). easycache/ucache: threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=; spectrum: w=,m=,lam=,window=,flex=,warmup=,stop=. Examples: \"threshold=0.25\" or \"threshold=1.5,reset=0\"",
on_cache_option_arg}, on_cache_option_arg},
{"",
"--cache-preset",
"cache-dit preset: 'slow'/'s', 'medium'/'m', 'fast'/'f', 'ultra'/'u'",
on_cache_preset_arg},
{"", {"",
"--scm-mask", "--scm-mask",
"SCM steps mask for cache-dit: comma-separated 0/1 (e.g., \"1,1,1,0,0,1,0,0,1,0\") - 1=compute, 0=can cache", "SCM steps mask for cache-dit: comma-separated 0/1 (e.g., \"1,1,1,0,0,1,0,0,1,0\") - 1=compute, 0=can cache",
@ -1577,7 +1555,6 @@ struct SDGenerationParams {
load_if_exists("negative_prompt", negative_prompt); load_if_exists("negative_prompt", negative_prompt);
load_if_exists("cache_mode", cache_mode); load_if_exists("cache_mode", cache_mode);
load_if_exists("cache_option", cache_option); load_if_exists("cache_option", cache_option);
load_if_exists("cache_preset", cache_preset);
load_if_exists("scm_mask", scm_mask); load_if_exists("scm_mask", scm_mask);
load_if_exists("clip_skip", clip_skip); load_if_exists("clip_skip", clip_skip);
@ -1606,6 +1583,7 @@ struct SDGenerationParams {
load_if_exists("cfg_scale", sample_params.guidance.txt_cfg); load_if_exists("cfg_scale", sample_params.guidance.txt_cfg);
load_if_exists("img_cfg_scale", sample_params.guidance.img_cfg); load_if_exists("img_cfg_scale", sample_params.guidance.img_cfg);
load_if_exists("guidance", sample_params.guidance.distilled_guidance); load_if_exists("guidance", sample_params.guidance.distilled_guidance);
load_if_exists("flow_shift", sample_params.flow_shift);
auto load_sampler_if_exists = [&](const char* key, enum sample_method_t& out) { auto load_sampler_if_exists = [&](const char* key, enum sample_method_t& out) {
if (j.contains(key) && j[key].is_string()) { if (j.contains(key) && j[key].is_string()) {
@ -1780,7 +1758,23 @@ struct SDGenerationParams {
} else if (key == "Bn" || key == "bn") { } else if (key == "Bn" || key == "bn") {
cache_params.Bn_compute_blocks = std::stoi(val); cache_params.Bn_compute_blocks = std::stoi(val);
} else if (key == "warmup") { } else if (key == "warmup") {
cache_params.max_warmup_steps = std::stoi(val); if (cache_mode == "spectrum") {
cache_params.spectrum_warmup_steps = std::stoi(val);
} else {
cache_params.max_warmup_steps = std::stoi(val);
}
} else if (key == "w") {
cache_params.spectrum_w = std::stof(val);
} else if (key == "m") {
cache_params.spectrum_m = std::stoi(val);
} else if (key == "lam") {
cache_params.spectrum_lam = std::stof(val);
} else if (key == "window") {
cache_params.spectrum_window_size = std::stoi(val);
} else if (key == "flex") {
cache_params.spectrum_flex_window = std::stof(val);
} else if (key == "stop") {
cache_params.spectrum_stop_percent = std::stof(val);
} else { } else {
LOG_ERROR("error: unknown cache parameter '%s'", key.c_str()); LOG_ERROR("error: unknown cache parameter '%s'", key.c_str());
return false; return false;
@ -1795,39 +1789,17 @@ struct SDGenerationParams {
if (!cache_mode.empty()) { if (!cache_mode.empty()) {
if (cache_mode == "easycache") { if (cache_mode == "easycache") {
cache_params.mode = SD_CACHE_EASYCACHE; cache_params.mode = SD_CACHE_EASYCACHE;
cache_params.reuse_threshold = 0.2f;
cache_params.start_percent = 0.15f;
cache_params.end_percent = 0.95f;
cache_params.error_decay_rate = 1.0f;
cache_params.use_relative_threshold = true;
cache_params.reset_error_on_compute = true;
} else if (cache_mode == "ucache") { } else if (cache_mode == "ucache") {
cache_params.mode = SD_CACHE_UCACHE; cache_params.mode = SD_CACHE_UCACHE;
cache_params.reuse_threshold = 1.0f;
cache_params.start_percent = 0.15f;
cache_params.end_percent = 0.95f;
cache_params.error_decay_rate = 1.0f;
cache_params.use_relative_threshold = true;
cache_params.reset_error_on_compute = true;
} else if (cache_mode == "dbcache") { } else if (cache_mode == "dbcache") {
cache_params.mode = SD_CACHE_DBCACHE; cache_params.mode = SD_CACHE_DBCACHE;
cache_params.Fn_compute_blocks = 8;
cache_params.Bn_compute_blocks = 0;
cache_params.residual_diff_threshold = 0.08f;
cache_params.max_warmup_steps = 8;
} else if (cache_mode == "taylorseer") { } else if (cache_mode == "taylorseer") {
cache_params.mode = SD_CACHE_TAYLORSEER; cache_params.mode = SD_CACHE_TAYLORSEER;
cache_params.Fn_compute_blocks = 8;
cache_params.Bn_compute_blocks = 0;
cache_params.residual_diff_threshold = 0.08f;
cache_params.max_warmup_steps = 8;
} else if (cache_mode == "cache-dit") { } else if (cache_mode == "cache-dit") {
cache_params.mode = SD_CACHE_CACHE_DIT; cache_params.mode = SD_CACHE_CACHE_DIT;
cache_params.Fn_compute_blocks = 8; } else if (cache_mode == "spectrum") {
cache_params.Bn_compute_blocks = 0; cache_params.mode = SD_CACHE_SPECTRUM;
cache_params.residual_diff_threshold = 0.08f;
cache_params.max_warmup_steps = 8;
} }
if (!cache_option.empty()) { if (!cache_option.empty()) {

View File

@ -1,6 +1,73 @@
set(TARGET sd-server) set(TARGET sd-server)
option(SD_SERVER_BUILD_FRONTEND "Build server frontend with pnpm" ON)
set(FRONTEND_DIR "${CMAKE_CURRENT_SOURCE_DIR}/frontend")
set(GENERATED_HTML_HEADER "${FRONTEND_DIR}/dist/gen_index_html.h")
set(HAVE_FRONTEND_BUILD OFF)
if(SD_SERVER_BUILD_FRONTEND AND EXISTS "${FRONTEND_DIR}")
if(WIN32)
find_program(PNPM_EXECUTABLE NAMES pnpm.cmd pnpm)
else()
find_program(PNPM_EXECUTABLE NAMES pnpm)
endif()
if(PNPM_EXECUTABLE)
message(STATUS "Frontend dir found: ${FRONTEND_DIR}")
message(STATUS "pnpm found: ${PNPM_EXECUTABLE}")
set(HAVE_FRONTEND_BUILD ON)
add_custom_target(${TARGET}_frontend_install
COMMAND "${PNPM_EXECUTABLE}" -C "${FRONTEND_DIR}" install
WORKING_DIRECTORY "${FRONTEND_DIR}"
COMMENT "Installing frontend dependencies"
VERBATIM
)
add_custom_target(${TARGET}_frontend_build
COMMAND "${PNPM_EXECUTABLE}" -C "${FRONTEND_DIR}" run build
WORKING_DIRECTORY "${FRONTEND_DIR}"
COMMENT "Building frontend"
VERBATIM
)
add_custom_target(${TARGET}_frontend_header
COMMAND "${PNPM_EXECUTABLE}" -C "${FRONTEND_DIR}" run build:header
WORKING_DIRECTORY "${FRONTEND_DIR}"
COMMENT "Generating gen_index_html.h"
VERBATIM
)
add_dependencies(${TARGET}_frontend_build ${TARGET}_frontend_install)
add_dependencies(${TARGET}_frontend_header ${TARGET}_frontend_build)
add_custom_target(${TARGET}_frontend
DEPENDS ${TARGET}_frontend_header
)
set_source_files_properties("${GENERATED_HTML_HEADER}" PROPERTIES GENERATED TRUE)
else()
message(WARNING "pnpm not found, frontend build disabled")
endif()
else()
message(STATUS "Frontend disabled or directory not found: ${FRONTEND_DIR}")
endif()
add_executable(${TARGET} main.cpp) add_executable(${TARGET} main.cpp)
if(HAVE_FRONTEND_BUILD)
add_dependencies(${TARGET} ${TARGET}_frontend)
target_sources(${TARGET} PRIVATE "${GENERATED_HTML_HEADER}")
target_include_directories(${TARGET} PRIVATE "${FRONTEND_DIR}/dist")
target_compile_definitions(${TARGET} PRIVATE HAVE_INDEX_HTML)
message(STATUS "HAVE_INDEX_HTML enabled")
else()
message(STATUS "HAVE_INDEX_HTML disabled")
endif()
install(TARGETS ${TARGET} RUNTIME) install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE stable-diffusion ${CMAKE_THREAD_LIBS_INIT}) target_link_libraries(${TARGET} PRIVATE stable-diffusion ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PUBLIC c_std_11 cxx_std_17) target_compile_features(${TARGET} PUBLIC c_std_11 cxx_std_17)

View File

@ -1,15 +1,104 @@
# Frontend
## Build with Frontend
The server can optionally build the web frontend and embed it into the binary as `gen_index_html.h`.
### Requirements
Install the following tools:
* **Node.js** ≥ 22.18
https://nodejs.org/
* **pnpm** ≥ 10
Install via npm:
```bash
npm install -g pnpm
```
Verify installation:
```bash
node -v
pnpm -v
```
### Install frontend dependencies
Go to the frontend directory and install dependencies:
```bash
cd examples/server/frontend
pnpm install
```
### Build the server with CMake
Enable the frontend build option when configuring CMake:
```bash
cmake -B build -DSD_SERVER_BUILD_FRONTEND=ON
cmake --build build --config Release
```
If `pnpm` is available, the build system will automatically run:
```
pnpm run build
pnpm run build:header
```
and embed the generated frontend into the server binary.
## Frontend Repository
The web frontend is maintained in a **separate repository**, https://github.com/leejet/stable-ui.
If you want to modify the UI or frontend logic, please submit pull requests to the **frontend repository**.
This repository (`stable-diffusion.cpp`) only vendors the frontend periodically. Changes from the frontend repo are synchronized:
* approximately **every 12 weeks**, or
* when there are **major frontend updates**
Because of this, frontend changes will **not appear here immediately** after being merged upstream.
## Using an external frontend
By default, the server uses the **embedded frontend** generated during the build (`gen_index_html.h`).
You can also serve a custom frontend file instead of the embedded one by using:
```bash
--serve-html-path <path-to-index.html>
```
For example:
```bash
sd-server --serve-html-path ./index.html
```
In this case, the server will load and serve the specified `index.html` file instead of the embedded frontend. This is useful when:
* developing or testing frontend changes
* using a custom UI
* avoiding rebuilding the binary after frontend modifications
# Run # Run
``` ```
usage: ./bin/sd-server [options] usage: ./bin/sd-server [options]
Svr Options: Svr Options:
-l, --listen-ip <string> server listen ip (default: 127.0.0.1) -l, --listen-ip <string> server listen ip (default: 127.0.0.1)
--listen-port <int> server listen port (default: 1234) --serve-html-path <string> path to HTML file to serve at root (optional)
--serve-html-path <string> path to HTML file to serve at root (optional) --listen-port <int> server listen port (default: 1234)
-v, --verbose print extra info -v, --verbose print extra info
--color colors the logging tags according to level --color colors the logging tags according to level
-h, --help show this help message and exit -h, --help show this help message and exit
Context Options: Context Options:
-m, --model <string> path to full model -m, --model <string> path to full model
@ -36,14 +125,13 @@ Context Options:
CPU physical cores CPU physical cores
--chroma-t5-mask-pad <int> t5 mask pad size of chroma --chroma-t5-mask-pad <int> t5 mask pad size of chroma
--vae-tile-overlap <float> tile overlap for vae tiling, in fraction of tile size (default: 0.5) --vae-tile-overlap <float> tile overlap for vae tiling, in fraction of tile size (default: 0.5)
--flow-shift <float> shift value for Flow models like SD3.x or WAN (default: auto)
--vae-tiling process vae in tiles to reduce memory usage --vae-tiling process vae in tiles to reduce memory usage
--force-sdxl-vae-conv-scale force use of conv scale on sdxl vae --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae
--offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed --offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed
--mmap whether to memory-map model
--control-net-cpu keep controlnet in cpu (for low vram) --control-net-cpu keep controlnet in cpu (for low vram)
--clip-on-cpu keep clip in cpu (for low vram) --clip-on-cpu keep clip in cpu (for low vram)
--vae-on-cpu keep vae in cpu (for low vram) --vae-on-cpu keep vae in cpu (for low vram)
--mmap whether to memory-map model
--fa use flash attention --fa use flash attention
--diffusion-fa use flash attention in the diffusion model only --diffusion-fa use flash attention in the diffusion model only
--diffusion-conv-direct use ggml_conv2d_direct in the diffusion model --diffusion-conv-direct use ggml_conv2d_direct in the diffusion model
@ -52,6 +140,7 @@ Context Options:
--circularx enable circular RoPE wrapping on x-axis (width) only --circularx enable circular RoPE wrapping on x-axis (width) only
--circulary enable circular RoPE wrapping on y-axis (height) only --circulary enable circular RoPE wrapping on y-axis (height) only
--chroma-disable-dit-mask disable dit mask for chroma --chroma-disable-dit-mask disable dit mask for chroma
--qwen-image-zero-cond-t enable zero_cond_t for qwen image
--chroma-enable-t5-mask enable t5 mask for chroma --chroma-enable-t5-mask enable t5 mask for chroma
--type weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K). If not specified, the default is the --type weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K). If not specified, the default is the
type of the weight file type of the weight file
@ -100,14 +189,15 @@ Default Generation Options:
medium medium
--skip-layer-start <float> SLG enabling point (default: 0.01) --skip-layer-start <float> SLG enabling point (default: 0.01)
--skip-layer-end <float> SLG disabling point (default: 0.2) --skip-layer-end <float> SLG disabling point (default: 0.2)
--eta <float> eta in DDIM, only for DDIM/TCD/res_multistep/res_2s (default: 0) --eta <float> eta in DDIM, only for DDIM and TCD (default: 0)
--flow-shift <float> shift value for Flow models like SD3.x or WAN (default: auto)
--high-noise-cfg-scale <float> (high noise) unconditional guidance scale: (default: 7.0) --high-noise-cfg-scale <float> (high noise) unconditional guidance scale: (default: 7.0)
--high-noise-img-cfg-scale <float> (high noise) image guidance scale for inpaint or instruct-pix2pix models (default: same as --cfg-scale) --high-noise-img-cfg-scale <float> (high noise) image guidance scale for inpaint or instruct-pix2pix models (default: same as --cfg-scale)
--high-noise-guidance <float> (high noise) distilled guidance scale for models with guidance input (default: 3.5) --high-noise-guidance <float> (high noise) distilled guidance scale for models with guidance input (default: 3.5)
--high-noise-slg-scale <float> (high noise) skip layer guidance (SLG) scale, only for DiT models: (default: 0) --high-noise-slg-scale <float> (high noise) skip layer guidance (SLG) scale, only for DiT models: (default: 0)
--high-noise-skip-layer-start <float> (high noise) SLG enabling point (default: 0.01) --high-noise-skip-layer-start <float> (high noise) SLG enabling point (default: 0.01)
--high-noise-skip-layer-end <float> (high noise) SLG disabling point (default: 0.2) --high-noise-skip-layer-end <float> (high noise) SLG disabling point (default: 0.2)
--high-noise-eta <float> (high noise) eta in DDIM, only for DDIM/TCD/res_multistep/res_2s (default: 0) --high-noise-eta <float> (high noise) eta in DDIM, only for DDIM and TCD (default: 0)
--strength <float> strength for noising/unnoising (default: 0.75) --strength <float> strength for noising/unnoising (default: 0.75)
--pm-style-strength <float> --pm-style-strength <float>
--control-strength <float> strength to apply Control Net (default: 0.9). 1.0 corresponds to full destruction of information in init image --control-strength <float> strength to apply Control Net (default: 0.9). 1.0 corresponds to full destruction of information in init image
@ -116,21 +206,22 @@ Default Generation Options:
--increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1). --increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1).
--disable-auto-resize-ref-image disable auto resize of ref images --disable-auto-resize-ref-image disable auto resize of ref images
-s, --seed RNG seed (default: 42, use random seed for < 0) -s, --seed RNG seed (default: 42, use random seed for < 0)
--sampling-method sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd, --sampling-method sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing,
res_multistep, res_2s] (default: euler for Flux/SD3/Wan, euler_a otherwise) tcd, res_multistep, res_2s] (default: euler for Flux/SD3/Wan, euler_a
--high-noise-sampling-method (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, otherwise)
tcd, res_multistep, res_2s] default: euler for Flux/SD3/Wan, euler_a otherwise --high-noise-sampling-method (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm,
ddim_trailing, tcd, res_multistep, res_2s] default: euler for Flux/SD3/Wan,
euler_a otherwise
--scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple, --scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple,
kl_optimal, lcm, bong_tangent], default: discrete kl_optimal, lcm, bong_tangent], default: discrete
--sigmas custom sigma values for the sampler, comma-separated (e.g., "14.61,7.8,3.5,0.0"). --sigmas custom sigma values for the sampler, comma-separated (e.g., "14.61,7.8,3.5,0.0").
--skip-layers layers to skip for SLG steps (default: [7,8,9]) --skip-layers layers to skip for SLG steps (default: [7,8,9])
--high-noise-skip-layers (high noise) layers to skip for SLG steps (default: [7,8,9]) --high-noise-skip-layers (high noise) layers to skip for SLG steps (default: [7,8,9])
-r, --ref-image reference image for Flux Kontext models (can be used multiple times) -r, --ref-image reference image for Flux Kontext models (can be used multiple times)
--cache-mode caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level) --cache-mode caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level), 'spectrum' (UNET/DiT Chebyshev+Taylor forecasting)
--cache-option named cache params (key=value format, comma-separated). easycache/ucache: --cache-option named cache params (key=value format, comma-separated). easycache/ucache:
threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. Examples: threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. Examples:
"threshold=0.25" or "threshold=1.5,reset=0" "threshold=0.25" or "threshold=1.5,reset=0"
--cache-preset cache-dit preset: 'slow'/'s', 'medium'/'m', 'fast'/'f', 'ultra'/'u'
--scm-mask SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache --scm-mask SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache
--scm-policy SCM policy: 'dynamic' (default) or 'static' --scm-policy SCM policy: 'dynamic' (default) or 'static'
``` ```

@ -0,0 +1 @@
Subproject commit 1a34176cd6d39ad3a226b2b69047e71f6797f6bc

View File

@ -13,6 +13,10 @@
#include "common/common.hpp" #include "common/common.hpp"
#ifdef HAVE_INDEX_HTML
#include "frontend/dist/gen_index_html.h"
#endif
namespace fs = std::filesystem; namespace fs = std::filesystem;
// ----------------------- helpers ----------------------- // ----------------------- helpers -----------------------
@ -266,8 +270,21 @@ void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) {
struct LoraEntry { struct LoraEntry {
std::string name; std::string name;
std::string path; std::string path;
std::string fullpath;
}; };
void free_results(sd_image_t* result_images, int num_results) {
if (result_images) {
for (int i = 0; i < num_results; ++i) {
if (result_images[i].data) {
stbi_image_free(result_images[i].data);
result_images[i].data = nullptr;
}
}
}
free(result_images);
}
int main(int argc, const char** argv) { int main(int argc, const char** argv) {
if (argc > 1 && std::string(argv[1]) == "--version") { if (argc > 1 && std::string(argv[1]) == "--version") {
std::cout << version_string() << "\n"; std::cout << version_string() << "\n";
@ -321,7 +338,8 @@ int main(int argc, const char** argv) {
LoraEntry e; LoraEntry e;
e.name = p.stem().u8string(); e.name = p.stem().u8string();
std::string rel = fs::relative(p, lora_dir).u8string(); e.fullpath = p.u8string();
std::string rel = p.lexically_relative(lora_dir).u8string();
std::replace(rel.begin(), rel.end(), '\\', '/'); std::replace(rel.begin(), rel.end(), '\\', '/');
e.path = rel; e.path = rel;
@ -340,10 +358,11 @@ int main(int argc, const char** argv) {
} }
}; };
auto is_valid_lora_path = [&](const std::string& path) -> bool { auto get_lora_full_path = [&](const std::string& path) -> std::string {
std::lock_guard<std::mutex> lock(lora_mutex); std::lock_guard<std::mutex> lock(lora_mutex);
return std::any_of(lora_cache.begin(), lora_cache.end(), auto it = std::find_if(lora_cache.begin(), lora_cache.end(),
[&](const LoraEntry& e) { return e.path == path; }); [&](const LoraEntry& e) { return e.path == path; });
return (it != lora_cache.end()) ? it->fullpath : "";
}; };
httplib::Server svr; httplib::Server svr;
@ -365,7 +384,13 @@ int main(int argc, const char** argv) {
return httplib::Server::HandlerResponse::Unhandled; return httplib::Server::HandlerResponse::Unhandled;
}); });
// root // index html
std::string index_html;
#ifdef HAVE_INDEX_HTML
index_html.assign(reinterpret_cast<const char*>(index_html_bytes), index_html_size);
#else
index_html = "Stable Diffusion Server is running";
#endif
svr.Get("/", [&](const httplib::Request&, httplib::Response& res) { svr.Get("/", [&](const httplib::Request&, httplib::Response& res) {
if (!svr_params.serve_html_path.empty()) { if (!svr_params.serve_html_path.empty()) {
std::ifstream file(svr_params.serve_html_path); std::ifstream file(svr_params.serve_html_path);
@ -377,7 +402,7 @@ int main(int argc, const char** argv) {
res.set_content("Error: Unable to read HTML file", "text/plain"); res.set_content("Error: Unable to read HTML file", "text/plain");
} }
} else { } else {
res.set_content("Stable Diffusion Server is running", "text/plain"); res.set_content(index_html, "text/html");
} }
}); });
@ -404,8 +429,8 @@ int main(int argc, const char** argv) {
std::string size = j.value("size", ""); std::string size = j.value("size", "");
std::string output_format = j.value("output_format", "png"); std::string output_format = j.value("output_format", "png");
int output_compression = j.value("output_compression", 100); int output_compression = j.value("output_compression", 100);
int width = 512; int width = default_gen_params.width > 0 ? default_gen_params.width : 512;
int height = 512; int height = default_gen_params.width > 0 ? default_gen_params.height : 512;
if (!size.empty()) { if (!size.empty()) {
auto pos = size.find('x'); auto pos = size.find('x');
if (pos != std::string::npos) { if (pos != std::string::npos) {
@ -534,6 +559,7 @@ int main(int argc, const char** argv) {
item["b64_json"] = b64; item["b64_json"] = b64;
out["data"].push_back(item); out["data"].push_back(item);
} }
free_results(results, num_results);
res.set_content(out.dump(), "application/json"); res.set_content(out.dump(), "application/json");
res.status = 200; res.status = 200;
@ -564,8 +590,9 @@ int main(int argc, const char** argv) {
std::string sd_cpp_extra_args_str = extract_and_remove_sd_cpp_extra_args(prompt); std::string sd_cpp_extra_args_str = extract_and_remove_sd_cpp_extra_args(prompt);
size_t image_count = req.form.get_file_count("image[]"); size_t image_count = req.form.get_file_count("image[]");
if (image_count == 0) { bool has_legacy_image = req.form.has_file("image");
if (image_count == 0 && !has_legacy_image) {
res.status = 400; res.status = 400;
res.set_content(R"({"error":"at least one image[] required"})", "application/json"); res.set_content(R"({"error":"at least one image[] required"})", "application/json");
return; return;
@ -576,6 +603,10 @@ int main(int argc, const char** argv) {
auto file = req.form.get_file("image[]", i); auto file = req.form.get_file("image[]", i);
images_bytes.emplace_back(file.content.begin(), file.content.end()); images_bytes.emplace_back(file.content.begin(), file.content.end());
} }
if (image_count == 0 && has_legacy_image) {
auto file = req.form.get_file("image");
images_bytes.emplace_back(file.content.begin(), file.content.end());
}
std::vector<uint8_t> mask_bytes; std::vector<uint8_t> mask_bytes;
if (req.form.has_file("mask")) { if (req.form.has_file("mask")) {
@ -593,7 +624,7 @@ int main(int argc, const char** argv) {
n = std::clamp(n, 1, 8); n = std::clamp(n, 1, 8);
std::string size = req.form.get_field("size"); std::string size = req.form.get_field("size");
int width = 512, height = 512; int width = -1, height = -1;
if (!size.empty()) { if (!size.empty()) {
auto pos = size.find('x'); auto pos = size.find('x');
if (pos != std::string::npos) { if (pos != std::string::npos) {
@ -650,15 +681,31 @@ int main(int argc, const char** argv) {
LOG_DEBUG("%s\n", gen_params.to_string().c_str()); LOG_DEBUG("%s\n", gen_params.to_string().c_str());
sd_image_t init_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 3, nullptr}; sd_image_t init_image = {0, 0, 3, nullptr};
sd_image_t control_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 3, nullptr}; sd_image_t control_image = {0, 0, 3, nullptr};
std::vector<sd_image_t> pmid_images; std::vector<sd_image_t> pmid_images;
auto get_resolved_width = [&gen_params, &default_gen_params]() -> int {
if (gen_params.width > 0)
return gen_params.width;
if (default_gen_params.width > 0)
return default_gen_params.width;
return 512;
};
auto get_resolved_height = [&gen_params, &default_gen_params]() -> int {
if (gen_params.height > 0)
return gen_params.height;
if (default_gen_params.height > 0)
return default_gen_params.height;
return 512;
};
std::vector<sd_image_t> ref_images; std::vector<sd_image_t> ref_images;
ref_images.reserve(images_bytes.size()); ref_images.reserve(images_bytes.size());
for (auto& bytes : images_bytes) { for (auto& bytes : images_bytes) {
int img_w = width; int img_w;
int img_h = height; int img_h;
uint8_t* raw_pixels = load_image_from_memory( uint8_t* raw_pixels = load_image_from_memory(
reinterpret_cast<const char*>(bytes.data()), reinterpret_cast<const char*>(bytes.data()),
static_cast<int>(bytes.size()), static_cast<int>(bytes.size()),
@ -670,22 +717,31 @@ int main(int argc, const char** argv) {
} }
sd_image_t img{(uint32_t)img_w, (uint32_t)img_h, 3, raw_pixels}; sd_image_t img{(uint32_t)img_w, (uint32_t)img_h, 3, raw_pixels};
gen_params.set_width_and_height_if_unset(img.width, img.height);
ref_images.push_back(img); ref_images.push_back(img);
} }
sd_image_t mask_image = {0}; sd_image_t mask_image = {0};
if (!mask_bytes.empty()) { if (!mask_bytes.empty()) {
int mask_w = width; int expected_width = 0;
int mask_h = height; int expected_height = 0;
if (gen_params.width_and_height_are_set()) {
expected_width = gen_params.width;
expected_height = gen_params.height;
}
int mask_w;
int mask_h;
uint8_t* mask_raw = load_image_from_memory( uint8_t* mask_raw = load_image_from_memory(
reinterpret_cast<const char*>(mask_bytes.data()), reinterpret_cast<const char*>(mask_bytes.data()),
static_cast<int>(mask_bytes.size()), static_cast<int>(mask_bytes.size()),
mask_w, mask_h, mask_w, mask_h,
width, height, 1); expected_width, expected_height, 1);
mask_image = {(uint32_t)mask_w, (uint32_t)mask_h, 1, mask_raw}; mask_image = {(uint32_t)mask_w, (uint32_t)mask_h, 1, mask_raw};
gen_params.set_width_and_height_if_unset(mask_image.width, mask_image.height);
} else { } else {
mask_image.width = width; mask_image.width = get_resolved_width();
mask_image.height = height; mask_image.height = get_resolved_height();
mask_image.channel = 1; mask_image.channel = 1;
mask_image.data = nullptr; mask_image.data = nullptr;
} }
@ -702,8 +758,8 @@ int main(int argc, const char** argv) {
gen_params.auto_resize_ref_image, gen_params.auto_resize_ref_image,
gen_params.increase_ref_index, gen_params.increase_ref_index,
mask_image, mask_image,
gen_params.width, get_resolved_width(),
gen_params.height, get_resolved_height(),
gen_params.sample_params, gen_params.sample_params,
gen_params.strength, gen_params.strength,
gen_params.seed, gen_params.seed,
@ -748,6 +804,7 @@ int main(int argc, const char** argv) {
item["b64_json"] = b64; item["b64_json"] = b64;
out["data"].push_back(item); out["data"].push_back(item);
} }
free_results(results, num_results);
res.set_content(out.dump(), "application/json"); res.set_content(out.dump(), "application/json");
res.status = 200; res.status = 200;
@ -837,11 +894,12 @@ int main(int argc, const char** argv) {
return bad("lora.path required"); return bad("lora.path required");
} }
if (!is_valid_lora_path(path)) { std::string fullpath = get_lora_full_path(path);
if (fullpath.empty()) {
return bad("invalid lora path: " + path); return bad("invalid lora path: " + path);
} }
lora_path_storage.push_back(path); lora_path_storage.push_back(fullpath);
sd_lora_t l; sd_lora_t l;
l.is_high_noise = is_high_noise; l.is_high_noise = is_high_noise;
l.multiplier = multiplier; l.multiplier = multiplier;
@ -886,8 +944,6 @@ int main(int argc, const char** argv) {
SDGenerationParams gen_params = default_gen_params; SDGenerationParams gen_params = default_gen_params;
gen_params.prompt = prompt; gen_params.prompt = prompt;
gen_params.negative_prompt = negative_prompt; gen_params.negative_prompt = negative_prompt;
gen_params.width = width;
gen_params.height = height;
gen_params.seed = seed; gen_params.seed = seed;
gen_params.sample_params.sample_steps = steps; gen_params.sample_params.sample_steps = steps;
gen_params.batch_count = batch_size; gen_params.batch_count = batch_size;
@ -905,38 +961,66 @@ int main(int argc, const char** argv) {
gen_params.sample_params.scheduler = scheduler; gen_params.sample_params.scheduler = scheduler;
} }
// re-read to avoid applying 512 as default before the provided
// images and/or server command-line
gen_params.width = j.value("width", -1);
gen_params.height = j.value("height", -1);
LOG_DEBUG("%s\n", gen_params.to_string().c_str()); LOG_DEBUG("%s\n", gen_params.to_string().c_str());
sd_image_t init_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 3, nullptr}; sd_image_t init_image = {0, 0, 3, nullptr};
sd_image_t control_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 3, nullptr}; sd_image_t control_image = {0, 0, 3, nullptr};
sd_image_t mask_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 1, nullptr}; sd_image_t mask_image = {0, 0, 1, nullptr};
std::vector<uint8_t> mask_data; std::vector<uint8_t> mask_data;
std::vector<sd_image_t> pmid_images; std::vector<sd_image_t> pmid_images;
std::vector<sd_image_t> ref_images; std::vector<sd_image_t> ref_images;
if (img2img) { auto get_resolved_width = [&gen_params, &default_gen_params]() -> int {
auto decode_image = [](sd_image_t& image, std::string encoded) -> bool { if (gen_params.width > 0)
// remove data URI prefix if present ("data:image/png;base64,") return gen_params.width;
auto comma_pos = encoded.find(','); if (default_gen_params.width > 0)
if (comma_pos != std::string::npos) { return default_gen_params.width;
encoded = encoded.substr(comma_pos + 1); return 512;
} };
std::vector<uint8_t> img_data = base64_decode(encoded); auto get_resolved_height = [&gen_params, &default_gen_params]() -> int {
if (!img_data.empty()) { if (gen_params.height > 0)
int img_w = image.width; return gen_params.height;
int img_h = image.height; if (default_gen_params.height > 0)
uint8_t* raw_data = load_image_from_memory( return default_gen_params.height;
(const char*)img_data.data(), (int)img_data.size(), return 512;
img_w, img_h, };
image.width, image.height, image.channel);
if (raw_data) {
image = {(uint32_t)img_w, (uint32_t)img_h, image.channel, raw_data};
return true;
}
}
return false;
};
auto decode_image = [&gen_params](sd_image_t& image, std::string encoded) -> bool {
// remove data URI prefix if present ("data:image/png;base64,")
auto comma_pos = encoded.find(',');
if (comma_pos != std::string::npos) {
encoded = encoded.substr(comma_pos + 1);
}
std::vector<uint8_t> img_data = base64_decode(encoded);
if (!img_data.empty()) {
int expected_width = 0;
int expected_height = 0;
if (gen_params.width_and_height_are_set()) {
expected_width = gen_params.width;
expected_height = gen_params.height;
}
int img_w;
int img_h;
uint8_t* raw_data = load_image_from_memory(
(const char*)img_data.data(), (int)img_data.size(),
img_w, img_h,
expected_width, expected_height, image.channel);
if (raw_data) {
image = {(uint32_t)img_w, (uint32_t)img_h, image.channel, raw_data};
gen_params.set_width_and_height_if_unset(image.width, image.height);
return true;
}
}
return false;
};
if (img2img) {
if (j.contains("init_images") && j["init_images"].is_array() && !j["init_images"].empty()) { if (j.contains("init_images") && j["init_images"].is_array() && !j["init_images"].empty()) {
std::string encoded = j["init_images"][0].get<std::string>(); std::string encoded = j["init_images"][0].get<std::string>();
decode_image(init_image, encoded); decode_image(init_image, encoded);
@ -952,23 +1036,15 @@ int main(int argc, const char** argv) {
} }
} }
} else { } else {
mask_data = std::vector<uint8_t>(width * height, 255); int m_width = get_resolved_width();
mask_image.width = width; int m_height = get_resolved_height();
mask_image.height = height; mask_data = std::vector<uint8_t>(m_width * m_height, 255);
mask_image.width = m_width;
mask_image.height = m_height;
mask_image.channel = 1; mask_image.channel = 1;
mask_image.data = mask_data.data(); mask_image.data = mask_data.data();
} }
if (j.contains("extra_images") && j["extra_images"].is_array()) {
for (auto extra_image : j["extra_images"]) {
std::string encoded = extra_image.get<std::string>();
sd_image_t tmp_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 3, nullptr};
if (decode_image(tmp_image, encoded)) {
ref_images.push_back(tmp_image);
}
}
}
float denoising_strength = j.value("denoising_strength", -1.f); float denoising_strength = j.value("denoising_strength", -1.f);
if (denoising_strength >= 0.f) { if (denoising_strength >= 0.f) {
denoising_strength = std::min(denoising_strength, 1.0f); denoising_strength = std::min(denoising_strength, 1.0f);
@ -976,6 +1052,16 @@ int main(int argc, const char** argv) {
} }
} }
if (j.contains("extra_images") && j["extra_images"].is_array()) {
for (auto extra_image : j["extra_images"]) {
std::string encoded = extra_image.get<std::string>();
sd_image_t tmp_image = {(uint32_t)gen_params.width, (uint32_t)gen_params.height, 3, nullptr};
if (decode_image(tmp_image, encoded)) {
ref_images.push_back(tmp_image);
}
}
}
sd_img_gen_params_t img_gen_params = { sd_img_gen_params_t img_gen_params = {
sd_loras.data(), sd_loras.data(),
static_cast<uint32_t>(sd_loras.size()), static_cast<uint32_t>(sd_loras.size()),
@ -988,8 +1074,8 @@ int main(int argc, const char** argv) {
gen_params.auto_resize_ref_image, gen_params.auto_resize_ref_image,
gen_params.increase_ref_index, gen_params.increase_ref_index,
mask_image, mask_image,
gen_params.width, get_resolved_width(),
gen_params.height, get_resolved_height(),
gen_params.sample_params, gen_params.sample_params,
gen_params.strength, gen_params.strength,
gen_params.seed, gen_params.seed,
@ -1039,6 +1125,7 @@ int main(int argc, const char** argv) {
std::string b64 = base64_encode(image_bytes); std::string b64 = base64_encode(image_bytes);
out["images"].push_back(b64); out["images"].push_back(b64);
} }
free_results(results, num_results);
res.set_content(out.dump(), "application/json"); res.set_content(out.dump(), "application/json");
res.status = 200; res.status = 200;

View File

@ -1,4 +1,4 @@
for f in *.cpp *.h *.hpp examples/cli/*.cpp examples/common/*.hpp examples/cli/*.h examples/server/*.cpp; do for f in src/*.cpp src/*.h src/*.hpp src/vocab/*.h src/vocab/*.cpp examples/cli/*.cpp examples/common/*.hpp examples/cli/*.h examples/server/*.cpp; do
[[ "$f" == vocab* ]] && continue [[ "$f" == vocab* ]] && continue
echo "formatting '$f'" echo "formatting '$f'"
# if [ "$f" != "stable-diffusion.h" ]; then # if [ "$f" != "stable-diffusion.h" ]; then

View File

@ -201,7 +201,6 @@ typedef struct {
bool chroma_use_t5_mask; bool chroma_use_t5_mask;
int chroma_t5_mask_pad; int chroma_t5_mask_pad;
bool qwen_image_zero_cond_t; bool qwen_image_zero_cond_t;
float flow_shift;
} sd_ctx_params_t; } sd_ctx_params_t;
typedef struct { typedef struct {
@ -235,6 +234,7 @@ typedef struct {
int shifted_timestep; int shifted_timestep;
float* custom_sigmas; float* custom_sigmas;
int custom_sigmas_count; int custom_sigmas_count;
float flow_shift;
} sd_sample_params_t; } sd_sample_params_t;
typedef struct { typedef struct {
@ -251,6 +251,7 @@ enum sd_cache_mode_t {
SD_CACHE_DBCACHE, SD_CACHE_DBCACHE,
SD_CACHE_TAYLORSEER, SD_CACHE_TAYLORSEER,
SD_CACHE_CACHE_DIT, SD_CACHE_CACHE_DIT,
SD_CACHE_SPECTRUM,
}; };
typedef struct { typedef struct {
@ -271,6 +272,13 @@ typedef struct {
int taylorseer_skip_interval; int taylorseer_skip_interval;
const char* scm_mask; const char* scm_mask;
bool scm_policy_dynamic; bool scm_policy_dynamic;
float spectrum_w;
int spectrum_m;
float spectrum_lam;
int spectrum_window_size;
float spectrum_flex_window;
int spectrum_warmup_steps;
float spectrum_stop_percent;
} sd_cache_params_t; } sd_cache_params_t;
typedef struct { typedef struct {

View File

@ -1,226 +0,0 @@
#ifndef __PREPROCESSING_HPP__
#define __PREPROCESSING_HPP__
#include <cmath>

#include "ggml_extend.hpp"
#define M_PI_ 3.14159265358979323846f
// Convolves `input` with `kernel` (stride 1, symmetric `padding`) on the CPU
// and writes the result into `output` via a copy node in the compute graph.
// The kernel is converted to F16 first — presumably because ggml_conv_2d
// expects an F16 filter; verify against the ggml version in use.
void convolve(struct ggml_tensor* input, struct ggml_tensor* output, struct ggml_tensor* kernel, int padding) {
    struct ggml_init_params params;
    // Scratch-context budget scales with the input area (~20MB for 512x512);
    // looks empirically sized — confirm it is sufficient for large inputs.
    params.mem_size = 80 * input->ne[0] * input->ne[1];
    params.mem_buffer = nullptr;
    params.no_alloc = false;  // tensor data is allocated inside this context
    struct ggml_context* ctx0 = ggml_init(params);
    struct ggml_tensor* kernel_fp16 = ggml_new_tensor_4d(ctx0, GGML_TYPE_F16, kernel->ne[0], kernel->ne[1], 1, 1);
    ggml_fp32_to_fp16_row((float*)kernel->data, (ggml_fp16_t*)kernel_fp16->data, ggml_nelements(kernel));
    ggml_tensor* h = ggml_conv_2d(ctx0, kernel_fp16, input, 1, 1, padding, padding, 1, 1);
    ggml_cgraph* gf = ggml_new_graph(ctx0);
    // The ggml_cpy node makes graph execution write the conv result into `output`.
    ggml_build_forward_expand(gf, ggml_cpy(ctx0, h, output));
    ggml_graph_compute_with_ctx(ctx0, gf, 1);  // single-threaded compute
    ggml_free(ctx0);
}
// Fills `kernel` in place with 2D Gaussian weights.
// `sigma` is now a parameter (the default 1.4f preserves previous behavior),
// and the center offset is computed per axis so non-square kernels are
// handled correctly (previously both axes used ne[0]'s midpoint).
void gaussian_kernel(struct ggml_tensor* kernel, float sigma = 1.4f) {
    int mid_y = static_cast<int>(kernel->ne[0] / 2);
    int mid_x = static_cast<int>(kernel->ne[1] / 2);
    // 1 / (2*pi*sigma^2): normalization constant of the 2D Gaussian.
    float normal = 1.f / (2.0f * M_PI_ * sigma * sigma);
    for (int y = 0; y < kernel->ne[0]; y++) {
        float gx = static_cast<float>(y - mid_y);
        for (int x = 0; x < kernel->ne[1]; x++) {
            float gy = static_cast<float>(x - mid_x);
            float k_ = expf(-((gx * gx + gy * gy) / (2.0f * sigma * sigma))) * normal;
            ggml_ext_tensor_set_f32(kernel, k_, x, y);
        }
    }
}
// Converts a 3-channel image tensor to single-channel luma using the
// Rec.601 weights (0.2989 R + 0.5870 G + 0.1140 B), writing into `grayscale`.
void grayscale(struct ggml_tensor* rgb_img, struct ggml_tensor* grayscale) {
    const int64_t height = rgb_img->ne[1];
    const int64_t width  = rgb_img->ne[0];
    for (int64_t row = 0; row < height; row++) {
        for (int64_t col = 0; col < width; col++) {
            const float red   = ggml_ext_tensor_get_f32(rgb_img, col, row);
            const float green = ggml_ext_tensor_get_f32(rgb_img, col, row, 1);
            const float blue  = ggml_ext_tensor_get_f32(rgb_img, col, row, 2);
            const float luma  = 0.2989f * red + 0.5870f * green + 0.1140f * blue;
            ggml_ext_tensor_set_f32(grayscale, luma, col, row);
        }
    }
}
// Per-element Euclidean magnitude: h[i] = sqrt(x[i]^2 + y[i]^2).
// Uses hypotf, which avoids the intermediate overflow/underflow of squaring
// the operands directly.
// Assumes all three tensors hold contiguous F32 data of equal length — TODO confirm.
void prop_hypot(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor* h) {
    int n_elements = static_cast<int>(ggml_nelements(h));
    float* dx = (float*)x->data;
    float* dy = (float*)y->data;
    float* dh = (float*)h->data;
    for (int i = 0; i < n_elements; i++) {
        dh[i] = hypotf(dx[i], dy[i]);
    }
}
void prop_arctan2(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor* h) {
int n_elements = static_cast<int>(ggml_nelements(h));
float* dx = (float*)x->data;
float* dy = (float*)y->data;
float* dh = (float*)h->data;
for (int i = 0; i < n_elements; i++) {
dh[i] = atan2f(dy[i], dx[i]);
}
}
// Scales all elements of `g` in place so the maximum becomes 1.0.
// Guarded against max <= 0 (all-zero or negative input), which previously
// produced inf/NaN through the 1/max reciprocal.
void normalize_tensor(struct ggml_tensor* g) {
    int n_elements = static_cast<int>(ggml_nelements(g));
    float* dg = (float*)g->data;
    float max = -INFINITY;
    for (int i = 0; i < n_elements; i++) {
        max = dg[i] > max ? dg[i] : max;
    }
    if (max <= 0.0f) {
        return;  // nothing meaningful to normalize; avoid division by zero
    }
    float inv = 1.0f / max;
    for (int i = 0; i < n_elements; i++) {
        dg[i] *= inv;
    }
}
// Non-maximum suppression stage of Canny: a gradient-magnitude pixel of G is
// kept in `result` only if it is a local maximum along its gradient direction
// D (radians); otherwise it is zeroed. The one-pixel border is left untouched.
// NOTE(review): the original range checks used `>=` where `<=` was intended
// (e.g. `0 >= angle` instead of `0 <= angle`), which made the 45/90/135 degree
// branches unreachable for most angles; corrected below to the standard bins.
void non_max_supression(struct ggml_tensor* result, struct ggml_tensor* G, struct ggml_tensor* D) {
    for (int iy = 1; iy < result->ne[1] - 1; iy++) {
        for (int ix = 1; ix < result->ne[0] - 1; ix++) {
            float angle = ggml_ext_tensor_get_f32(D, ix, iy) * 180.0f / M_PI_;
            if (angle < 0.0f) {
                angle += 180.0f;  // fold atan2 output into [0, 180)
            }
            float q = 1.0f;
            float r = 1.0f;
            // angle ~0 degrees
            if ((0.0f <= angle && angle < 22.5f) || (157.5f <= angle && angle <= 180.0f)) {
                q = ggml_ext_tensor_get_f32(G, ix, iy + 1);
                r = ggml_ext_tensor_get_f32(G, ix, iy - 1);
            }
            // angle ~45 degrees
            else if (22.5f <= angle && angle < 67.5f) {
                q = ggml_ext_tensor_get_f32(G, ix + 1, iy - 1);
                r = ggml_ext_tensor_get_f32(G, ix - 1, iy + 1);
            }
            // angle ~90 degrees
            else if (67.5f <= angle && angle < 112.5f) {
                q = ggml_ext_tensor_get_f32(G, ix + 1, iy);
                r = ggml_ext_tensor_get_f32(G, ix - 1, iy);
            }
            // angle ~135 degrees
            else if (112.5f <= angle && angle < 157.5f) {
                q = ggml_ext_tensor_get_f32(G, ix - 1, iy - 1);
                r = ggml_ext_tensor_get_f32(G, ix + 1, iy + 1);
            }
            float cur = ggml_ext_tensor_get_f32(G, ix, iy);
            if ((cur >= q) && (cur >= r)) {
                ggml_ext_tensor_set_f32(result, cur, ix, iy);
            } else {
                ggml_ext_tensor_set_f32(result, 0.0f, ix, iy);
            }
        }
    }
}
// Double-threshold + hysteresis stage of Canny edge detection, in place on `img`.
// Pixels >= ht become `strong`, pixels in [lt, ht) become `weak`; a 3-pixel
// frame is zeroed; weak pixels are then promoted to `strong` only when an
// 8-connected neighbor is strong, otherwise suppressed to 0.
// Fixes: the weak-pixel branch was mislabeled "strong" and carried a redundant
// `img_v <= ht` check; the neighbor scan missed the (x+1,y+1) and (x-1,y+1)
// diagonals; the border loop wrote every interior pixel back to itself (a no-op).
// Assumes img holds contiguous F32 data — TODO confirm.
void threshold_hystersis(struct ggml_tensor* img, float high_threshold, float low_threshold, float weak, float strong) {
    int n_elements = static_cast<int>(ggml_nelements(img));
    float* imd = (float*)img->data;
    float max = -INFINITY;
    for (int i = 0; i < n_elements; i++) {
        max = imd[i] > max ? imd[i] : max;
    }
    // Thresholds are relative: high to the global max, low to the high threshold.
    float ht = max * high_threshold;
    float lt = ht * low_threshold;
    for (int i = 0; i < n_elements; i++) {
        float img_v = imd[i];
        if (img_v >= ht) {  // strong pixel
            imd[i] = strong;
        } else if (img_v >= lt) {  // weak pixel
            imd[i] = weak;
        }
    }
    // Zero a 3-pixel border so the hysteresis pass never grows edges from it.
    for (int iy = 0; iy < img->ne[1]; iy++) {
        for (int ix = 0; ix < img->ne[0]; ix++) {
            bool interior = ix >= 3 && ix <= img->ne[0] - 3 && iy >= 3 && iy <= img->ne[1] - 3;
            if (!interior) {
                ggml_ext_tensor_set_f32(img, 0.0f, ix, iy);
            }
        }
    }
    // hysteresis: promote weak pixels touching a strong neighbor, drop the rest
    for (int iy = 1; iy < img->ne[1] - 1; iy++) {
        for (int ix = 1; ix < img->ne[0] - 1; ix++) {
            float imd_v = ggml_ext_tensor_get_f32(img, ix, iy);
            if (imd_v == weak) {
                if (ggml_ext_tensor_get_f32(img, ix + 1, iy - 1) == strong || ggml_ext_tensor_get_f32(img, ix + 1, iy) == strong ||
                    ggml_ext_tensor_get_f32(img, ix + 1, iy + 1) == strong || ggml_ext_tensor_get_f32(img, ix, iy - 1) == strong ||
                    ggml_ext_tensor_get_f32(img, ix, iy + 1) == strong || ggml_ext_tensor_get_f32(img, ix - 1, iy - 1) == strong ||
                    ggml_ext_tensor_get_f32(img, ix - 1, iy) == strong || ggml_ext_tensor_get_f32(img, ix - 1, iy + 1) == strong) {
                    ggml_ext_tensor_set_f32(img, strong, ix, iy);
                } else {
                    ggml_ext_tensor_set_f32(img, 0.0f, ix, iy);
                }
            }
        }
    }
}
// Runs a full Canny edge-detection pipeline in place on `img` (the result is
// written back through img.data as a grayscale value replicated to RGB).
// Pipeline: grayscale -> Gaussian blur -> Sobel gradients -> magnitude +
// direction -> non-maximum suppression -> double threshold with hysteresis.
// `inverse` flips the final edge map (white background, dark edges).
// Returns false only if the scratch ggml context cannot be created.
bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold, float weak, float strong, bool inverse) {
    struct ggml_init_params params;
    // Scratch budget scales with image area (~10MB for 512x512);
    // presumably sized empirically — verify for large inputs.
    params.mem_size = static_cast<size_t>(40 * img.width * img.height);
    params.mem_buffer = nullptr;
    params.no_alloc = false;
    struct ggml_context* work_ctx = ggml_init(params);
    if (!work_ctx) {
        LOG_ERROR("ggml_init() failed");
        return false;
    }
    // Sobel kernels: horizontal (kX) and vertical (kY) gradient filters.
    float kX[9] = {
        -1, 0, 1,
        -2, 0, 2,
        -1, 0, 1};
    float kY[9] = {
        1, 2, 1,
        0, 0, 0,
        -1, -2, -1};
    // generate the 5x5 Gaussian smoothing kernel
    int kernel_size = 5;
    struct ggml_tensor* gkernel = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, kernel_size, kernel_size, 1, 1);
    struct ggml_tensor* sf_kx = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
    memcpy(sf_kx->data, kX, ggml_nbytes(sf_kx));
    struct ggml_tensor* sf_ky = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
    memcpy(sf_ky->data, kY, ggml_nbytes(sf_ky));
    gaussian_kernel(gkernel);
    // Working tensors: RGB image, grayscale plane, gradient components,
    // gradient magnitude (G) and direction (tetha).
    struct ggml_tensor* image = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 3, 1);
    struct ggml_tensor* image_gray = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 1, 1);
    struct ggml_tensor* iX = ggml_dup_tensor(work_ctx, image_gray);
    struct ggml_tensor* iY = ggml_dup_tensor(work_ctx, image_gray);
    struct ggml_tensor* G = ggml_dup_tensor(work_ctx, image_gray);
    struct ggml_tensor* tetha = ggml_dup_tensor(work_ctx, image_gray);
    sd_image_to_ggml_tensor(img, image);
    grayscale(image, image_gray);
    // Blur in place, then take Sobel derivatives of the blurred image.
    convolve(image_gray, image_gray, gkernel, 2);
    convolve(image_gray, iX, sf_kx, 1);
    convolve(image_gray, iY, sf_ky, 1);
    prop_hypot(iX, iY, G);
    normalize_tensor(G);
    prop_arctan2(iX, iY, tetha);
    non_max_supression(image_gray, G, tetha);
    threshold_hystersis(image_gray, high_threshold, low_threshold, weak, strong);
    // replicate the edge map to all three RGB channels (optionally inverted)
    for (uint32_t iy = 0; iy < img.height; iy++) {
        for (uint32_t ix = 0; ix < img.width; ix++) {
            float gray = ggml_ext_tensor_get_f32(image_gray, ix, iy);
            gray = inverse ? 1.0f - gray : gray;
            ggml_ext_tensor_set_f32(image, gray, ix, iy);
            ggml_ext_tensor_set_f32(image, gray, ix, iy, 1);
            ggml_ext_tensor_set_f32(image, gray, ix, iy, 2);
        }
    }
    ggml_tensor_to_sd_image(image, img.data);
    ggml_free(work_ctx);
    return true;
}
#endif // __PREPROCESSING_HPP__

View File

@ -1,88 +1,88 @@
import os import os
import sys import sys
import numpy as np import numpy as np
import torch import torch
from diffusers.utils import load_image from diffusers.utils import load_image
# pip install insightface==0.7.3 # pip install insightface==0.7.3
from insightface.app import FaceAnalysis from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image from insightface.data import get_image as ins_get_image
from safetensors.torch import save_file from safetensors.torch import save_file
### ###
# https://github.com/cubiq/ComfyUI_IPAdapter_plus/issues/165#issue-2055829543 # https://github.com/cubiq/ComfyUI_IPAdapter_plus/issues/165#issue-2055829543
### ###
class FaceAnalysis2(FaceAnalysis): class FaceAnalysis2(FaceAnalysis):
# NOTE: allows setting det_size for each detection call. # NOTE: allows setting det_size for each detection call.
# the model allows it but the wrapping code from insightface # the model allows it but the wrapping code from insightface
# doesn't show it, and people end up loading duplicate models # doesn't show it, and people end up loading duplicate models
# for different sizes where there is absolutely no need to # for different sizes where there is absolutely no need to
def get(self, img, max_num=0, det_size=(640, 640)): def get(self, img, max_num=0, det_size=(640, 640)):
if det_size is not None: if det_size is not None:
self.det_model.input_size = det_size self.det_model.input_size = det_size
return super().get(img, max_num) return super().get(img, max_num)
def analyze_faces(face_analysis: FaceAnalysis, img_data: np.ndarray, det_size=(640, 640)): def analyze_faces(face_analysis: FaceAnalysis, img_data: np.ndarray, det_size=(640, 640)):
# NOTE: try detect faces, if no faces detected, lower det_size until it does # NOTE: try detect faces, if no faces detected, lower det_size until it does
detection_sizes = [None] + [(size, size) for size in range(640, 256, -64)] + [(256, 256)] detection_sizes = [None] + [(size, size) for size in range(640, 256, -64)] + [(256, 256)]
for size in detection_sizes: for size in detection_sizes:
faces = face_analysis.get(img_data, det_size=size) faces = face_analysis.get(img_data, det_size=size)
if len(faces) > 0: if len(faces) > 0:
return faces return faces
return [] return []
if __name__ == "__main__": if __name__ == "__main__":
#face_detector = FaceAnalysis2(providers=['CUDAExecutionProvider'], allowed_modules=['detection', 'recognition']) #face_detector = FaceAnalysis2(providers=['CUDAExecutionProvider'], allowed_modules=['detection', 'recognition'])
face_detector = FaceAnalysis2(providers=['CPUExecutionProvider'], allowed_modules=['detection', 'recognition']) face_detector = FaceAnalysis2(providers=['CPUExecutionProvider'], allowed_modules=['detection', 'recognition'])
face_detector.prepare(ctx_id=0, det_size=(640, 640)) face_detector.prepare(ctx_id=0, det_size=(640, 640))
#input_folder_name = './scarletthead_woman' #input_folder_name = './scarletthead_woman'
input_folder_name = sys.argv[1] input_folder_name = sys.argv[1]
image_basename_list = os.listdir(input_folder_name) image_basename_list = os.listdir(input_folder_name)
image_path_list = sorted([os.path.join(input_folder_name, basename) for basename in image_basename_list]) image_path_list = sorted([os.path.join(input_folder_name, basename) for basename in image_basename_list])
input_id_images = [] input_id_images = []
for image_path in image_path_list: for image_path in image_path_list:
input_id_images.append(load_image(image_path)) input_id_images.append(load_image(image_path))
id_embed_list = [] id_embed_list = []
for img in input_id_images: for img in input_id_images:
img = np.array(img) img = np.array(img)
img = img[:, :, ::-1] img = img[:, :, ::-1]
faces = analyze_faces(face_detector, img) faces = analyze_faces(face_detector, img)
if len(faces) > 0: if len(faces) > 0:
id_embed_list.append(torch.from_numpy((faces[0]['embedding']))) id_embed_list.append(torch.from_numpy((faces[0]['embedding'])))
if len(id_embed_list) == 0: if len(id_embed_list) == 0:
raise ValueError(f"No face detected in input image pool") raise ValueError(f"No face detected in input image pool")
id_embeds = torch.stack(id_embed_list) id_embeds = torch.stack(id_embed_list)
# for r in id_embeds: # for r in id_embeds:
# print(r) # print(r)
# #torch.save(id_embeds, input_folder_name+'/id_embeds.pt'); # #torch.save(id_embeds, input_folder_name+'/id_embeds.pt');
# weights = dict() # weights = dict()
# weights["id_embeds"] = id_embeds # weights["id_embeds"] = id_embeds
# save_file(weights, input_folder_name+'/id_embeds.safetensors') # save_file(weights, input_folder_name+'/id_embeds.safetensors')
binary_data = id_embeds.numpy().tobytes() binary_data = id_embeds.numpy().tobytes()
two = 4 two = 4
zero = 0 zero = 0
one = 1 one = 1
tensor_name = "id_embeds" tensor_name = "id_embeds"
# Write binary data to a file # Write binary data to a file
with open(input_folder_name+'/id_embeds.bin', "wb") as f: with open(input_folder_name+'/id_embeds.bin', "wb") as f:
f.write(two.to_bytes(4, byteorder='little')) f.write(two.to_bytes(4, byteorder='little'))
f.write((len(tensor_name)).to_bytes(4, byteorder='little')) f.write((len(tensor_name)).to_bytes(4, byteorder='little'))
f.write(zero.to_bytes(4, byteorder='little')) f.write(zero.to_bytes(4, byteorder='little'))
f.write((id_embeds.shape[1]).to_bytes(4, byteorder='little')) f.write((id_embeds.shape[1]).to_bytes(4, byteorder='little'))
f.write((id_embeds.shape[0]).to_bytes(4, byteorder='little')) f.write((id_embeds.shape[0]).to_bytes(4, byteorder='little'))
f.write(one.to_bytes(4, byteorder='little')) f.write(one.to_bytes(4, byteorder='little'))
f.write(one.to_bytes(4, byteorder='little')) f.write(one.to_bytes(4, byteorder='little'))
f.write(tensor_name.encode('ascii')) f.write(tensor_name.encode('ascii'))
f.write(binary_data) f.write(binary_data)

683
src/anima.hpp Normal file
View File

@ -0,0 +1,683 @@
#ifndef __ANIMA_HPP__
#define __ANIMA_HPP__
#include <cmath>
#include <memory>
#include <utility>
#include <vector>
#include "common_block.hpp"
#include "flux.hpp"
#include "rope.hpp"
namespace Anima {
constexpr int ANIMA_GRAPH_SIZE = 65536;
// Multiplies x by a per-sample gate, broadcasting the gate across the middle
// dimension by inserting a singleton axis.
__STATIC_INLINE__ ggml_tensor* apply_gate(ggml_context* ctx,
                                          ggml_tensor* x,
                                          ggml_tensor* gate) {
    ggml_tensor* gate3d = ggml_reshape_3d(ctx, gate, gate->ne[0], 1, gate->ne[1]);  // [N, 1, C]
    return ggml_mul(ctx, x, gate3d);
}
// Single bias-free linear projection from in_dim to out_dim
// (weights stored under the "proj.1" key).
struct XEmbedder : public GGMLBlock {
public:
    XEmbedder(int64_t in_dim, int64_t out_dim) {
        blocks["proj.1"] = std::make_shared<Linear>(in_dim, out_dim, false);
    }

    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
        auto projection = std::dynamic_pointer_cast<Linear>(blocks["proj.1"]);
        ggml_tensor* out = projection->forward(ctx, x);
        return out;
    }
};
// Two-layer bias-free MLP: Linear(in_dim -> in_dim) -> SiLU -> Linear(in_dim -> out_dim).
struct TimestepEmbedder : public GGMLBlock {
public:
    TimestepEmbedder(int64_t in_dim, int64_t out_dim) {
        blocks["1.linear_1"] = std::make_shared<Linear>(in_dim, in_dim, false);
        blocks["1.linear_2"] = std::make_shared<Linear>(in_dim, out_dim, false);
    }

    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
        auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["1.linear_1"]);
        auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["1.linear_2"]);
        ggml_tensor* h = fc1->forward(ctx, x);
        h = ggml_silu_inplace(ctx->ggml_ctx, h);
        return fc2->forward(ctx, h);
    }
};
struct AdaLayerNormZero : public GGMLBlock {
protected:
int64_t in_features;
public:
AdaLayerNormZero(int64_t in_features, int64_t hidden_features = 256)
: in_features(in_features) {
blocks["norm"] = std::make_shared<LayerNorm>(in_features, 1e-6f, false, false);
blocks["1"] = std::make_shared<Linear>(in_features, hidden_features, false);
blocks["2"] = std::make_shared<Linear>(hidden_features, 3 * in_features, false);
}
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
ggml_tensor* hidden_states,
ggml_tensor* embedded_timestep,
ggml_tensor* temb = nullptr) {
auto norm = std::dynamic_pointer_cast<LayerNorm>(blocks["norm"]);
auto linear_1 = std::dynamic_pointer_cast<Linear>(blocks["1"]);
auto linear_2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);
auto emb = ggml_silu(ctx->ggml_ctx, embedded_timestep);
emb = linear_1->forward(ctx, emb);
emb = linear_2->forward(ctx, emb); // [N, 3*C]
if (temb != nullptr) {
emb = ggml_add(ctx->ggml_ctx, emb, temb);
}
auto emb_chunks = ggml_ext_chunk(ctx->ggml_ctx, emb, 3, 0);
auto shift = emb_chunks[0];
auto scale = emb_chunks[1];
auto gate = emb_chunks[2];
auto x = norm->forward(ctx, hidden_states);
x = Flux::modulate(ctx->ggml_ctx, x, shift, scale);
return {x, gate};
}
};
// Adaptive LayerNorm: a SiLU + two bias-free Linears on the timestep embedding
// produce shift/scale chunks used to modulate the normalized hidden states.
struct AdaLayerNorm : public GGMLBlock {
protected:
    int64_t embedding_dim;

public:
    AdaLayerNorm(int64_t in_features, int64_t hidden_features = 256)
        : embedding_dim(in_features) {
        blocks["norm"] = std::make_shared<LayerNorm>(in_features, 1e-6f, false, false);
        blocks["1"]    = std::make_shared<Linear>(in_features, hidden_features, false);
        blocks["2"]    = std::make_shared<Linear>(hidden_features, 2 * in_features, false);
    }

    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* hidden_states,
                         ggml_tensor* embedded_timestep,
                         ggml_tensor* temb = nullptr) {
        auto norm_block = std::dynamic_pointer_cast<LayerNorm>(blocks["norm"]);
        auto fc1        = std::dynamic_pointer_cast<Linear>(blocks["1"]);
        auto fc2        = std::dynamic_pointer_cast<Linear>(blocks["2"]);

        ggml_tensor* mod = ggml_silu(ctx->ggml_ctx, embedded_timestep);
        mod = fc1->forward(ctx, mod);
        mod = fc2->forward(ctx, mod);  // [N, 2*C]
        if (temb != nullptr) {
            // only the first 2*C lanes of temb participate here
            auto temb_slice = ggml_view_2d(ctx->ggml_ctx, temb, 2 * embedding_dim, temb->ne[1], temb->nb[1], 0);
            mod = ggml_add(ctx->ggml_ctx, mod, temb_slice);
        }

        // split into shift / scale along dim 0
        auto parts = ggml_ext_chunk(ctx->ggml_ctx, mod, 2, 0);
        ggml_tensor* out = norm_block->forward(ctx, hidden_states);
        return Flux::modulate(ctx->ggml_ctx, out, parts[0], parts[1]);
    }
};
struct AnimaAttention : public GGMLBlock {
protected:
int64_t num_heads;
int64_t head_dim;
std::string out_proj_name;
public:
AnimaAttention(int64_t query_dim,
int64_t context_dim,
int64_t num_heads,
int64_t head_dim,
const std::string& out_proj_name = "output_proj")
: num_heads(num_heads), head_dim(head_dim), out_proj_name(out_proj_name) {
int64_t inner_dim = num_heads * head_dim;
blocks["q_proj"] = std::make_shared<Linear>(query_dim, inner_dim, false);
blocks["k_proj"] = std::make_shared<Linear>(context_dim, inner_dim, false);
blocks["v_proj"] = std::make_shared<Linear>(context_dim, inner_dim, false);
blocks["q_norm"] = std::make_shared<RMSNorm>(head_dim, 1e-6f);
blocks["k_norm"] = std::make_shared<RMSNorm>(head_dim, 1e-6f);
blocks[this->out_proj_name] = std::make_shared<Linear>(inner_dim, query_dim, false);
}
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* hidden_states,
ggml_tensor* encoder_hidden_states = nullptr,
ggml_tensor* pe_q = nullptr,
ggml_tensor* pe_k = nullptr) {
if (encoder_hidden_states == nullptr) {
encoder_hidden_states = hidden_states;
}
auto q_proj = std::dynamic_pointer_cast<Linear>(blocks["q_proj"]);
auto k_proj = std::dynamic_pointer_cast<Linear>(blocks["k_proj"]);
auto v_proj = std::dynamic_pointer_cast<Linear>(blocks["v_proj"]);
auto q_norm = std::dynamic_pointer_cast<RMSNorm>(blocks["q_norm"]);
auto k_norm = std::dynamic_pointer_cast<RMSNorm>(blocks["k_norm"]);
auto out_proj = std::dynamic_pointer_cast<Linear>(blocks[out_proj_name]);
auto q = q_proj->forward(ctx, hidden_states);
auto k = k_proj->forward(ctx, encoder_hidden_states);
auto v = v_proj->forward(ctx, encoder_hidden_states);
int64_t N = q->ne[2];
int64_t L_q = q->ne[1];
int64_t L_k = k->ne[1];
auto q4 = ggml_reshape_4d(ctx->ggml_ctx, q, head_dim, num_heads, L_q, N); // [N, L_q, H, D]
auto k4 = ggml_reshape_4d(ctx->ggml_ctx, k, head_dim, num_heads, L_k, N); // [N, L_k, H, D]
auto v4 = ggml_reshape_4d(ctx->ggml_ctx, v, head_dim, num_heads, L_k, N); // [N, L_k, H, D]
q4 = q_norm->forward(ctx, q4);
k4 = k_norm->forward(ctx, k4);
ggml_tensor* attn_out = nullptr;
if (pe_q != nullptr || pe_k != nullptr) {
if (pe_q == nullptr) {
pe_q = pe_k;
}
if (pe_k == nullptr) {
pe_k = pe_q;
}
auto q_rope = Rope::apply_rope(ctx->ggml_ctx, q4, pe_q, false);
auto k_rope = Rope::apply_rope(ctx->ggml_ctx, k4, pe_k, false);
attn_out = ggml_ext_attention_ext(ctx->ggml_ctx,
ctx->backend,
q_rope,
k_rope,
v4,
num_heads,
nullptr,
true,
ctx->flash_attn_enabled);
} else {
auto q_flat = ggml_reshape_3d(ctx->ggml_ctx, q4, head_dim * num_heads, L_q, N);
auto k_flat = ggml_reshape_3d(ctx->ggml_ctx, k4, head_dim * num_heads, L_k, N);
attn_out = ggml_ext_attention_ext(ctx->ggml_ctx,
ctx->backend,
q_flat,
k_flat,
v,
num_heads,
nullptr,
false,
ctx->flash_attn_enabled);
}
return out_proj->forward(ctx, attn_out);
}
};
// Bias-free two-layer feed-forward with (tanh-approximated) GELU activation.
struct AnimaMLP : public GGMLBlock {
public:
    AnimaMLP(int64_t dim, int64_t hidden_dim) {
        blocks["layer1"] = std::make_shared<Linear>(dim, hidden_dim, false);
        blocks["layer2"] = std::make_shared<Linear>(hidden_dim, dim, false);
    }

    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
        auto fc_in  = std::dynamic_pointer_cast<Linear>(blocks["layer1"]);
        auto fc_out = std::dynamic_pointer_cast<Linear>(blocks["layer2"]);
        auto h = fc_in->forward(ctx, x);
        h      = ggml_ext_gelu(ctx->ggml_ctx, h, true);
        return fc_out->forward(ctx, h);
    }
};
// Feed-forward used inside LLMAdapterBlock; like AnimaMLP but with biases
// and checkpoint keys "0"/"2" (the nn.Sequential layout of the original model).
struct AdapterMLP : public GGMLBlock {
public:
    AdapterMLP(int64_t dim, int64_t hidden_dim) {
        blocks["0"] = std::make_shared<Linear>(dim, hidden_dim, true);
        blocks["2"] = std::make_shared<Linear>(hidden_dim, dim, true);
    }

    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
        auto fc_in  = std::dynamic_pointer_cast<Linear>(blocks["0"]);
        auto fc_out = std::dynamic_pointer_cast<Linear>(blocks["2"]);
        auto h = fc_in->forward(ctx, x);
        h      = ggml_ext_gelu(ctx->ggml_ctx, h, true);
        return fc_out->forward(ctx, h);
    }
};
// Pre-norm transformer block of the LLM adapter: RMSNorm -> self-attention,
// RMSNorm -> cross-attention over the source context, RMSNorm -> MLP, each
// followed by a residual add.
struct LLMAdapterBlock : public GGMLBlock {
public:
    LLMAdapterBlock(int64_t model_dim = 1024, int64_t source_dim = 1024, int64_t num_heads = 16, int64_t head_dim = 64) {
        blocks["norm_self_attn"]  = std::make_shared<RMSNorm>(model_dim, 1e-6f);
        blocks["self_attn"]       = std::make_shared<AnimaAttention>(model_dim, model_dim, num_heads, head_dim, "o_proj");
        blocks["norm_cross_attn"] = std::make_shared<RMSNorm>(model_dim, 1e-6f);
        blocks["cross_attn"]      = std::make_shared<AnimaAttention>(model_dim, source_dim, num_heads, head_dim, "o_proj");
        blocks["norm_mlp"]        = std::make_shared<RMSNorm>(model_dim, 1e-6f);
        blocks["mlp"]             = std::make_shared<AdapterMLP>(model_dim, model_dim * 4);
    }

    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* x,
                         ggml_tensor* context,
                         ggml_tensor* target_pe,
                         ggml_tensor* context_pe) {
        auto norm_sa = std::dynamic_pointer_cast<RMSNorm>(blocks["norm_self_attn"]);
        auto sa      = std::dynamic_pointer_cast<AnimaAttention>(blocks["self_attn"]);
        auto norm_ca = std::dynamic_pointer_cast<RMSNorm>(blocks["norm_cross_attn"]);
        auto ca      = std::dynamic_pointer_cast<AnimaAttention>(blocks["cross_attn"]);
        auto norm_ff = std::dynamic_pointer_cast<RMSNorm>(blocks["norm_mlp"]);
        auto ff      = std::dynamic_pointer_cast<AdapterMLP>(blocks["mlp"]);

        // Self-attention over the target sequence (RoPE on both Q and K).
        auto branch = sa->forward(ctx, norm_sa->forward(ctx, x), nullptr, target_pe, target_pe);
        x = ggml_add(ctx->ggml_ctx, x, branch);
        // Cross-attention into the source context (distinct K positions).
        branch = ca->forward(ctx, norm_ca->forward(ctx, x), context, target_pe, context_pe);
        x = ggml_add(ctx->ggml_ctx, x, branch);
        // Feed-forward.
        branch = ff->forward(ctx, norm_ff->forward(ctx, x));
        return ggml_add(ctx->ggml_ctx, x, branch);
    }
};
struct LLMAdapter : public GGMLBlock {
protected:
int num_layers;
public:
LLMAdapter(int64_t source_dim = 1024,
int64_t target_dim = 1024,
int64_t model_dim = 1024,
int num_layers = 6,
int num_heads = 16)
: num_layers(num_layers) {
int64_t head_dim = model_dim / num_heads;
blocks["embed"] = std::make_shared<Embedding>(32128, target_dim);
for (int i = 0; i < num_layers; i++) {
blocks["blocks." + std::to_string(i)] =
std::make_shared<LLMAdapterBlock>(model_dim, source_dim, num_heads, head_dim);
}
blocks["out_proj"] = std::make_shared<Linear>(model_dim, target_dim, true);
blocks["norm"] = std::make_shared<RMSNorm>(target_dim, 1e-6f);
}
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* source_hidden_states,
ggml_tensor* target_input_ids,
ggml_tensor* target_pe,
ggml_tensor* source_pe) {
GGML_ASSERT(target_input_ids != nullptr);
if (ggml_n_dims(target_input_ids) == 1) {
target_input_ids = ggml_reshape_2d(ctx->ggml_ctx, target_input_ids, target_input_ids->ne[0], 1);
}
auto embed = std::dynamic_pointer_cast<Embedding>(blocks["embed"]);
auto out_proj = std::dynamic_pointer_cast<Linear>(blocks["out_proj"]);
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["norm"]);
auto x = embed->forward(ctx, target_input_ids); // [N, target_len, target_dim]
for (int i = 0; i < num_layers; i++) {
auto block = std::dynamic_pointer_cast<LLMAdapterBlock>(blocks["blocks." + std::to_string(i)]);
x = block->forward(ctx, x, source_hidden_states, target_pe, source_pe);
}
x = out_proj->forward(ctx, x);
x = norm->forward(ctx, x);
return x;
}
};
// Core DiT block: adaLN-Zero-modulated self-attention, cross-attention to the
// text context, and MLP, each gated and residually added.
struct TransformerBlock : public GGMLBlock {
public:
    TransformerBlock(int64_t hidden_size,
                     int64_t text_embed_dim,
                     int64_t num_heads,
                     int64_t head_dim,
                     int64_t mlp_ratio      = 4,
                     int64_t adaln_lora_dim = 256) {
        blocks["adaln_modulation_self_attn"]  = std::make_shared<AdaLayerNormZero>(hidden_size, adaln_lora_dim);
        blocks["self_attn"]                   = std::make_shared<AnimaAttention>(hidden_size, hidden_size, num_heads, head_dim);
        blocks["adaln_modulation_cross_attn"] = std::make_shared<AdaLayerNormZero>(hidden_size, adaln_lora_dim);
        blocks["cross_attn"]                  = std::make_shared<AnimaAttention>(hidden_size, text_embed_dim, num_heads, head_dim);
        blocks["adaln_modulation_mlp"]        = std::make_shared<AdaLayerNormZero>(hidden_size, adaln_lora_dim);
        blocks["mlp"]                         = std::make_shared<AnimaMLP>(hidden_size, hidden_size * mlp_ratio);
    }

    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* hidden_states,
                         ggml_tensor* encoder_hidden_states,
                         ggml_tensor* embedded_timestep,
                         ggml_tensor* temb,
                         ggml_tensor* image_pe) {
        auto adaln_sa   = std::dynamic_pointer_cast<AdaLayerNormZero>(blocks["adaln_modulation_self_attn"]);
        auto self_attn  = std::dynamic_pointer_cast<AnimaAttention>(blocks["self_attn"]);
        auto adaln_ca   = std::dynamic_pointer_cast<AdaLayerNormZero>(blocks["adaln_modulation_cross_attn"]);
        auto cross_attn = std::dynamic_pointer_cast<AnimaAttention>(blocks["cross_attn"]);
        auto adaln_ff   = std::dynamic_pointer_cast<AdaLayerNormZero>(blocks["adaln_modulation_mlp"]);
        auto ff         = std::dynamic_pointer_cast<AnimaMLP>(blocks["mlp"]);

        // Self-attention with rotary image positions.
        auto [sa_in, sa_gate] = adaln_sa->forward(ctx, hidden_states, embedded_timestep, temb);
        auto branch           = self_attn->forward(ctx, sa_in, nullptr, image_pe, image_pe);
        hidden_states         = ggml_add(ctx->ggml_ctx, hidden_states, apply_gate(ctx->ggml_ctx, branch, sa_gate));

        // Cross-attention into the text embeddings (no positional encoding).
        auto [ca_in, ca_gate] = adaln_ca->forward(ctx, hidden_states, embedded_timestep, temb);
        branch                = cross_attn->forward(ctx, ca_in, encoder_hidden_states, nullptr, nullptr);
        hidden_states         = ggml_add(ctx->ggml_ctx, hidden_states, apply_gate(ctx->ggml_ctx, branch, ca_gate));

        // Gated feed-forward.
        auto [ff_in, ff_gate] = adaln_ff->forward(ctx, hidden_states, embedded_timestep, temb);
        branch                = ff->forward(ctx, ff_in);
        hidden_states         = ggml_add(ctx->ggml_ctx, hidden_states, apply_gate(ctx->ggml_ctx, branch, ff_gate));
        return hidden_states;
    }
};
// Output head: AdaLayerNorm modulation followed by a linear projection that
// maps each token to patch_size^2 * out_channels values for unpatchify.
struct FinalLayer : public GGMLBlock {
protected:
    int64_t hidden_size;
    int64_t patch_size;
    int64_t out_channels;

public:
    FinalLayer(int64_t hidden_size, int64_t patch_size, int64_t out_channels)
        : hidden_size(hidden_size), patch_size(patch_size), out_channels(out_channels) {
        blocks["adaln_modulation"] = std::make_shared<AdaLayerNorm>(hidden_size, 256);
        blocks["linear"]           = std::make_shared<Linear>(hidden_size, patch_size * patch_size * out_channels, false);
    }

    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* hidden_states,
                         ggml_tensor* embedded_timestep,
                         ggml_tensor* temb) {
        auto modulation = std::dynamic_pointer_cast<AdaLayerNorm>(blocks["adaln_modulation"]);
        auto proj       = std::dynamic_pointer_cast<Linear>(blocks["linear"]);
        auto out = modulation->forward(ctx, hidden_states, embedded_timestep, temb);
        return proj->forward(ctx, out);
    }
};
// Anima DiT backbone: patchifies the latent, embeds the timestep, optionally
// re-derives the text conditioning through the LLM adapter, runs the
// transformer stack, and unpatchifies back to latent space.
struct AnimaNet : public GGMLBlock {
public:
    int64_t in_channels    = 16;
    int64_t out_channels   = 16;
    int64_t hidden_size    = 2048;
    int64_t text_embed_dim = 1024;
    int64_t num_heads      = 16;
    int64_t head_dim       = 128;
    int patch_size         = 2;
    int64_t num_layers     = 28;
    // Per-axis RoPE dims (t, h, w); 44 + 42 + 42 = 128 = head_dim.
    std::vector<int> axes_dim = {44, 42, 42};
    int theta = 10000;

public:
    AnimaNet() = default;

    // num_layers: number of TransformerBlocks to build (detected from the
    // checkpoint by AnimaRunner).
    explicit AnimaNet(int64_t num_layers)
        : num_layers(num_layers) {
        // +1 input channel for the padding mask concatenated in forward().
        blocks["x_embedder"]       = std::make_shared<XEmbedder>((in_channels + 1) * patch_size * patch_size, hidden_size);
        blocks["t_embedder"]       = std::make_shared<TimestepEmbedder>(hidden_size, hidden_size * 3);
        blocks["t_embedding_norm"] = std::make_shared<RMSNorm>(hidden_size, 1e-6f);
        for (int i = 0; i < num_layers; i++) {
            blocks["blocks." + std::to_string(i)] = std::make_shared<TransformerBlock>(hidden_size,
                                                                                       text_embed_dim,
                                                                                       num_heads,
                                                                                       head_dim);
        }
        blocks["final_layer"] = std::make_shared<FinalLayer>(hidden_size, patch_size, out_channels);
        blocks["llm_adapter"] = std::make_shared<LLMAdapter>(1024, 1024, 1024, 6, 16);
    }

    // x:                     latent image [N, C, H, W]; N must be 1 (asserted)
    // timestep:              diffusion timestep value(s)
    // encoder_hidden_states: text conditioning; replaced by the adapter output
    //                        when t5_ids is provided
    // image_pe:              RoPE table for the image tokens
    // t5_ids / t5_weights:   optional token ids and per-token weights for the
    //                        LLM adapter path
    // adapter_q_pe/k_pe:     RoPE tables for the adapter's target/source sides
    // returns:               predicted latent [N, C, H, W]
    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* x,
                         ggml_tensor* timestep,
                         ggml_tensor* encoder_hidden_states,
                         ggml_tensor* image_pe,
                         ggml_tensor* t5_ids = nullptr,
                         ggml_tensor* t5_weights = nullptr,
                         ggml_tensor* adapter_q_pe = nullptr,
                         ggml_tensor* adapter_k_pe = nullptr) {
        GGML_ASSERT(x->ne[3] == 1);  // single-image batches only
        auto x_embedder = std::dynamic_pointer_cast<XEmbedder>(blocks["x_embedder"]);
        auto t_embedder = std::dynamic_pointer_cast<TimestepEmbedder>(blocks["t_embedder"]);
        auto t_embedding_norm = std::dynamic_pointer_cast<RMSNorm>(blocks["t_embedding_norm"]);
        auto final_layer = std::dynamic_pointer_cast<FinalLayer>(blocks["final_layer"]);
        auto llm_adapter = std::dynamic_pointer_cast<LLMAdapter>(blocks["llm_adapter"]);
        int64_t W = x->ne[0];
        int64_t H = x->ne[1];
        // Append an all-zero padding-mask channel, then cut into patches.
        auto padding_mask = ggml_ext_zeros(ctx->ggml_ctx, x->ne[0], x->ne[1], 1, x->ne[3]);
        x = ggml_concat(ctx->ggml_ctx, x, padding_mask, 2);         // [N, C + 1, H, W]
        x = DiT::pad_and_patchify(ctx, x, patch_size, patch_size);  // [N, h*w, (C+1)*ph*pw]
        x = x_embedder->forward(ctx, x);
        // temb (3*hidden wide) modulates the blocks; embedded_timestep is the
        // RMS-normalized sinusoidal embedding fed to every AdaLayerNorm(Zero).
        auto timestep_proj = ggml_ext_timestep_embedding(ctx->ggml_ctx, timestep, static_cast<int>(hidden_size));
        auto temb = t_embedder->forward(ctx, timestep_proj);
        auto embedded_timestep = t_embedding_norm->forward(ctx, timestep_proj);
        if (t5_ids != nullptr) {
            // Re-derive the conditioning from token ids via the LLM adapter.
            auto adapted_context = llm_adapter->forward(ctx, encoder_hidden_states, t5_ids, adapter_q_pe, adapter_k_pe);
            if (t5_weights != nullptr) {
                // Scale each token's embedding by its weight (broadcast over
                // the feature dimension via repeat).
                auto w = t5_weights;
                if (ggml_n_dims(w) == 1) {
                    w = ggml_reshape_3d(ctx->ggml_ctx, w, 1, w->ne[0], 1);
                }
                w = ggml_repeat_4d(ctx->ggml_ctx, w, adapted_context->ne[0], adapted_context->ne[1], adapted_context->ne[2], 1);
                adapted_context = ggml_mul(ctx->ggml_ctx, adapted_context, w);
            }
            // Pad with zeros or truncate so the context is exactly 512 tokens.
            if (adapted_context->ne[1] < 512) {
                auto pad_ctx = ggml_ext_zeros(ctx->ggml_ctx,
                                              adapted_context->ne[0],
                                              512 - adapted_context->ne[1],
                                              adapted_context->ne[2],
                                              1);
                adapted_context = ggml_concat(ctx->ggml_ctx, adapted_context, pad_ctx, 1);
            } else if (adapted_context->ne[1] > 512) {
                adapted_context = ggml_ext_slice(ctx->ggml_ctx, adapted_context, 1, 0, 512);
            }
            encoder_hidden_states = adapted_context;
        }
        for (int i = 0; i < num_layers; i++) {
            auto block = std::dynamic_pointer_cast<TransformerBlock>(blocks["blocks." + std::to_string(i)]);
            x = block->forward(ctx, x, encoder_hidden_states, embedded_timestep, temb, image_pe);
        }
        x = final_layer->forward(ctx, x, embedded_timestep, temb);                            // [N, h*w, ph*pw*C]
        x = DiT::unpatchify_and_crop(ctx->ggml_ctx, x, H, W, patch_size, patch_size, false);  // [N, C, H, W]
        return x;
    }
};
// GGMLRunner wrapper around AnimaNet: discovers the layer count from the
// checkpoint, precomputes rotary position-embedding tables on the host, and
// builds/executes the compute graph.
struct AnimaRunner : public GGMLRunner {
public:
    // Host-side PE buffers. They are kept as members because
    // set_backend_tensor_data() stores the raw pointer; the data must stay
    // alive until the graph is computed.
    std::vector<float> image_pe_vec;
    std::vector<float> adapter_q_pe_vec;
    std::vector<float> adapter_k_pe_vec;
    AnimaNet net;

    // Scans tensor names of the form "<prefix>.net.blocks.<i>.*" to determine
    // the transformer depth; falls back to 28 when nothing matches.
    AnimaRunner(ggml_backend_t backend,
                bool offload_params_to_cpu,
                const String2TensorStorage& tensor_storage_map = {},
                const std::string prefix = "model.diffusion_model")
        : GGMLRunner(backend, offload_params_to_cpu) {
        int64_t num_layers = 0;
        std::string layer_tag = prefix + ".net.blocks.";
        for (const auto& kv : tensor_storage_map) {
            const std::string& tensor_name = kv.first;
            size_t pos = tensor_name.find(layer_tag);
            if (pos == std::string::npos) {
                continue;
            }
            size_t start = pos + layer_tag.size();
            size_t end = tensor_name.find('.', start);
            if (end == std::string::npos) {
                continue;
            }
            // Layer index sits between the tag and the next '.'.
            int64_t layer_id = atoll(tensor_name.substr(start, end - start).c_str());
            num_layers = std::max(num_layers, layer_id + 1);
        }
        if (num_layers <= 0) {
            num_layers = 28;  // default depth when the checkpoint has no blocks
        }
        LOG_INFO("anima net layers: %" PRId64, num_layers);
        net = AnimaNet(num_layers);
        net.init(params_ctx, tensor_storage_map, prefix + ".net");
    }

    std::string get_desc() override {
        return "anima";
    }

    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
        net.get_param_tensors(tensors, prefix + ".net");
    }

    // Builds a flattened 1-D RoPE table for positions [0, seq_len).
    static std::vector<float> gen_1d_rope_pe_vec(int64_t seq_len, int dim, float theta = 10000.f) {
        std::vector<float> pos(seq_len);
        for (int64_t i = 0; i < seq_len; i++) {
            pos[i] = static_cast<float>(i);
        }
        auto rope_emb = Rope::rope(pos, dim, theta);
        return Rope::flatten(rope_emb);
    }

    // NTK-aware theta scaling factor for extrapolating RoPE beyond the trained
    // context: theta' = theta * ratio^(d / (d - 2)). Returns 1 when no
    // extrapolation is requested or the axis is too small to scale.
    static float calc_ntk_factor(float extrapolation_ratio, int axis_dim) {
        if (extrapolation_ratio == 1.0f || axis_dim <= 2) {
            return 1.0f;
        }
        return std::pow(extrapolation_ratio, static_cast<float>(axis_dim) / static_cast<float>(axis_dim - 2));
    }

    // Builds the image RoPE table for an h x w latent, using Flux-style
    // (t, h, w) ids and a per-axis NTK-scaled theta.
    static std::vector<float> gen_anima_image_pe_vec(int bs,
                                                     int h,
                                                     int w,
                                                     int patch_size,
                                                     int theta,
                                                     const std::vector<int>& axes_dim,
                                                     float h_extrapolation_ratio,
                                                     float w_extrapolation_ratio,
                                                     float t_extrapolation_ratio) {
        static const std::vector<ggml_tensor*> empty_ref_latents;
        auto ids = Rope::gen_flux_ids(h,
                                      w,
                                      patch_size,
                                      bs,
                                      static_cast<int>(axes_dim.size()),
                                      0,
                                      {},
                                      empty_ref_latents,
                                      false,
                                      1.0f);
        std::vector<float> axis_thetas = {
            static_cast<float>(theta) * calc_ntk_factor(t_extrapolation_ratio, axes_dim[0]),
            static_cast<float>(theta) * calc_ntk_factor(h_extrapolation_ratio, axes_dim[1]),
            static_cast<float>(theta) * calc_ntk_factor(w_extrapolation_ratio, axes_dim[2]),
        };
        return Rope::embed_nd(ids, bs, axis_thetas, axes_dim);
    }

    // Assembles the full forward graph: inputs, PE tables, AnimaNet::forward.
    // t5_ids enables the LLM adapter path; context may then serve as its
    // source hidden states.
    ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
                             const sd::Tensor<float>& timesteps_tensor,
                             const sd::Tensor<float>& context_tensor = {},
                             const sd::Tensor<int32_t>& t5_ids_tensor = {},
                             const sd::Tensor<float>& t5_weights_tensor = {}) {
        ggml_tensor* x = make_input(x_tensor);
        ggml_tensor* timesteps = make_input(timesteps_tensor);
        ggml_tensor* context = make_optional_input(context_tensor);
        ggml_tensor* t5_ids = make_optional_input(t5_ids_tensor);
        ggml_tensor* t5_weights = make_optional_input(t5_weights_tensor);
        GGML_ASSERT(x->ne[3] == 1);
        ggml_cgraph* gf = new_graph_custom(ANIMA_GRAPH_SIZE);
        // Round the latent size up to a multiple of patch_size, matching the
        // padding performed by DiT::pad_and_patchify inside the net.
        int64_t pad_h = (net.patch_size - x->ne[1] % net.patch_size) % net.patch_size;
        int64_t pad_w = (net.patch_size - x->ne[0] % net.patch_size) % net.patch_size;
        int64_t h_pad = x->ne[1] + pad_h;
        int64_t w_pad = x->ne[0] + pad_w;
        image_pe_vec = gen_anima_image_pe_vec(1,
                                              static_cast<int>(h_pad),
                                              static_cast<int>(w_pad),
                                              static_cast<int>(net.patch_size),
                                              net.theta,
                                              net.axes_dim,
                                              4.0f,
                                              4.0f,
                                              1.0f);
        // PE layout: [2, 2, head_dim/2, positions]; derive the position count
        // from the buffer size.
        int64_t image_pos_len = static_cast<int64_t>(image_pe_vec.size()) / (2 * 2 * (net.head_dim / 2));
        auto image_pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, net.head_dim / 2, image_pos_len);
        set_backend_tensor_data(image_pe, image_pe_vec.data());
        ggml_tensor* adapter_q_pe = nullptr;
        ggml_tensor* adapter_k_pe = nullptr;
        if (t5_ids != nullptr) {
            // 1-D RoPE tables for the adapter; 64 = adapter head_dim, so the
            // per-position layout is [2, 2, 32].
            int64_t target_len = t5_ids->ne[0];
            int64_t source_len = context->ne[1];
            adapter_q_pe_vec = gen_1d_rope_pe_vec(target_len, 64, 10000.f);
            adapter_k_pe_vec = gen_1d_rope_pe_vec(source_len, 64, 10000.f);
            int64_t target_pos_len = static_cast<int64_t>(adapter_q_pe_vec.size()) / (2 * 2 * 32);
            int64_t source_pos_len = static_cast<int64_t>(adapter_k_pe_vec.size()) / (2 * 2 * 32);
            adapter_q_pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, 32, target_pos_len);
            adapter_k_pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, 32, source_pos_len);
            set_backend_tensor_data(adapter_q_pe, adapter_q_pe_vec.data());
            set_backend_tensor_data(adapter_k_pe, adapter_k_pe_vec.data());
        }
        auto runner_ctx = get_context();
        auto out = net.forward(&runner_ctx,
                               x,
                               timesteps,
                               context,
                               image_pe,
                               t5_ids,
                               t5_weights,
                               adapter_q_pe,
                               adapter_k_pe);
        ggml_build_forward_expand(gf, out);
        return gf;
    }

    // Runs one forward pass and returns the denoised latent, with trailing
    // singleton dims restored to match the input's rank.
    sd::Tensor<float> compute(int n_threads,
                              const sd::Tensor<float>& x,
                              const sd::Tensor<float>& timesteps,
                              const sd::Tensor<float>& context = {},
                              const sd::Tensor<int32_t>& t5_ids = {},
                              const sd::Tensor<float>& t5_weights = {}) {
        auto get_graph = [&]() -> ggml_cgraph* {
            return build_graph(x, timesteps, context, t5_ids, t5_weights);
        };
        return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
    }
};
} // namespace Anima
#endif // __ANIMA_HPP__

View File

@ -1,8 +1,7 @@
#ifndef __VAE_HPP__ #ifndef __AUTO_ENCODER_KL_HPP__
#define __VAE_HPP__ #define __AUTO_ENCODER_KL_HPP__
#include "common.hpp" #include "vae.hpp"
#include "ggml_extend.hpp"
/*================================================== AutoEncoderKL ===================================================*/ /*================================================== AutoEncoderKL ===================================================*/
@ -30,7 +29,7 @@ public:
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// t_emb is always None // t_emb is always None
auto norm1 = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm1"]); auto norm1 = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm1"]);
@ -66,7 +65,7 @@ protected:
int64_t in_channels; int64_t in_channels;
bool use_linear; bool use_linear;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") {
auto iter = tensor_storage_map.find(prefix + "proj_out.weight"); auto iter = tensor_storage_map.find(prefix + "proj_out.weight");
if (iter != tensor_storage_map.end()) { if (iter != tensor_storage_map.end()) {
if (iter->second.n_dims == 4 && use_linear) { if (iter->second.n_dims == 4 && use_linear) {
@ -102,7 +101,7 @@ public:
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
auto norm = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm"]); auto norm = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm"]);
auto q_proj = std::dynamic_pointer_cast<UnaryBlock>(blocks["q"]); auto q_proj = std::dynamic_pointer_cast<UnaryBlock>(blocks["q"]);
@ -141,7 +140,7 @@ public:
v = ggml_reshape_3d(ctx->ggml_ctx, v, c, h * w, n); // [N, h * w, in_channels] v = ggml_reshape_3d(ctx->ggml_ctx, v, c, h * w, n); // [N, h * w, in_channels]
} }
h_ = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, 1, nullptr, true, ctx->flash_attn_enabled); h_ = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, 1, nullptr, false, ctx->flash_attn_enabled);
if (use_linear) { if (use_linear) {
h_ = proj_out->forward(ctx, h_); // [N, h * w, in_channels] h_ = proj_out->forward(ctx, h_); // [N, h * w, in_channels]
@ -179,8 +178,8 @@ public:
{kernel_padding, 0, 0})); {kernel_padding, 0, 0}));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x) override { ggml_tensor* x) override {
// timesteps always None // timesteps always None
// skip_video always False // skip_video always False
// x: [N, IC, IH, IW] // x: [N, IC, IH, IW]
@ -209,7 +208,7 @@ public:
class VideoResnetBlock : public ResnetBlock { class VideoResnetBlock : public ResnetBlock {
protected: protected:
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
enum ggml_type wtype = get_type(prefix + "mix_factor", tensor_storage_map, GGML_TYPE_F32); enum ggml_type wtype = get_type(prefix + "mix_factor", tensor_storage_map, GGML_TYPE_F32);
params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1); params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
} }
@ -228,7 +227,7 @@ public:
blocks["time_stack"] = std::shared_ptr<GGMLBlock>(new ResBlock(out_channels, 0, out_channels, {video_kernel_size, 1}, 3, false, true)); blocks["time_stack"] = std::shared_ptr<GGMLBlock>(new ResBlock(out_channels, 0, out_channels, {video_kernel_size, 1}, 3, false, true));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
// x: [N, in_channels, h, w] aka [b*t, in_channels, h, w] // x: [N, in_channels, h, w] aka [b*t, in_channels, h, w]
// return: [N, out_channels, h, w] aka [b*t, out_channels, h, w] // return: [N, out_channels, h, w] aka [b*t, out_channels, h, w]
// t_emb is always None // t_emb is always None
@ -318,7 +317,7 @@ public:
blocks["conv_out"] = std::shared_ptr<GGMLBlock>(new Conv2d(block_in, double_z ? z_channels * 2 : z_channels, {3, 3}, {1, 1}, {1, 1})); blocks["conv_out"] = std::shared_ptr<GGMLBlock>(new Conv2d(block_in, double_z ? z_channels * 2 : z_channels, {3, 3}, {1, 1}, {1, 1}));
} }
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { virtual ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
auto conv_in = std::dynamic_pointer_cast<Conv2d>(blocks["conv_in"]); auto conv_in = std::dynamic_pointer_cast<Conv2d>(blocks["conv_in"]);
@ -436,7 +435,7 @@ public:
blocks["conv_out"] = get_conv_out(block_in, out_ch, {3, 3}, {1, 1}, {1, 1}); blocks["conv_out"] = get_conv_out(block_in, out_ch, {3, 3}, {1, 1}, {1, 1});
} }
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* z) { virtual ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* z) {
// z: [N, z_channels, h, w] // z: [N, z_channels, h, w]
// alpha is always 0 // alpha is always 0
// merge_strategy is always learned // merge_strategy is always learned
@ -484,7 +483,7 @@ public:
}; };
// ldm.models.autoencoder.AutoencoderKL // ldm.models.autoencoder.AutoencoderKL
class AutoencodingEngine : public GGMLBlock { class AutoEncoderKLModel : public GGMLBlock {
protected: protected:
SDVersion version; SDVersion version;
bool decode_only = true; bool decode_only = true;
@ -503,7 +502,7 @@ protected:
} dd_config; } dd_config;
public: public:
AutoencodingEngine(SDVersion version = VERSION_SD1, AutoEncoderKLModel(SDVersion version = VERSION_SD1,
bool decode_only = true, bool decode_only = true,
bool use_linear_projection = false, bool use_linear_projection = false,
bool use_video_decoder = false) bool use_video_decoder = false)
@ -550,7 +549,7 @@ public:
} }
} }
struct ggml_tensor* decode(GGMLRunnerContext* ctx, struct ggml_tensor* z) { ggml_tensor* decode(GGMLRunnerContext* ctx, ggml_tensor* z) {
// z: [N, z_channels, h, w] // z: [N, z_channels, h, w]
if (sd_version_is_flux2(version)) { if (sd_version_is_flux2(version)) {
// [N, C*p*p, h, w] -> [N, C, h*p, w*p] // [N, C*p*p, h, w] -> [N, C, h*p, w*p]
@ -582,7 +581,7 @@ public:
return h; return h;
} }
struct ggml_tensor* encode(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* encode(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
auto encoder = std::dynamic_pointer_cast<Encoder>(blocks["encoder"]); auto encoder = std::dynamic_pointer_cast<Encoder>(blocks["encoder"]);
@ -611,48 +610,21 @@ public:
} }
return z; return z;
} }
};
struct VAE : public GGMLRunner { int get_encoder_output_channels() {
VAE(ggml_backend_t backend, bool offload_params_to_cpu) int factor = dd_config.double_z ? 2 : 1;
: GGMLRunner(backend, offload_params_to_cpu) {} if (sd_version_is_flux2(version)) {
virtual bool compute(const int n_threads, return dd_config.z_channels * 4;
struct ggml_tensor* z,
bool decode_graph,
struct ggml_tensor** output,
struct ggml_context* output_ctx) = 0;
virtual void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) = 0;
virtual void set_conv2d_scale(float scale) { SD_UNUSED(scale); };
};
struct FakeVAE : public VAE {
FakeVAE(ggml_backend_t backend, bool offload_params_to_cpu)
: VAE(backend, offload_params_to_cpu) {}
bool compute(const int n_threads,
struct ggml_tensor* z,
bool decode_graph,
struct ggml_tensor** output,
struct ggml_context* output_ctx) override {
if (*output == nullptr && output_ctx != nullptr) {
*output = ggml_dup_tensor(output_ctx, z);
} }
ggml_ext_tensor_iter(z, [&](ggml_tensor* z, int64_t i0, int64_t i1, int64_t i2, int64_t i3) { return dd_config.z_channels * factor;
float value = ggml_ext_tensor_get_f32(z, i0, i1, i2, i3);
ggml_ext_tensor_set_f32(*output, value, i0, i1, i2, i3);
});
return true;
}
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) override {}
std::string get_desc() override {
return "fake_vae";
} }
}; };
struct AutoEncoderKL : public VAE { struct AutoEncoderKL : public VAE {
bool decode_only = true; float scale_factor = 1.f;
AutoencodingEngine ae; float shift_factor = 0.f;
bool decode_only = true;
AutoEncoderKLModel ae;
AutoEncoderKL(ggml_backend_t backend, AutoEncoderKL(ggml_backend_t backend,
bool offload_params_to_cpu, bool offload_params_to_cpu,
@ -661,7 +633,23 @@ struct AutoEncoderKL : public VAE {
bool decode_only = false, bool decode_only = false,
bool use_video_decoder = false, bool use_video_decoder = false,
SDVersion version = VERSION_SD1) SDVersion version = VERSION_SD1)
: decode_only(decode_only), VAE(backend, offload_params_to_cpu) { : decode_only(decode_only), VAE(version, backend, offload_params_to_cpu) {
if (sd_version_is_sd1(version) || sd_version_is_sd2(version)) {
scale_factor = 0.18215f;
shift_factor = 0.f;
} else if (sd_version_is_sdxl(version)) {
scale_factor = 0.13025f;
shift_factor = 0.f;
} else if (sd_version_is_sd3(version)) {
scale_factor = 1.5305f;
shift_factor = 0.0609f;
} else if (sd_version_is_flux(version) || sd_version_is_z_image(version)) {
scale_factor = 0.3611f;
shift_factor = 0.1159f;
} else if (sd_version_is_flux2(version)) {
scale_factor = 1.0f;
shift_factor = 0.f;
}
bool use_linear_projection = false; bool use_linear_projection = false;
for (const auto& [name, tensor_storage] : tensor_storage_map) { for (const auto& [name, tensor_storage] : tensor_storage_map) {
if (!starts_with(name, prefix)) { if (!starts_with(name, prefix)) {
@ -674,7 +662,7 @@ struct AutoEncoderKL : public VAE {
break; break;
} }
} }
ae = AutoencodingEngine(version, decode_only, use_linear_projection, use_video_decoder); ae = AutoEncoderKLModel(version, decode_only, use_linear_projection, use_video_decoder);
ae.init(params_ctx, tensor_storage_map, prefix); ae.init(params_ctx, tensor_storage_map, prefix);
} }
@ -693,62 +681,149 @@ struct AutoEncoderKL : public VAE {
return "vae"; return "vae";
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) override { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) override {
ae.get_param_tensors(tensors, prefix); ae.get_param_tensors(tensors, prefix);
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) { ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) {
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx); ggml_cgraph* gf = ggml_new_graph(compute_ctx);
ggml_tensor* z = make_input(z_tensor);
z = to_backend(z);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_tensor* out = decode_graph ? ae.decode(&runner_ctx, z) : ae.encode(&runner_ctx, z); ggml_tensor* out = decode_graph ? ae.decode(&runner_ctx, z) : ae.encode(&runner_ctx, z);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
bool compute(const int n_threads, sd::Tensor<float> _compute(const int n_threads,
struct ggml_tensor* z, const sd::Tensor<float>& z,
bool decode_graph, bool decode_graph) override {
struct ggml_tensor** output,
struct ggml_context* output_ctx = nullptr) override {
GGML_ASSERT(!decode_only || decode_graph); GGML_ASSERT(!decode_only || decode_graph);
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(z, decode_graph); return build_graph(z, decode_graph);
}; };
// ggml_set_f32(z, 0.5f); return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), z.dim());
// print_ggml_tensor(z); }
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
sd::Tensor<float> gaussian_latent_sample(const sd::Tensor<float>& moments, std::shared_ptr<RNG> rng) {
// ldm.modules.distributions.distributions.DiagonalGaussianDistribution.sample
auto chunks = sd::ops::chunk(moments, 2, 2);
const auto& mean = chunks[0];
const auto& logvar = chunks[1];
sd::Tensor<float> stddev = sd::ops::exp(0.5f * sd::ops::clamp(logvar, -30.0f, 20.0f));
sd::Tensor<float> noise = sd::Tensor<float>::randn_like(mean, rng);
sd::Tensor<float> latents = mean + stddev * noise;
return latents;
}
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
if (sd_version_is_flux2(version)) {
return vae_output;
} else if (version == VERSION_SD1_PIX2PIX) {
return sd::ops::chunk(vae_output, 2, 2)[0];
} else {
return gaussian_latent_sample(vae_output, rng);
}
}
std::pair<sd::Tensor<float>, sd::Tensor<float>> get_latents_mean_std(const sd::Tensor<float>& latents, int channel_dim) {
GGML_ASSERT(channel_dim >= 0 && static_cast<size_t>(channel_dim) < static_cast<size_t>(latents.dim()));
if (sd_version_is_flux2(version)) {
GGML_ASSERT(latents.shape()[channel_dim] == 128);
std::vector<int64_t> stats_shape(static_cast<size_t>(latents.dim()), 1);
stats_shape[static_cast<size_t>(channel_dim)] = latents.shape()[channel_dim];
auto mean_tensor = sd::Tensor<float>::from_vector({-0.0676f, -0.0715f, -0.0753f, -0.0745f, 0.0223f, 0.0180f, 0.0142f, 0.0184f,
-0.0001f, -0.0063f, -0.0002f, -0.0031f, -0.0272f, -0.0281f, -0.0276f, -0.0290f,
-0.0769f, -0.0672f, -0.0902f, -0.0892f, 0.0168f, 0.0152f, 0.0079f, 0.0086f,
0.0083f, 0.0015f, 0.0003f, -0.0043f, -0.0439f, -0.0419f, -0.0438f, -0.0431f,
-0.0102f, -0.0132f, -0.0066f, -0.0048f, -0.0311f, -0.0306f, -0.0279f, -0.0180f,
0.0030f, 0.0015f, 0.0126f, 0.0145f, 0.0347f, 0.0338f, 0.0337f, 0.0283f,
0.0020f, 0.0047f, 0.0047f, 0.0050f, 0.0123f, 0.0081f, 0.0081f, 0.0146f,
0.0681f, 0.0679f, 0.0767f, 0.0732f, -0.0462f, -0.0474f, -0.0392f, -0.0511f,
-0.0528f, -0.0477f, -0.0470f, -0.0517f, -0.0317f, -0.0316f, -0.0345f, -0.0283f,
0.0510f, 0.0445f, 0.0578f, 0.0458f, -0.0412f, -0.0458f, -0.0487f, -0.0467f,
-0.0088f, -0.0106f, -0.0088f, -0.0046f, -0.0376f, -0.0432f, -0.0436f, -0.0499f,
0.0118f, 0.0166f, 0.0203f, 0.0279f, 0.0113f, 0.0129f, 0.0016f, 0.0072f,
-0.0118f, -0.0018f, -0.0141f, -0.0054f, -0.0091f, -0.0138f, -0.0145f, -0.0187f,
0.0323f, 0.0305f, 0.0259f, 0.0300f, 0.0540f, 0.0614f, 0.0495f, 0.0590f,
-0.0511f, -0.0603f, -0.0478f, -0.0524f, -0.0227f, -0.0274f, -0.0154f, -0.0255f,
-0.0572f, -0.0565f, -0.0518f, -0.0496f, 0.0116f, 0.0054f, 0.0163f, 0.0104f});
mean_tensor.reshape_(stats_shape);
auto std_tensor = sd::Tensor<float>::from_vector({1.8029f, 1.7786f, 1.7868f, 1.7837f, 1.7717f, 1.7590f, 1.7610f, 1.7479f,
1.7336f, 1.7373f, 1.7340f, 1.7343f, 1.8626f, 1.8527f, 1.8629f, 1.8589f,
1.7593f, 1.7526f, 1.7556f, 1.7583f, 1.7363f, 1.7400f, 1.7355f, 1.7394f,
1.7342f, 1.7246f, 1.7392f, 1.7304f, 1.7551f, 1.7513f, 1.7559f, 1.7488f,
1.8449f, 1.8454f, 1.8550f, 1.8535f, 1.8240f, 1.7813f, 1.7854f, 1.7945f,
1.8047f, 1.7876f, 1.7695f, 1.7676f, 1.7782f, 1.7667f, 1.7925f, 1.7848f,
1.7579f, 1.7407f, 1.7483f, 1.7368f, 1.7961f, 1.7998f, 1.7920f, 1.7925f,
1.7780f, 1.7747f, 1.7727f, 1.7749f, 1.7526f, 1.7447f, 1.7657f, 1.7495f,
1.7775f, 1.7720f, 1.7813f, 1.7813f, 1.8162f, 1.8013f, 1.8023f, 1.8033f,
1.7527f, 1.7331f, 1.7563f, 1.7482f, 1.7610f, 1.7507f, 1.7681f, 1.7613f,
1.7665f, 1.7545f, 1.7828f, 1.7726f, 1.7896f, 1.7999f, 1.7864f, 1.7760f,
1.7613f, 1.7625f, 1.7560f, 1.7577f, 1.7783f, 1.7671f, 1.7810f, 1.7799f,
1.7201f, 1.7068f, 1.7265f, 1.7091f, 1.7793f, 1.7578f, 1.7502f, 1.7455f,
1.7587f, 1.7500f, 1.7525f, 1.7362f, 1.7616f, 1.7572f, 1.7444f, 1.7430f,
1.7509f, 1.7610f, 1.7634f, 1.7612f, 1.7254f, 1.7135f, 1.7321f, 1.7226f,
1.7664f, 1.7624f, 1.7718f, 1.7664f, 1.7457f, 1.7441f, 1.7569f, 1.7530f});
std_tensor.reshape_(stats_shape);
return {std::move(mean_tensor), std::move(std_tensor)};
} else {
GGML_ABORT("unknown version %d", version);
}
}
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
if (sd_version_is_flux2(version)) {
int channel_dim = 2;
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents, channel_dim);
return (latents * std_tensor) / scale_factor + mean_tensor;
}
return (latents / scale_factor) + shift_factor;
}
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
if (sd_version_is_flux2(version)) {
int channel_dim = 2;
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents, channel_dim);
return ((latents - mean_tensor) * scale_factor) / std_tensor;
}
return (latents - shift_factor) * scale_factor;
}
int get_encoder_output_channels(int input_channels) {
return ae.get_encoder_output_channels();
} }
void test() { void test() {
struct ggml_init_params params; ggml_init_params params;
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); ggml_context* ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr); GGML_ASSERT(ctx != nullptr);
{ {
// CPU, x{1, 3, 64, 64}: Pass // CPU, x{1, 3, 64, 64}: Pass
// CUDA, x{1, 3, 64, 64}: Pass, but sill get wrong result for some image, may be due to interlnal nan // CUDA, x{1, 3, 64, 64}: Pass, but sill get wrong result for some image, may be due to interlnal nan
// CPU, x{2, 3, 64, 64}: Wrong result // CPU, x{2, 3, 64, 64}: Wrong result
// CUDA, x{2, 3, 64, 64}: Wrong result, and different from CPU result // CUDA, x{2, 3, 64, 64}: Wrong result, and different from CPU result
auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 64, 64, 3, 2); sd::Tensor<float> x({64, 64, 3, 2});
ggml_set_f32(x, 0.5f); x.fill_(0.5f);
print_ggml_tensor(x); print_sd_tensor(x);
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
compute(8, x, false, &out, work_ctx); auto out_opt = _compute(8, x, false);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("encode test done in %lldms", t1 - t0); LOG_DEBUG("encode test done in %lldms", t1 - t0);
} }
@ -757,19 +832,21 @@ struct AutoEncoderKL : public VAE {
// CUDA, z{1, 4, 8, 8}: Pass // CUDA, z{1, 4, 8, 8}: Pass
// CPU, z{3, 4, 8, 8}: Wrong result // CPU, z{3, 4, 8, 8}: Wrong result
// CUDA, z{3, 4, 8, 8}: Wrong result, and different from CPU result // CUDA, z{3, 4, 8, 8}: Wrong result, and different from CPU result
auto z = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 4, 1); sd::Tensor<float> z({8, 8, 4, 1});
ggml_set_f32(z, 0.5f); z.fill_(0.5f);
print_ggml_tensor(z); print_sd_tensor(z);
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
compute(8, z, true, &out, work_ctx); auto out_opt = _compute(8, z, true);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("decode test done in %lldms", t1 - t0); LOG_DEBUG("decode test done in %lldms", t1 - t0);
} }
}; };
}; };
#endif #endif // __AUTO_ENCODER_KL_HPP__

View File

@ -8,7 +8,9 @@
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include "condition_cache_utils.hpp"
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
#include "tensor.hpp"
struct DBCacheConfig { struct DBCacheConfig {
bool enabled = false; bool enabled = false;
@ -603,87 +605,6 @@ inline std::vector<int> generate_scm_mask(
return mask; return mask;
} }
inline std::vector<int> get_scm_preset(const std::string& preset, int total_steps) {
struct Preset {
std::vector<int> compute_bins;
std::vector<int> cache_bins;
};
Preset slow = {{8, 3, 3, 2, 1, 1}, {1, 2, 2, 2, 3}};
Preset medium = {{6, 2, 2, 2, 2, 1}, {1, 3, 3, 3, 3}};
Preset fast = {{6, 1, 1, 1, 1, 1}, {1, 3, 4, 5, 4}};
Preset ultra = {{4, 1, 1, 1, 1}, {2, 5, 6, 7}};
Preset* p = nullptr;
if (preset == "slow" || preset == "s" || preset == "S")
p = &slow;
else if (preset == "medium" || preset == "m" || preset == "M")
p = &medium;
else if (preset == "fast" || preset == "f" || preset == "F")
p = &fast;
else if (preset == "ultra" || preset == "u" || preset == "U")
p = &ultra;
else
return {};
if (total_steps != 28 && total_steps > 0) {
float scale = static_cast<float>(total_steps) / 28.0f;
std::vector<int> scaled_compute, scaled_cache;
for (int v : p->compute_bins) {
scaled_compute.push_back(std::max(1, static_cast<int>(v * scale + 0.5f)));
}
for (int v : p->cache_bins) {
scaled_cache.push_back(std::max(1, static_cast<int>(v * scale + 0.5f)));
}
return generate_scm_mask(scaled_compute, scaled_cache, total_steps);
}
return generate_scm_mask(p->compute_bins, p->cache_bins, total_steps);
}
inline float get_preset_threshold(const std::string& preset) {
if (preset == "slow" || preset == "s" || preset == "S")
return 0.20f;
if (preset == "medium" || preset == "m" || preset == "M")
return 0.25f;
if (preset == "fast" || preset == "f" || preset == "F")
return 0.30f;
if (preset == "ultra" || preset == "u" || preset == "U")
return 0.34f;
return 0.08f;
}
inline int get_preset_warmup(const std::string& preset) {
if (preset == "slow" || preset == "s" || preset == "S")
return 8;
if (preset == "medium" || preset == "m" || preset == "M")
return 6;
if (preset == "fast" || preset == "f" || preset == "F")
return 6;
if (preset == "ultra" || preset == "u" || preset == "U")
return 4;
return 8;
}
inline int get_preset_Fn(const std::string& preset) {
if (preset == "slow" || preset == "s" || preset == "S")
return 8;
if (preset == "medium" || preset == "m" || preset == "M")
return 8;
if (preset == "fast" || preset == "f" || preset == "F")
return 6;
if (preset == "ultra" || preset == "u" || preset == "U")
return 4;
return 8;
}
inline int get_preset_Bn(const std::string& preset) {
(void)preset;
return 0;
}
inline void parse_dbcache_options(const std::string& opts, DBCacheConfig& cfg) { inline void parse_dbcache_options(const std::string& opts, DBCacheConfig& cfg) {
if (opts.empty()) if (opts.empty())
return; return;
@ -852,35 +773,37 @@ struct CacheDitConditionState {
return it != cache_diffs.end() && !it->second.diff.empty(); return it != cache_diffs.end() && !it->second.diff.empty();
} }
void update_cache(const void* cond, const float* input, const float* output, size_t size) { void update_cache(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
CacheEntry& entry = cache_diffs[cond]; CacheEntry& entry = cache_diffs[cond];
entry.diff.resize(size); if (!sd::store_condition_cache_diff(&entry.diff, input, output)) {
for (size_t i = 0; i < size; i++) { entry.prev_input.clear();
entry.diff[i] = output[i] - input[i]; entry.prev_output.clear();
entry.has_prev = false;
return;
} }
size_t size = static_cast<size_t>(output.numel());
const float* input_data = input.data();
const float* output_data = output.data();
entry.prev_input.resize(size); entry.prev_input.resize(size);
entry.prev_output.resize(size); entry.prev_output.resize(size);
for (size_t i = 0; i < size; i++) { for (size_t i = 0; i < size; i++) {
entry.prev_input[i] = input[i]; entry.prev_input[i] = input_data[i];
entry.prev_output[i] = output[i]; entry.prev_output[i] = output_data[i];
} }
entry.has_prev = true; entry.has_prev = true;
} }
void apply_cache(const void* cond, const float* input, float* output, size_t size) { void apply_cache(const void* cond,
const sd::Tensor<float>& input,
sd::Tensor<float>* output) {
auto it = cache_diffs.find(cond); auto it = cache_diffs.find(cond);
if (it == cache_diffs.end() || it->second.diff.empty()) if (it == cache_diffs.end() || it->second.diff.empty())
return; return;
if (it->second.diff.size() != size) sd::apply_condition_cache_diff(it->second.diff, input, output);
return;
for (size_t i = 0; i < size; i++) {
output[i] = input[i] + it->second.diff[i];
}
} }
bool before_condition(const void* cond, struct ggml_tensor* input, struct ggml_tensor* output, float sigma, int step_index) { bool before_condition(const void* cond, const sd::Tensor<float>& input, sd::Tensor<float>* output, float sigma, int step_index) {
if (!enabled() || step_index < 0) if (!enabled() || step_index < 0)
return false; return false;
@ -900,8 +823,7 @@ struct CacheDitConditionState {
if (skip_current_step) { if (skip_current_step) {
if (has_cache(cond)) { if (has_cache(cond)) {
apply_cache(cond, (float*)input->data, (float*)output->data, apply_cache(cond, input, output);
static_cast<size_t>(ggml_nelements(output)));
return true; return true;
} }
return false; return false;
@ -914,13 +836,13 @@ struct CacheDitConditionState {
if (it == cache_diffs.end() || !it->second.has_prev) if (it == cache_diffs.end() || !it->second.has_prev)
return false; return false;
size_t ne = static_cast<size_t>(ggml_nelements(input)); size_t ne = static_cast<size_t>(input.numel());
if (it->second.prev_input.size() != ne) if (it->second.prev_input.size() != ne)
return false; return false;
float* input_data = (float*)input->data; const float* input_data = input.data();
float diff = CacheDitState::calculate_residual_diff( float diff = CacheDitState::calculate_residual_diff(
it->second.prev_input.data(), input_data, ne); it->second.prev_input.data(), input_data, ne);
float effective_threshold = config.residual_diff_threshold; float effective_threshold = config.residual_diff_threshold;
if (config.Fn_compute_blocks > 0) { if (config.Fn_compute_blocks > 0) {
@ -940,7 +862,7 @@ struct CacheDitConditionState {
cached_steps.push_back(current_step_index); cached_steps.push_back(current_step_index);
continuous_cached_steps++; continuous_cached_steps++;
accumulated_residual_diff += diff; accumulated_residual_diff += diff;
apply_cache(cond, input_data, (float*)output->data, ne); apply_cache(cond, input, output);
return true; return true;
} }
@ -948,15 +870,14 @@ struct CacheDitConditionState {
return false; return false;
} }
void after_condition(const void* cond, struct ggml_tensor* input, struct ggml_tensor* output) { void after_condition(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
if (!step_is_active()) if (!step_is_active())
return; return;
size_t ne = static_cast<size_t>(ggml_nelements(output)); update_cache(cond, input, output);
update_cache(cond, (float*)input->data, (float*)output->data, ne);
if (cond == anchor_condition && taylor_config.enabled) { if (cond == anchor_condition && taylor_config.enabled) {
taylor_state.update_derivatives((float*)output->data, ne, current_step_index); taylor_state.update_derivatives(output.data(), static_cast<size_t>(output.numel()), current_step_index);
} }
} }

View File

@ -4,6 +4,7 @@
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
#include "model.h" #include "model.h"
#include "tokenize_util.h" #include "tokenize_util.h"
#include "vocab/vocab.h"
/*================================================== CLIPTokenizer ===================================================*/ /*================================================== CLIPTokenizer ===================================================*/
@ -110,7 +111,7 @@ public:
if (merges_utf8_str.size() > 0) { if (merges_utf8_str.size() > 0) {
load_from_merges(merges_utf8_str); load_from_merges(merges_utf8_str);
} else { } else {
load_from_merges(ModelLoader::load_merges()); load_from_merges(load_clip_merges());
} }
add_special_token("<|startoftext|>"); add_special_token("<|startoftext|>");
add_special_token("<|endoftext|>"); add_special_token("<|endoftext|>");
@ -472,7 +473,7 @@ public:
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, n_token, d_model] // x: [N, n_token, d_model]
auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]); auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]);
auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]); auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
@ -510,7 +511,7 @@ public:
blocks["mlp"] = std::shared_ptr<GGMLBlock>(new CLIPMLP(d_model, intermediate_size)); blocks["mlp"] = std::shared_ptr<GGMLBlock>(new CLIPMLP(d_model, intermediate_size));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* mask = nullptr) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, ggml_tensor* mask = nullptr) {
// x: [N, n_token, d_model] // x: [N, n_token, d_model]
auto self_attn = std::dynamic_pointer_cast<MultiheadAttention>(blocks["self_attn"]); auto self_attn = std::dynamic_pointer_cast<MultiheadAttention>(blocks["self_attn"]);
auto layer_norm1 = std::dynamic_pointer_cast<LayerNorm>(blocks["layer_norm1"]); auto layer_norm1 = std::dynamic_pointer_cast<LayerNorm>(blocks["layer_norm1"]);
@ -540,10 +541,10 @@ public:
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* mask = nullptr, ggml_tensor* mask = nullptr,
int clip_skip = -1) { int clip_skip = -1) {
// x: [N, n_token, d_model] // x: [N, n_token, d_model]
int layer_idx = n_layer - 1; int layer_idx = n_layer - 1;
// LOG_DEBUG("clip_skip %d", clip_skip); // LOG_DEBUG("clip_skip %d", clip_skip);
@ -572,7 +573,7 @@ protected:
int64_t num_positions; int64_t num_positions;
bool force_clip_f32; bool force_clip_f32;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
enum ggml_type token_wtype = GGML_TYPE_F32; enum ggml_type token_wtype = GGML_TYPE_F32;
if (!force_clip_f32) { if (!force_clip_f32) {
token_wtype = get_type(prefix + "token_embedding.weight", tensor_storage_map, GGML_TYPE_F32); token_wtype = get_type(prefix + "token_embedding.weight", tensor_storage_map, GGML_TYPE_F32);
@ -596,13 +597,13 @@ public:
force_clip_f32(force_clip_f32) { force_clip_f32(force_clip_f32) {
} }
struct ggml_tensor* get_token_embed_weight() { ggml_tensor* get_token_embed_weight() {
return params["token_embedding.weight"]; return params["token_embedding.weight"];
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* input_ids, ggml_tensor* input_ids,
struct ggml_tensor* custom_embed_weight) { ggml_tensor* custom_embed_weight) {
// input_ids: [N, n_token] // input_ids: [N, n_token]
auto token_embed_weight = params["token_embedding.weight"]; auto token_embed_weight = params["token_embedding.weight"];
auto position_embed_weight = params["position_embedding.weight"]; auto position_embed_weight = params["position_embedding.weight"];
@ -629,7 +630,7 @@ protected:
int num_patches; int num_patches;
int64_t num_positions; int64_t num_positions;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
enum ggml_type patch_wtype = GGML_TYPE_F16; enum ggml_type patch_wtype = GGML_TYPE_F16;
enum ggml_type class_wtype = GGML_TYPE_F32; enum ggml_type class_wtype = GGML_TYPE_F32;
enum ggml_type position_wtype = GGML_TYPE_F32; enum ggml_type position_wtype = GGML_TYPE_F32;
@ -652,7 +653,7 @@ public:
num_positions = num_patches + 1; num_positions = num_patches + 1;
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* pixel_values) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* pixel_values) {
// pixel_values: [N, num_channels, image_size, image_size] // pixel_values: [N, num_channels, image_size, image_size]
// return: [N, num_positions, embed_dim] // return: [N, num_positions, embed_dim]
GGML_ASSERT(pixel_values->ne[0] == image_size && pixel_values->ne[1] == image_size && pixel_values->ne[2] == num_channels); GGML_ASSERT(pixel_values->ne[0] == image_size && pixel_values->ne[1] == image_size && pixel_values->ne[2] == num_channels);
@ -662,20 +663,20 @@ public:
auto position_embed_weight = params["position_embedding.weight"]; auto position_embed_weight = params["position_embedding.weight"];
// concat(patch_embedding, class_embedding) + position_embedding // concat(patch_embedding, class_embedding) + position_embedding
struct ggml_tensor* patch_embedding; ggml_tensor* patch_embedding;
int64_t N = pixel_values->ne[3]; int64_t N = pixel_values->ne[3];
patch_embedding = ggml_ext_conv_2d(ctx->ggml_ctx, pixel_values, patch_embed_weight, nullptr, patch_size, patch_size); // [N, embed_dim, image_size // pacht_size, image_size // pacht_size] patch_embedding = ggml_ext_conv_2d(ctx->ggml_ctx, pixel_values, patch_embed_weight, nullptr, patch_size, patch_size); // [N, embed_dim, image_size // pacht_size, image_size // pacht_size]
patch_embedding = ggml_reshape_3d(ctx->ggml_ctx, patch_embedding, num_patches, embed_dim, N); // [N, embed_dim, num_patches] patch_embedding = ggml_reshape_3d(ctx->ggml_ctx, patch_embedding, num_patches, embed_dim, N); // [N, embed_dim, num_patches]
patch_embedding = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, patch_embedding, 1, 0, 2, 3)); // [N, num_patches, embed_dim] patch_embedding = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, patch_embedding, 1, 0, 2, 3)); // [N, num_patches, embed_dim]
patch_embedding = ggml_reshape_4d(ctx->ggml_ctx, patch_embedding, 1, embed_dim, num_patches, N); // [N, num_patches, embed_dim, 1] patch_embedding = ggml_reshape_4d(ctx->ggml_ctx, patch_embedding, 1, embed_dim, num_patches, N); // [N, num_patches, embed_dim, 1]
struct ggml_tensor* class_embedding = ggml_new_tensor_2d(ctx->ggml_ctx, GGML_TYPE_F32, embed_dim, N); ggml_tensor* class_embedding = ggml_new_tensor_2d(ctx->ggml_ctx, GGML_TYPE_F32, embed_dim, N);
class_embedding = ggml_repeat(ctx->ggml_ctx, class_embed_weight, class_embedding); // [N, embed_dim] class_embedding = ggml_repeat(ctx->ggml_ctx, class_embed_weight, class_embedding); // [N, embed_dim]
class_embedding = ggml_reshape_4d(ctx->ggml_ctx, class_embedding, 1, embed_dim, 1, N); // [N, 1, embed_dim, 1] class_embedding = ggml_reshape_4d(ctx->ggml_ctx, class_embedding, 1, embed_dim, 1, N); // [N, 1, embed_dim, 1]
struct ggml_tensor* x = ggml_concat(ctx->ggml_ctx, class_embedding, patch_embedding, 2); // [N, num_positions, embed_dim, 1] ggml_tensor* x = ggml_concat(ctx->ggml_ctx, class_embedding, patch_embedding, 2); // [N, num_positions, embed_dim, 1]
x = ggml_reshape_3d(ctx->ggml_ctx, x, embed_dim, num_positions, N); // [N, num_positions, embed_dim] x = ggml_reshape_3d(ctx->ggml_ctx, x, embed_dim, num_positions, N); // [N, num_positions, embed_dim]
x = ggml_add(ctx->ggml_ctx, x, position_embed_weight); x = ggml_add(ctx->ggml_ctx, x, position_embed_weight);
return x; // [N, num_positions, embed_dim] return x; // [N, num_positions, embed_dim]
} }
}; };
@ -692,7 +693,7 @@ enum CLIPVersion {
class CLIPTextModel : public GGMLBlock { class CLIPTextModel : public GGMLBlock {
protected: protected:
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
if (version == OPEN_CLIP_VIT_BIGG_14) { if (version == OPEN_CLIP_VIT_BIGG_14) {
enum ggml_type wtype = GGML_TYPE_F32; enum ggml_type wtype = GGML_TYPE_F32;
params["text_projection"] = ggml_new_tensor_2d(ctx, wtype, projection_dim, hidden_size); params["text_projection"] = ggml_new_tensor_2d(ctx, wtype, projection_dim, hidden_size);
@ -733,18 +734,18 @@ public:
blocks["final_layer_norm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size)); blocks["final_layer_norm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size));
} }
struct ggml_tensor* get_token_embed_weight() { ggml_tensor* get_token_embed_weight() {
auto embeddings = std::dynamic_pointer_cast<CLIPEmbeddings>(blocks["embeddings"]); auto embeddings = std::dynamic_pointer_cast<CLIPEmbeddings>(blocks["embeddings"]);
return embeddings->get_token_embed_weight(); return embeddings->get_token_embed_weight();
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* input_ids, ggml_tensor* input_ids,
struct ggml_tensor* tkn_embeddings, ggml_tensor* tkn_embeddings,
struct ggml_tensor* mask = nullptr, ggml_tensor* mask = nullptr,
size_t max_token_idx = 0, size_t max_token_idx = 0,
bool return_pooled = false, bool return_pooled = false,
int clip_skip = -1) { int clip_skip = -1) {
// input_ids: [N, n_token] // input_ids: [N, n_token]
auto embeddings = std::dynamic_pointer_cast<CLIPEmbeddings>(blocks["embeddings"]); auto embeddings = std::dynamic_pointer_cast<CLIPEmbeddings>(blocks["embeddings"]);
auto encoder = std::dynamic_pointer_cast<CLIPEncoder>(blocks["encoder"]); auto encoder = std::dynamic_pointer_cast<CLIPEncoder>(blocks["encoder"]);
@ -803,10 +804,10 @@ public:
blocks["post_layernorm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size)); blocks["post_layernorm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* pixel_values, ggml_tensor* pixel_values,
bool return_pooled = true, bool return_pooled = true,
int clip_skip = -1) { int clip_skip = -1) {
// pixel_values: [N, num_channels, image_size, image_size] // pixel_values: [N, num_channels, image_size, image_size]
auto embeddings = std::dynamic_pointer_cast<CLIPVisionEmbeddings>(blocks["embeddings"]); auto embeddings = std::dynamic_pointer_cast<CLIPVisionEmbeddings>(blocks["embeddings"]);
auto pre_layernorm = std::dynamic_pointer_cast<LayerNorm>(blocks["pre_layernorm"]); auto pre_layernorm = std::dynamic_pointer_cast<LayerNorm>(blocks["pre_layernorm"]);
@ -838,7 +839,7 @@ protected:
int64_t out_features; int64_t out_features;
bool transpose_weight; bool transpose_weight;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32); enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
if (transpose_weight) { if (transpose_weight) {
params["weight"] = ggml_new_tensor_2d(ctx, wtype, out_features, in_features); params["weight"] = ggml_new_tensor_2d(ctx, wtype, out_features, in_features);
@ -855,8 +856,8 @@ public:
out_features(out_features), out_features(out_features),
transpose_weight(transpose_weight) {} transpose_weight(transpose_weight) {}
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
struct ggml_tensor* w = params["weight"]; ggml_tensor* w = params["weight"];
if (transpose_weight) { if (transpose_weight) {
w = ggml_cont(ctx->ggml_ctx, ggml_transpose(ctx->ggml_ctx, w)); w = ggml_cont(ctx->ggml_ctx, ggml_transpose(ctx->ggml_ctx, w));
} }
@ -885,10 +886,10 @@ public:
blocks["visual_projection"] = std::shared_ptr<GGMLBlock>(new CLIPProjection(hidden_size, projection_dim, transpose_proj_w)); blocks["visual_projection"] = std::shared_ptr<GGMLBlock>(new CLIPProjection(hidden_size, projection_dim, transpose_proj_w));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* pixel_values, ggml_tensor* pixel_values,
bool return_pooled = true, bool return_pooled = true,
int clip_skip = -1) { int clip_skip = -1) {
// pixel_values: [N, num_channels, image_size, image_size] // pixel_values: [N, num_channels, image_size, image_size]
// return: [N, projection_dim] if return_pooled else [N, n_token, hidden_size] // return: [N, projection_dim] if return_pooled else [N, n_token, hidden_size]
auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]); auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]);
@ -935,17 +936,17 @@ struct CLIPTextModelRunner : public GGMLRunner {
return "clip"; return "clip";
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
model.get_param_tensors(tensors, prefix); model.get_param_tensors(tensors, prefix);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* input_ids, ggml_tensor* input_ids,
struct ggml_tensor* embeddings, ggml_tensor* embeddings,
struct ggml_tensor* mask, ggml_tensor* mask,
size_t max_token_idx = 0, size_t max_token_idx = 0,
bool return_pooled = false, bool return_pooled = false,
int clip_skip = -1) { int clip_skip = -1) {
size_t N = input_ids->ne[1]; size_t N = input_ids->ne[1];
size_t n_token = input_ids->ne[0]; size_t n_token = input_ids->ne[0];
if (input_ids->ne[0] > model.n_token) { if (input_ids->ne[0] > model.n_token) {
@ -956,17 +957,16 @@ struct CLIPTextModelRunner : public GGMLRunner {
return model.forward(ctx, input_ids, embeddings, mask, max_token_idx, return_pooled, clip_skip); return model.forward(ctx, input_ids, embeddings, mask, max_token_idx, return_pooled, clip_skip);
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids, ggml_cgraph* build_graph(const sd::Tensor<int32_t>& input_ids_tensor,
int num_custom_embeddings = 0, int num_custom_embeddings = 0,
void* custom_embeddings_data = nullptr, void* custom_embeddings_data = nullptr,
size_t max_token_idx = 0, size_t max_token_idx = 0,
bool return_pooled = false, bool return_pooled = false,
int clip_skip = -1) { int clip_skip = -1) {
struct ggml_cgraph* gf = new_graph_custom(2048); ggml_cgraph* gf = new_graph_custom(2048);
ggml_tensor* input_ids = make_input(input_ids_tensor);
input_ids = to_backend(input_ids); ggml_tensor* embeddings = nullptr;
struct ggml_tensor* embeddings = nullptr;
if (num_custom_embeddings > 0 && custom_embeddings_data != nullptr) { if (num_custom_embeddings > 0 && custom_embeddings_data != nullptr) {
auto token_embed_weight = model.get_token_embed_weight(); auto token_embed_weight = model.get_token_embed_weight();
@ -996,26 +996,28 @@ struct CLIPTextModelRunner : public GGMLRunner {
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, embeddings, attention_mask, max_token_idx, return_pooled, clip_skip); ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, embeddings, attention_mask, max_token_idx, return_pooled, clip_skip);
ggml_build_forward_expand(gf, hidden_states); ggml_build_forward_expand(gf, hidden_states);
return gf; return gf;
} }
bool compute(const int n_threads, sd::Tensor<float> compute(const int n_threads,
struct ggml_tensor* input_ids, const sd::Tensor<int32_t>& input_ids,
int num_custom_embeddings, int num_custom_embeddings,
void* custom_embeddings_data, void* custom_embeddings_data,
size_t max_token_idx, size_t max_token_idx,
bool return_pooled, bool return_pooled,
int clip_skip, int clip_skip) {
ggml_tensor** output, auto get_graph = [&]() -> ggml_cgraph* {
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(input_ids, num_custom_embeddings, custom_embeddings_data, max_token_idx, return_pooled, clip_skip); return build_graph(input_ids, num_custom_embeddings, custom_embeddings_data, max_token_idx, return_pooled, clip_skip);
}; };
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx); auto result = GGMLRunner::compute<float>(get_graph, n_threads, true);
if (return_pooled) {
return take_or_empty(std::move(result));
}
return restore_trailing_singleton_dims(std::move(result), 3);
} }
}; };

View File

@ -1,5 +1,5 @@
#ifndef __COMMON_HPP__ #ifndef __COMMON_BLOCK_HPP__
#define __COMMON_HPP__ #define __COMMON_BLOCK_HPP__
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
@ -23,7 +23,7 @@ public:
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, channels, h, w] // x: [N, channels, h, w]
if (vae_downsample) { if (vae_downsample) {
auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]); auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]);
@ -52,7 +52,7 @@ public:
blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, out_channels, {3, 3}, {1, 1}, {1, 1})); blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, channels, h, w] // x: [N, channels, h, w]
auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]); auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]);
@ -121,7 +121,7 @@ public:
} }
} }
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* emb = nullptr) { virtual ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, ggml_tensor* emb = nullptr) {
// For dims==3, we reduce dimension from 5d to 4d by merging h and w, in order not to change ggml // For dims==3, we reduce dimension from 5d to 4d by merging h and w, in order not to change ggml
// [N, c, t, h, w] => [N, c, t, h * w] // [N, c, t, h, w] => [N, c, t, h * w]
// x: [N, channels, h, w] if dims == 2 else [N, channels, t, h, w] // x: [N, channels, h, w] if dims == 2 else [N, channels, t, h, w]
@ -188,7 +188,7 @@ public:
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out * 2)); blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out * 2));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
// x: [ne3, ne2, ne1, dim_in] // x: [ne3, ne2, ne1, dim_in]
// return: [ne3, ne2, ne1, dim_out] // return: [ne3, ne2, ne1, dim_out]
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]); auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
@ -214,7 +214,7 @@ public:
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out, bias)); blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out, bias));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
// x: [ne3, ne2, ne1, dim_in] // x: [ne3, ne2, ne1, dim_in]
// return: [ne3, ne2, ne1, dim_out] // return: [ne3, ne2, ne1, dim_out]
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]); auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
@ -258,7 +258,7 @@ public:
blocks["net.2"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim_out, true, false, force_prec_f32, scale)); blocks["net.2"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim_out, true, false, force_prec_f32, scale));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [ne3, ne2, ne1, dim] // x: [ne3, ne2, ne1, dim]
// return: [ne3, ne2, ne1, dim_out] // return: [ne3, ne2, ne1, dim_out]
@ -297,9 +297,9 @@ public:
// to_out_1 is nn.Dropout(), skip for inference // to_out_1 is nn.Dropout(), skip for inference
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* context) { ggml_tensor* context) {
// x: [N, n_token, query_dim] // x: [N, n_token, query_dim]
// context: [N, n_context, context_dim] // context: [N, n_context, context_dim]
// return: [N, n_token, query_dim] // return: [N, n_token, query_dim]
@ -355,9 +355,9 @@ public:
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* context) { ggml_tensor* context) {
// x: [N, n_token, query_dim] // x: [N, n_token, query_dim]
// context: [N, n_context, context_dim] // context: [N, n_context, context_dim]
// return: [N, n_token, query_dim] // return: [N, n_token, query_dim]
@ -406,7 +406,7 @@ protected:
int64_t context_dim = 768; // hidden_size, 1024 for VERSION_SD2 int64_t context_dim = 768; // hidden_size, 1024 for VERSION_SD2
bool use_linear = false; bool use_linear = false;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") {
auto iter = tensor_storage_map.find(prefix + "proj_out.weight"); auto iter = tensor_storage_map.find(prefix + "proj_out.weight");
if (iter != tensor_storage_map.end()) { if (iter != tensor_storage_map.end()) {
int64_t inner_dim = n_head * d_head; int64_t inner_dim = n_head * d_head;
@ -456,9 +456,9 @@ public:
} }
} }
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx, virtual ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* context) { ggml_tensor* context) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// context: [N, max_position(aka n_token), hidden_size(aka context_dim)] // context: [N, max_position(aka n_token), hidden_size(aka context_dim)]
auto norm = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm"]); auto norm = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm"]);
@ -510,7 +510,7 @@ public:
class AlphaBlender : public GGMLBlock { class AlphaBlender : public GGMLBlock {
protected: protected:
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, std::string prefix = "") override {
// Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix // Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
enum ggml_type wtype = GGML_TYPE_F32; enum ggml_type wtype = GGML_TYPE_F32;
params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1); params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
@ -530,9 +530,9 @@ public:
// since mix_factor.shape is [1,], we don't need rearrange using rearrange_pattern // since mix_factor.shape is [1,], we don't need rearrange using rearrange_pattern
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x_spatial, ggml_tensor* x_spatial,
struct ggml_tensor* x_temporal) { ggml_tensor* x_temporal) {
// image_only_indicator is always tensor([0.]) // image_only_indicator is always tensor([0.])
float alpha = get_alpha(); float alpha = get_alpha();
auto x = ggml_add(ctx->ggml_ctx, auto x = ggml_add(ctx->ggml_ctx,
@ -555,10 +555,10 @@ public:
blocks["time_mixer"] = std::shared_ptr<GGMLBlock>(new AlphaBlender()); blocks["time_mixer"] = std::shared_ptr<GGMLBlock>(new AlphaBlender());
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* emb, ggml_tensor* emb,
int num_video_frames) { int num_video_frames) {
// x: [N, channels, h, w] aka [b*t, channels, h, w] // x: [N, channels, h, w] aka [b*t, channels, h, w]
// emb: [N, emb_channels] aka [b*t, emb_channels] // emb: [N, emb_channels] aka [b*t, emb_channels]
// image_only_indicator is always tensor([0.]) // image_only_indicator is always tensor([0.])
@ -590,4 +590,4 @@ public:
} }
}; };
#endif // __COMMON_HPP__ #endif // __COMMON_BLOCK_HPP__

108
src/common_dit.hpp Normal file
View File

@ -0,0 +1,108 @@
#ifndef __COMMON_DIT_HPP__
#define __COMMON_DIT_HPP__
#include "ggml_extend.hpp"
namespace DiT {
inline ggml_tensor* patchify(ggml_context* ctx,
ggml_tensor* x,
int pw,
int ph,
bool patch_last = true) {
// x: [N, C, H, W]
// return: [N, h*w, C*ph*pw] if patch_last else [N, h*w, ph*pw*C]
int64_t N = x->ne[3];
int64_t C = x->ne[2];
int64_t H = x->ne[1];
int64_t W = x->ne[0];
int64_t h = H / ph;
int64_t w = W / pw;
GGML_ASSERT(h * ph == H && w * pw == W);
x = ggml_reshape_4d(ctx, x, pw, w, ph, h * C * N); // [N*C*h, ph, w, pw]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, w, ph, pw]
x = ggml_reshape_4d(ctx, x, pw * ph, w * h, C, N); // [N, C, h*w, ph*pw]
if (patch_last) {
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N, h*w, C, ph*pw]
x = ggml_reshape_3d(ctx, x, pw * ph * C, w * h, N); // [N, h*w, C*ph*pw]
} else {
x = ggml_cont(ctx, ggml_ext_torch_permute(ctx, x, 2, 0, 1, 3)); // [N, h*w, C, ph*pw]
x = ggml_reshape_3d(ctx, x, C * pw * ph, w * h, N); // [N, h*w, ph*pw*C]
}
return x;
}
inline ggml_tensor* unpatchify(ggml_context* ctx,
ggml_tensor* x,
int64_t h,
int64_t w,
int ph,
int pw,
bool patch_last = true) {
// x: [N, h*w, C*ph*pw] if patch_last else [N, h*w, ph*pw*C]
// return: [N, C, H, W]
int64_t N = x->ne[2];
int64_t C = x->ne[0] / ph / pw;
int64_t H = h * ph;
int64_t W = w * pw;
GGML_ASSERT(C * ph * pw == x->ne[0]);
if (patch_last) {
x = ggml_reshape_4d(ctx, x, pw * ph, C, w * h, N); // [N, h*w, C, ph*pw]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N, C, h*w, ph*pw]
} else {
x = ggml_reshape_4d(ctx, x, C, pw * ph, w * h, N); // [N, h*w, ph*pw, C]
x = ggml_cont(ctx, ggml_permute(ctx, x, 2, 0, 1, 3)); // [N, C, h*w, ph*pw]
}
x = ggml_reshape_4d(ctx, x, pw, ph, w, h * C * N); // [N*C*h, w, ph, pw]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, ph, w, pw]
x = ggml_reshape_4d(ctx, x, W, H, C, N); // [N, C, h*ph, w*pw]
return x;
}
inline ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx,
ggml_tensor* x,
int ph,
int pw) {
int64_t W = x->ne[0];
int64_t H = x->ne[1];
int pad_h = (ph - H % ph) % ph;
int pad_w = (pw - W % pw) % pw;
x = ggml_ext_pad(ctx->ggml_ctx, x, pad_w, pad_h, 0, 0, ctx->circular_x_enabled, ctx->circular_y_enabled);
return x;
}
inline ggml_tensor* pad_and_patchify(GGMLRunnerContext* ctx,
ggml_tensor* x,
int ph,
int pw,
bool patch_last = true) {
x = pad_to_patch_size(ctx, x, ph, pw);
x = patchify(ctx->ggml_ctx, x, ph, pw, patch_last);
return x;
}
inline ggml_tensor* unpatchify_and_crop(ggml_context* ctx,
ggml_tensor* x,
int64_t H,
int64_t W,
int ph,
int pw,
bool patch_last = true) {
int pad_h = (ph - H % ph) % ph;
int pad_w = (pw - W % pw) % pw;
int64_t h = ((H + pad_h) / ph);
int64_t w = ((W + pad_w) / pw);
x = unpatchify(ctx, x, h, w, ph, pw, patch_last); // [N, C, H + pad_h, W + pad_w]
x = ggml_ext_slice(ctx, x, 1, 0, H); // [N, C, H, W + pad_w]
x = ggml_ext_slice(ctx, x, 0, 0, W); // [N, C, H, W]
return x;
}
} // namespace DiT
#endif // __COMMON_DIT_HPP__

View File

@ -0,0 +1,64 @@
#ifndef __CONDITION_CACHE_UTILS_HPP__
#define __CONDITION_CACHE_UTILS_HPP__
#include <vector>
#include "tensor.hpp"
namespace sd {
inline bool store_condition_cache_diff(std::vector<float>* diff,
const sd::Tensor<float>& input,
const sd::Tensor<float>& output) {
if (diff == nullptr || input.empty() || output.empty()) {
return false;
}
size_t input_size = static_cast<size_t>(input.numel());
size_t output_size = static_cast<size_t>(output.numel());
if (input_size == 0 || input_size != output_size) {
diff->clear();
return false;
}
const float* input_data = input.data();
const float* output_data = output.data();
if (input_data == nullptr || output_data == nullptr) {
diff->clear();
return false;
}
diff->resize(output_size);
for (size_t i = 0; i < output_size; ++i) {
(*diff)[i] = output_data[i] - input_data[i];
}
return true;
}
inline bool apply_condition_cache_diff(const std::vector<float>& diff,
const sd::Tensor<float>& input,
sd::Tensor<float>* output) {
if (output == nullptr || input.empty() || diff.empty()) {
return false;
}
size_t input_size = static_cast<size_t>(input.numel());
if (input_size == 0 || diff.size() != input_size) {
return false;
}
*output = input;
float* output_data = output->data();
if (output_data == nullptr) {
return false;
}
for (size_t i = 0; i < input_size; ++i) {
output_data[i] += diff[i];
}
return true;
}
} // namespace sd
#endif // __CONDITION_CACHE_UTILS_HPP__

File diff suppressed because it is too large Load Diff

View File

@ -1,8 +1,7 @@
#ifndef __CONTROL_HPP__ #ifndef __CONTROL_HPP__
#define __CONTROL_HPP__ #define __CONTROL_HPP__
#include "common.hpp" #include "common_block.hpp"
#include "ggml_extend.hpp"
#include "model.h" #include "model.h"
#define CONTROL_NET_GRAPH_SIZE 1536 #define CONTROL_NET_GRAPH_SIZE 1536
@ -165,26 +164,26 @@ public:
blocks["middle_block_out.0"] = std::shared_ptr<GGMLBlock>(make_zero_conv(ch)); blocks["middle_block_out.0"] = std::shared_ptr<GGMLBlock>(make_zero_conv(ch));
} }
struct ggml_tensor* resblock_forward(std::string name, ggml_tensor* resblock_forward(std::string name,
GGMLRunnerContext* ctx, GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* emb) { ggml_tensor* emb) {
auto block = std::dynamic_pointer_cast<ResBlock>(blocks[name]); auto block = std::dynamic_pointer_cast<ResBlock>(blocks[name]);
return block->forward(ctx, x, emb); return block->forward(ctx, x, emb);
} }
struct ggml_tensor* attention_layer_forward(std::string name, ggml_tensor* attention_layer_forward(std::string name,
GGMLRunnerContext* ctx, GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* context) { ggml_tensor* context) {
auto block = std::dynamic_pointer_cast<SpatialTransformer>(blocks[name]); auto block = std::dynamic_pointer_cast<SpatialTransformer>(blocks[name]);
return block->forward(ctx, x, context); return block->forward(ctx, x, context);
} }
struct ggml_tensor* input_hint_block_forward(GGMLRunnerContext* ctx, ggml_tensor* input_hint_block_forward(GGMLRunnerContext* ctx,
struct ggml_tensor* hint, ggml_tensor* hint,
struct ggml_tensor* emb, ggml_tensor* emb,
struct ggml_tensor* context) { ggml_tensor* context) {
int num_input_blocks = 15; int num_input_blocks = 15;
auto h = hint; auto h = hint;
for (int i = 0; i < num_input_blocks; i++) { for (int i = 0; i < num_input_blocks; i++) {
@ -199,13 +198,13 @@ public:
return h; return h;
} }
std::vector<struct ggml_tensor*> forward(GGMLRunnerContext* ctx, std::vector<ggml_tensor*> forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* hint, ggml_tensor* hint,
struct ggml_tensor* guided_hint, ggml_tensor* guided_hint,
struct ggml_tensor* timesteps, ggml_tensor* timesteps,
struct ggml_tensor* context, ggml_tensor* context,
struct ggml_tensor* y = nullptr) { ggml_tensor* y = nullptr) {
// x: [N, in_channels, h, w] or [N, in_channels/2, h, w] // x: [N, in_channels, h, w] or [N, in_channels/2, h, w]
// timesteps: [N,] // timesteps: [N,]
// context: [N, max_position, hidden_size] or [1, max_position, hidden_size]. for example, [N, 77, 768] // context: [N, max_position, hidden_size] or [1, max_position, hidden_size]. for example, [N, 77, 768]
@ -247,7 +246,7 @@ public:
emb = ggml_add(ctx->ggml_ctx, emb, label_emb); // [N, time_embed_dim] emb = ggml_add(ctx->ggml_ctx, emb, label_emb); // [N, time_embed_dim]
} }
std::vector<struct ggml_tensor*> outs; std::vector<ggml_tensor*> outs;
if (guided_hint == nullptr) { if (guided_hint == nullptr) {
guided_hint = input_hint_block_forward(ctx, hint, emb, context); guided_hint = input_hint_block_forward(ctx, hint, emb, context);
@ -311,11 +310,13 @@ struct ControlNet : public GGMLRunner {
SDVersion version = VERSION_SD1; SDVersion version = VERSION_SD1;
ControlNetBlock control_net; ControlNetBlock control_net;
ggml_backend_buffer_t control_buffer = nullptr; // keep control output tensors in backend memory ggml_backend_buffer_t control_buffer = nullptr;
ggml_context* control_ctx = nullptr; ggml_context* control_ctx = nullptr;
std::vector<struct ggml_tensor*> controls; // (12 input block outputs, 1 middle block output) SD 1.5 std::vector<ggml_tensor*> control_outputs_ggml;
struct ggml_tensor* guided_hint = nullptr; // guided_hint cache, for faster inference ggml_tensor* guided_hint_output_ggml = nullptr;
bool guided_hint_cached = false; std::vector<sd::Tensor<float>> controls;
sd::Tensor<float> guided_hint;
bool guided_hint_cached = false;
ControlNet(ggml_backend_t backend, ControlNet(ggml_backend_t backend,
bool offload_params_to_cpu, bool offload_params_to_cpu,
@ -329,23 +330,23 @@ struct ControlNet : public GGMLRunner {
free_control_ctx(); free_control_ctx();
} }
void alloc_control_ctx(std::vector<struct ggml_tensor*> outs) { void alloc_control_ctx(std::vector<ggml_tensor*> outs) {
struct ggml_init_params params; ggml_init_params params;
params.mem_size = static_cast<size_t>(outs.size() * ggml_tensor_overhead()) + 1024 * 1024; params.mem_size = static_cast<size_t>(outs.size() * ggml_tensor_overhead()) + 1024 * 1024;
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = true; params.no_alloc = true;
control_ctx = ggml_init(params); control_ctx = ggml_init(params);
controls.resize(outs.size() - 1); control_outputs_ggml.resize(outs.size() - 1);
size_t control_buffer_size = 0; size_t control_buffer_size = 0;
guided_hint = ggml_dup_tensor(control_ctx, outs[0]); guided_hint_output_ggml = ggml_dup_tensor(control_ctx, outs[0]);
control_buffer_size += ggml_nbytes(guided_hint); control_buffer_size += ggml_nbytes(guided_hint_output_ggml);
for (int i = 0; i < outs.size() - 1; i++) { for (int i = 0; i < outs.size() - 1; i++) {
controls[i] = ggml_dup_tensor(control_ctx, outs[i + 1]); control_outputs_ggml[i] = ggml_dup_tensor(control_ctx, outs[i + 1]);
control_buffer_size += ggml_nbytes(controls[i]); control_buffer_size += ggml_nbytes(control_outputs_ggml[i]);
} }
control_buffer = ggml_backend_alloc_ctx_tensors(control_ctx, runtime_backend); control_buffer = ggml_backend_alloc_ctx_tensors(control_ctx, runtime_backend);
@ -362,8 +363,10 @@ struct ControlNet : public GGMLRunner {
ggml_free(control_ctx); ggml_free(control_ctx);
control_ctx = nullptr; control_ctx = nullptr;
} }
guided_hint = nullptr; guided_hint_output_ggml = nullptr;
guided_hint_cached = false; guided_hint_cached = false;
guided_hint = {};
control_outputs_ggml.clear();
controls.clear(); controls.clear();
} }
@ -371,33 +374,37 @@ struct ControlNet : public GGMLRunner {
return "control_net"; return "control_net";
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
control_net.get_param_tensors(tensors, prefix); control_net.get_param_tensors(tensors, prefix);
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* x, ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
struct ggml_tensor* hint, const sd::Tensor<float>& hint_tensor,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps_tensor,
struct ggml_tensor* context, const sd::Tensor<float>& context_tensor = {},
struct ggml_tensor* y = nullptr) { const sd::Tensor<float>& y_tensor = {}) {
struct ggml_cgraph* gf = new_graph_custom(CONTROL_NET_GRAPH_SIZE); ggml_cgraph* gf = new_graph_custom(CONTROL_NET_GRAPH_SIZE);
x = to_backend(x); ggml_tensor* x = make_input(x_tensor);
if (guided_hint_cached) { ggml_tensor* hint = nullptr;
hint = nullptr; ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* y = make_optional_input(y_tensor);
ggml_tensor* guided_hint_input = nullptr;
if (guided_hint_cached && !guided_hint.empty()) {
guided_hint_input = make_input(guided_hint);
hint = nullptr;
} else { } else {
hint = to_backend(hint); hint = make_input(hint_tensor);
} }
context = to_backend(context);
y = to_backend(y);
timesteps = to_backend(timesteps);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
auto outs = control_net.forward(&runner_ctx, auto outs = control_net.forward(&runner_ctx,
x, x,
hint, hint,
guided_hint_cached ? guided_hint : nullptr, guided_hint_input,
timesteps, timesteps,
context, context,
y); y);
@ -406,36 +413,46 @@ struct ControlNet : public GGMLRunner {
alloc_control_ctx(outs); alloc_control_ctx(outs);
} }
ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[0], guided_hint)); ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[0], guided_hint_output_ggml));
for (int i = 0; i < outs.size() - 1; i++) { for (int i = 0; i < outs.size() - 1; i++) {
ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[i + 1], controls[i])); ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[i + 1], control_outputs_ggml[i]));
} }
return gf; return gf;
} }
bool compute(int n_threads, std::optional<std::vector<sd::Tensor<float>>> compute(int n_threads,
struct ggml_tensor* x, const sd::Tensor<float>& x,
struct ggml_tensor* hint, const sd::Tensor<float>& hint,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps,
struct ggml_tensor* context, const sd::Tensor<float>& context = {},
struct ggml_tensor* y, const sd::Tensor<float>& y = {}) {
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size] // context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
// y: [N, adm_in_channels] or [1, adm_in_channels] // y: [N, adm_in_channels] or [1, adm_in_channels]
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, hint, timesteps, context, y); return build_graph(x, hint, timesteps, context, y);
}; };
bool res = GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx); auto compute_result = GGMLRunner::compute<float>(get_graph, n_threads, false);
if (res) { if (!compute_result.has_value()) {
// cache guided_hint return std::nullopt;
guided_hint_cached = true;
} }
return res;
if (guided_hint_output_ggml != nullptr) {
guided_hint = restore_trailing_singleton_dims(sd::make_sd_tensor_from_ggml<float>(guided_hint_output_ggml),
4);
}
controls.clear();
controls.reserve(control_outputs_ggml.size());
for (ggml_tensor* control : control_outputs_ggml) {
auto control_host = restore_trailing_singleton_dims(sd::make_sd_tensor_from_ggml<float>(control), 4);
GGML_ASSERT(!control_host.empty());
controls.push_back(std::move(control_host));
}
guided_hint_cached = true;
return controls;
} }
bool load_from_file(const std::string& file_path, int n_threads) { bool load_from_file(const std::string& file_path, int n_threads) {
@ -463,4 +480,4 @@ struct ControlNet : public GGMLRunner {
} }
}; };
#endif // __CONTROL_HPP__ #endif // __CONTROL_HPP__

1321
src/denoiser.hpp Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,41 +1,50 @@
#ifndef __DIFFUSION_MODEL_H__ #ifndef __DIFFUSION_MODEL_H__
#define __DIFFUSION_MODEL_H__ #define __DIFFUSION_MODEL_H__
#include <optional>
#include "anima.hpp"
#include "flux.hpp" #include "flux.hpp"
#include "mmdit.hpp" #include "mmdit.hpp"
#include "qwen_image.hpp" #include "qwen_image.hpp"
#include "tensor_ggml.hpp"
#include "unet.hpp" #include "unet.hpp"
#include "wan.hpp" #include "wan.hpp"
#include "z_image.hpp" #include "z_image.hpp"
struct DiffusionParams { struct DiffusionParams {
struct ggml_tensor* x = nullptr; const sd::Tensor<float>* x = nullptr;
struct ggml_tensor* timesteps = nullptr; const sd::Tensor<float>* timesteps = nullptr;
struct ggml_tensor* context = nullptr; const sd::Tensor<float>* context = nullptr;
struct ggml_tensor* c_concat = nullptr; const sd::Tensor<float>* c_concat = nullptr;
struct ggml_tensor* y = nullptr; const sd::Tensor<float>* y = nullptr;
struct ggml_tensor* guidance = nullptr; const sd::Tensor<int32_t>* t5_ids = nullptr;
std::vector<ggml_tensor*> ref_latents = {}; const sd::Tensor<float>* t5_weights = nullptr;
bool increase_ref_index = false; const sd::Tensor<float>* guidance = nullptr;
int num_video_frames = -1; const std::vector<sd::Tensor<float>>* ref_latents = nullptr;
std::vector<struct ggml_tensor*> controls = {}; bool increase_ref_index = false;
float control_strength = 0.f; int num_video_frames = -1;
struct ggml_tensor* vace_context = nullptr; const std::vector<sd::Tensor<float>>* controls = nullptr;
float vace_strength = 1.f; float control_strength = 0.f;
std::vector<int> skip_layers = {}; const sd::Tensor<float>* vace_context = nullptr;
float vace_strength = 1.f;
const std::vector<int>* skip_layers = nullptr;
}; };
template <typename T>
static inline const sd::Tensor<T>& tensor_or_empty(const sd::Tensor<T>* tensor) {
static const sd::Tensor<T> kEmpty;
return tensor != nullptr ? *tensor : kEmpty;
}
struct DiffusionModel { struct DiffusionModel {
virtual std::string get_desc() = 0; virtual std::string get_desc() = 0;
virtual bool compute(int n_threads, virtual sd::Tensor<float> compute(int n_threads,
DiffusionParams diffusion_params, const DiffusionParams& diffusion_params) = 0;
struct ggml_tensor** output = nullptr, virtual void alloc_params_buffer() = 0;
struct ggml_context* output_ctx = nullptr) = 0; virtual void free_params_buffer() = 0;
virtual void alloc_params_buffer() = 0; virtual void free_compute_buffer() = 0;
virtual void free_params_buffer() = 0; virtual void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) = 0;
virtual void free_compute_buffer() = 0; virtual size_t get_params_buffer_size() = 0;
virtual void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) = 0;
virtual size_t get_params_buffer_size() = 0;
virtual void set_weight_adapter(const std::shared_ptr<WeightAdapter>& adapter){}; virtual void set_weight_adapter(const std::shared_ptr<WeightAdapter>& adapter){};
virtual int64_t get_adm_in_channels() = 0; virtual int64_t get_adm_in_channels() = 0;
virtual void set_flash_attention_enabled(bool enabled) = 0; virtual void set_flash_attention_enabled(bool enabled) = 0;
@ -68,7 +77,7 @@ struct UNetModel : public DiffusionModel {
unet.free_compute_buffer(); unet.free_compute_buffer();
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
unet.get_param_tensors(tensors, "model.diffusion_model"); unet.get_param_tensors(tensors, "model.diffusion_model");
} }
@ -92,19 +101,20 @@ struct UNetModel : public DiffusionModel {
unet.set_circular_axes(circular_x, circular_y); unet.set_circular_axes(circular_x, circular_y);
} }
bool compute(int n_threads, sd::Tensor<float> compute(int n_threads,
DiffusionParams diffusion_params, const DiffusionParams& diffusion_params) override {
struct ggml_tensor** output = nullptr, GGML_ASSERT(diffusion_params.x != nullptr);
struct ggml_context* output_ctx = nullptr) override { GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<sd::Tensor<float>> empty_controls;
return unet.compute(n_threads, return unet.compute(n_threads,
diffusion_params.x, *diffusion_params.x,
diffusion_params.timesteps, *diffusion_params.timesteps,
diffusion_params.context, tensor_or_empty(diffusion_params.context),
diffusion_params.c_concat, tensor_or_empty(diffusion_params.c_concat),
diffusion_params.y, tensor_or_empty(diffusion_params.y),
diffusion_params.num_video_frames, diffusion_params.num_video_frames,
diffusion_params.controls, diffusion_params.controls ? *diffusion_params.controls : empty_controls,
diffusion_params.control_strength, output, output_ctx); diffusion_params.control_strength);
} }
}; };
@ -133,7 +143,7 @@ struct MMDiTModel : public DiffusionModel {
mmdit.free_compute_buffer(); mmdit.free_compute_buffer();
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
mmdit.get_param_tensors(tensors, "model.diffusion_model"); mmdit.get_param_tensors(tensors, "model.diffusion_model");
} }
@ -157,18 +167,17 @@ struct MMDiTModel : public DiffusionModel {
mmdit.set_circular_axes(circular_x, circular_y); mmdit.set_circular_axes(circular_x, circular_y);
} }
bool compute(int n_threads, sd::Tensor<float> compute(int n_threads,
DiffusionParams diffusion_params, const DiffusionParams& diffusion_params) override {
struct ggml_tensor** output = nullptr, GGML_ASSERT(diffusion_params.x != nullptr);
struct ggml_context* output_ctx = nullptr) override { GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<int> empty_skip_layers;
return mmdit.compute(n_threads, return mmdit.compute(n_threads,
diffusion_params.x, *diffusion_params.x,
diffusion_params.timesteps, *diffusion_params.timesteps,
diffusion_params.context, tensor_or_empty(diffusion_params.context),
diffusion_params.y, tensor_or_empty(diffusion_params.y),
output, diffusion_params.skip_layers ? *diffusion_params.skip_layers : empty_skip_layers);
output_ctx,
diffusion_params.skip_layers);
} }
}; };
@ -199,7 +208,7 @@ struct FluxModel : public DiffusionModel {
flux.free_compute_buffer(); flux.free_compute_buffer();
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
flux.get_param_tensors(tensors, "model.diffusion_model"); flux.get_param_tensors(tensors, "model.diffusion_model");
} }
@ -223,22 +232,86 @@ struct FluxModel : public DiffusionModel {
flux.set_circular_axes(circular_x, circular_y); flux.set_circular_axes(circular_x, circular_y);
} }
bool compute(int n_threads, sd::Tensor<float> compute(int n_threads,
DiffusionParams diffusion_params, const DiffusionParams& diffusion_params) override {
struct ggml_tensor** output = nullptr, GGML_ASSERT(diffusion_params.x != nullptr);
struct ggml_context* output_ctx = nullptr) override { GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<sd::Tensor<float>> empty_ref_latents;
static const std::vector<int> empty_skip_layers;
return flux.compute(n_threads, return flux.compute(n_threads,
diffusion_params.x, *diffusion_params.x,
diffusion_params.timesteps, *diffusion_params.timesteps,
diffusion_params.context, tensor_or_empty(diffusion_params.context),
diffusion_params.c_concat, tensor_or_empty(diffusion_params.c_concat),
diffusion_params.y, tensor_or_empty(diffusion_params.y),
diffusion_params.guidance, tensor_or_empty(diffusion_params.guidance),
diffusion_params.ref_latents, diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_ref_latents,
diffusion_params.increase_ref_index, diffusion_params.increase_ref_index,
output, diffusion_params.skip_layers ? *diffusion_params.skip_layers : empty_skip_layers);
output_ctx, }
diffusion_params.skip_layers); };
struct AnimaModel : public DiffusionModel {
std::string prefix;
Anima::AnimaRunner anima;
AnimaModel(ggml_backend_t backend,
bool offload_params_to_cpu,
const String2TensorStorage& tensor_storage_map = {},
const std::string prefix = "model.diffusion_model")
: prefix(prefix), anima(backend, offload_params_to_cpu, tensor_storage_map, prefix) {
}
std::string get_desc() override {
return anima.get_desc();
}
void alloc_params_buffer() override {
anima.alloc_params_buffer();
}
void free_params_buffer() override {
anima.free_params_buffer();
}
void free_compute_buffer() override {
anima.free_compute_buffer();
}
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
anima.get_param_tensors(tensors, prefix);
}
size_t get_params_buffer_size() override {
return anima.get_params_buffer_size();
}
void set_weight_adapter(const std::shared_ptr<WeightAdapter>& adapter) override {
anima.set_weight_adapter(adapter);
}
int64_t get_adm_in_channels() override {
return 768;
}
void set_flash_attention_enabled(bool enabled) {
anima.set_flash_attention_enabled(enabled);
}
void set_circular_axes(bool circular_x, bool circular_y) override {
anima.set_circular_axes(circular_x, circular_y);
}
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
return anima.compute(n_threads,
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
tensor_or_empty(diffusion_params.t5_ids),
tensor_or_empty(diffusion_params.t5_weights));
} }
}; };
@ -270,7 +343,7 @@ struct WanModel : public DiffusionModel {
wan.free_compute_buffer(); wan.free_compute_buffer();
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
wan.get_param_tensors(tensors, prefix); wan.get_param_tensors(tensors, prefix);
} }
@ -294,21 +367,19 @@ struct WanModel : public DiffusionModel {
wan.set_circular_axes(circular_x, circular_y); wan.set_circular_axes(circular_x, circular_y);
} }
bool compute(int n_threads, sd::Tensor<float> compute(int n_threads,
DiffusionParams diffusion_params, const DiffusionParams& diffusion_params) override {
struct ggml_tensor** output = nullptr, GGML_ASSERT(diffusion_params.x != nullptr);
struct ggml_context* output_ctx = nullptr) override { GGML_ASSERT(diffusion_params.timesteps != nullptr);
return wan.compute(n_threads, return wan.compute(n_threads,
diffusion_params.x, *diffusion_params.x,
diffusion_params.timesteps, *diffusion_params.timesteps,
diffusion_params.context, tensor_or_empty(diffusion_params.context),
diffusion_params.y, tensor_or_empty(diffusion_params.y),
diffusion_params.c_concat, tensor_or_empty(diffusion_params.c_concat),
nullptr, sd::Tensor<float>(),
diffusion_params.vace_context, tensor_or_empty(diffusion_params.vace_context),
diffusion_params.vace_strength, diffusion_params.vace_strength);
output,
output_ctx);
} }
}; };
@ -341,7 +412,7 @@ struct QwenImageModel : public DiffusionModel {
qwen_image.free_compute_buffer(); qwen_image.free_compute_buffer();
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
qwen_image.get_param_tensors(tensors, prefix); qwen_image.get_param_tensors(tensors, prefix);
} }
@ -365,18 +436,17 @@ struct QwenImageModel : public DiffusionModel {
qwen_image.set_circular_axes(circular_x, circular_y); qwen_image.set_circular_axes(circular_x, circular_y);
} }
bool compute(int n_threads, sd::Tensor<float> compute(int n_threads,
DiffusionParams diffusion_params, const DiffusionParams& diffusion_params) override {
struct ggml_tensor** output = nullptr, GGML_ASSERT(diffusion_params.x != nullptr);
struct ggml_context* output_ctx = nullptr) override { GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<sd::Tensor<float>> empty_ref_latents;
return qwen_image.compute(n_threads, return qwen_image.compute(n_threads,
diffusion_params.x, *diffusion_params.x,
diffusion_params.timesteps, *diffusion_params.timesteps,
diffusion_params.context, tensor_or_empty(diffusion_params.context),
diffusion_params.ref_latents, diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_ref_latents,
true, // increase_ref_index true);
output,
output_ctx);
} }
}; };
@ -408,7 +478,7 @@ struct ZImageModel : public DiffusionModel {
z_image.free_compute_buffer(); z_image.free_compute_buffer();
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
z_image.get_param_tensors(tensors, prefix); z_image.get_param_tensors(tensors, prefix);
} }
@ -432,18 +502,17 @@ struct ZImageModel : public DiffusionModel {
z_image.set_circular_axes(circular_x, circular_y); z_image.set_circular_axes(circular_x, circular_y);
} }
bool compute(int n_threads, sd::Tensor<float> compute(int n_threads,
DiffusionParams diffusion_params, const DiffusionParams& diffusion_params) override {
struct ggml_tensor** output = nullptr, GGML_ASSERT(diffusion_params.x != nullptr);
struct ggml_context* output_ctx = nullptr) override { GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<sd::Tensor<float>> empty_ref_latents;
return z_image.compute(n_threads, return z_image.compute(n_threads,
diffusion_params.x, *diffusion_params.x,
diffusion_params.timesteps, *diffusion_params.timesteps,
diffusion_params.context, tensor_or_empty(diffusion_params.context),
diffusion_params.ref_latents, diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_ref_latents,
true, // increase_ref_index true);
output,
output_ctx);
} }
}; };

View File

@ -1,10 +1,15 @@
#ifndef __EASYCACHE_HPP__
#define __EASYCACHE_HPP__
#include <cmath> #include <cmath>
#include <limits> #include <limits>
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include "condition_cache_utils.hpp"
#include "denoiser.hpp" #include "denoiser.hpp"
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
#include "tensor.hpp"
struct EasyCacheConfig { struct EasyCacheConfig {
bool enabled = false; bool enabled = false;
@ -19,15 +24,15 @@ struct EasyCacheCacheEntry {
struct EasyCacheState { struct EasyCacheState {
EasyCacheConfig config; EasyCacheConfig config;
Denoiser* denoiser = nullptr; Denoiser* denoiser = nullptr;
float start_sigma = std::numeric_limits<float>::max(); float start_sigma = std::numeric_limits<float>::max();
float end_sigma = 0.0f; float end_sigma = 0.0f;
bool initialized = false; bool initialized = false;
bool initial_step = true; bool initial_step = true;
bool skip_current_step = false; bool skip_current_step = false;
bool step_active = false; bool step_active = false;
const SDCondition* anchor_condition = nullptr; const void* anchor_condition = nullptr;
std::unordered_map<const SDCondition*, EasyCacheCacheEntry> cache_diffs; std::unordered_map<const void*, EasyCacheCacheEntry> cache_diffs;
std::vector<float> prev_input; std::vector<float> prev_input;
std::vector<float> prev_output; std::vector<float> prev_output;
float output_prev_norm = 0.0f; float output_prev_norm = 0.0f;
@ -120,41 +125,30 @@ struct EasyCacheState {
return enabled() && step_active && skip_current_step; return enabled() && step_active && skip_current_step;
} }
bool has_cache(const SDCondition* cond) const { bool has_cache(const void* cond) const {
auto it = cache_diffs.find(cond); auto it = cache_diffs.find(cond);
return it != cache_diffs.end() && !it->second.diff.empty(); return it != cache_diffs.end() && !it->second.diff.empty();
} }
void update_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) { void update_cache(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
EasyCacheCacheEntry& entry = cache_diffs[cond]; EasyCacheCacheEntry& entry = cache_diffs[cond];
size_t ne = static_cast<size_t>(ggml_nelements(output)); sd::store_condition_cache_diff(&entry.diff, input, output);
entry.diff.resize(ne);
float* out_data = (float*)output->data;
float* in_data = (float*)input->data;
for (size_t i = 0; i < ne; ++i) {
entry.diff[i] = out_data[i] - in_data[i];
}
} }
void apply_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) { void apply_cache(const void* cond, const sd::Tensor<float>& input, sd::Tensor<float>* output) {
auto it = cache_diffs.find(cond); auto it = cache_diffs.find(cond);
if (it == cache_diffs.end() || it->second.diff.empty()) { if (it == cache_diffs.end() || it->second.diff.empty()) {
return; return;
} }
copy_ggml_tensor(output, input); sd::apply_condition_cache_diff(it->second.diff, input, output);
float* out_data = (float*)output->data;
const std::vector<float>& diff = it->second.diff;
for (size_t i = 0; i < diff.size(); ++i) {
out_data[i] += diff[i];
}
} }
bool before_condition(const SDCondition* cond, bool before_condition(const void* cond,
ggml_tensor* input, const sd::Tensor<float>& input,
ggml_tensor* output, sd::Tensor<float>* output,
float sigma, float sigma,
int step_index) { int step_index) {
if (!enabled() || step_index < 0) { if (!enabled() || step_index < 0 || output == nullptr) {
return false; return false;
} }
if (step_index != current_step_index) { if (step_index != current_step_index) {
@ -181,12 +175,12 @@ struct EasyCacheState {
if (!has_prev_input || !has_prev_output || !has_cache(cond)) { if (!has_prev_input || !has_prev_output || !has_cache(cond)) {
return false; return false;
} }
size_t ne = static_cast<size_t>(ggml_nelements(input)); size_t ne = static_cast<size_t>(input.numel());
if (prev_input.size() != ne) { if (prev_input.size() != ne) {
return false; return false;
} }
float* input_data = (float*)input->data; const float* input_data = input.data();
last_input_change = 0.0f; last_input_change = 0.0f;
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
last_input_change += std::fabs(input_data[i] - prev_input[i]); last_input_change += std::fabs(input_data[i] - prev_input[i]);
} }
@ -211,7 +205,7 @@ struct EasyCacheState {
return false; return false;
} }
void after_condition(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) { void after_condition(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
if (!step_is_active()) { if (!step_is_active()) {
return; return;
} }
@ -220,16 +214,16 @@ struct EasyCacheState {
return; return;
} }
size_t ne = static_cast<size_t>(ggml_nelements(input)); size_t ne = static_cast<size_t>(input.numel());
float* in_data = (float*)input->data; const float* in_data = input.data();
prev_input.resize(ne); prev_input.resize(ne);
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
prev_input[i] = in_data[i]; prev_input[i] = in_data[i];
} }
has_prev_input = true; has_prev_input = true;
float* out_data = (float*)output->data; const float* out_data = output.data();
float output_change = 0.0f; float output_change = 0.0f;
if (has_prev_output && prev_output.size() == ne) { if (has_prev_output && prev_output.size() == ne) {
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
output_change += std::fabs(out_data[i] - prev_output[i]); output_change += std::fabs(out_data[i] - prev_output[i]);
@ -262,4 +256,6 @@ struct EasyCacheState {
cumulative_change_rate = 0.0f; cumulative_change_rate = 0.0f;
has_last_input_change = false; has_last_input_change = false;
} }
}; };
#endif

View File

@ -27,11 +27,11 @@ public:
blocks["conv5"] = std::shared_ptr<GGMLBlock>(new Conv2d(num_feat + 4 * num_grow_ch, num_feat, {3, 3}, {1, 1}, {1, 1})); blocks["conv5"] = std::shared_ptr<GGMLBlock>(new Conv2d(num_feat + 4 * num_grow_ch, num_feat, {3, 3}, {1, 1}, {1, 1}));
} }
struct ggml_tensor* lrelu(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* lrelu(GGMLRunnerContext* ctx, ggml_tensor* x) {
return ggml_leaky_relu(ctx->ggml_ctx, x, 0.2f, true); return ggml_leaky_relu(ctx->ggml_ctx, x, 0.2f, true);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [n, num_feat, h, w] // x: [n, num_feat, h, w]
// return: [n, num_feat, h, w] // return: [n, num_feat, h, w]
@ -64,7 +64,7 @@ public:
blocks["rdb3"] = std::shared_ptr<GGMLBlock>(new ResidualDenseBlock(num_feat, num_grow_ch)); blocks["rdb3"] = std::shared_ptr<GGMLBlock>(new ResidualDenseBlock(num_feat, num_grow_ch));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [n, num_feat, h, w] // x: [n, num_feat, h, w]
// return: [n, num_feat, h, w] // return: [n, num_feat, h, w]
@ -112,11 +112,11 @@ public:
int get_scale() { return scale; } int get_scale() { return scale; }
int get_num_block() { return num_block; } int get_num_block() { return num_block; }
struct ggml_tensor* lrelu(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* lrelu(GGMLRunnerContext* ctx, ggml_tensor* x) {
return ggml_leaky_relu(ctx->ggml_ctx, x, 0.2f, true); return ggml_leaky_relu(ctx->ggml_ctx, x, 0.2f, true);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [n, num_in_ch, h, w] // x: [n, num_in_ch, h, w]
// return: [n, num_out_ch, h*scale, w*scale] // return: [n, num_out_ch, h*scale, w*scale]
auto conv_first = std::dynamic_pointer_cast<Conv2d>(blocks["conv_first"]); auto conv_first = std::dynamic_pointer_cast<Conv2d>(blocks["conv_first"]);
@ -341,28 +341,25 @@ struct ESRGAN : public GGMLRunner {
return success; return success;
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* x) { ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor) {
if (!rrdb_net) if (!rrdb_net)
return nullptr; return nullptr;
constexpr int kGraphNodes = 1 << 16; // 65k constexpr int kGraphNodes = 1 << 16; // 65k
struct ggml_cgraph* gf = new_graph_custom(kGraphNodes); ggml_cgraph* gf = new_graph_custom(kGraphNodes);
x = to_backend(x); ggml_tensor* x = make_input(x_tensor);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_tensor* out = rrdb_net->forward(&runner_ctx, x); ggml_tensor* out = rrdb_net->forward(&runner_ctx, x);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
bool compute(const int n_threads, sd::Tensor<float> compute(const int n_threads,
struct ggml_tensor* x, const sd::Tensor<float>& x) {
ggml_tensor** output, auto get_graph = [&]() -> ggml_cgraph* { return build_graph(x); };
ggml_context* output_ctx = nullptr) { auto result = restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
auto get_graph = [&]() -> struct ggml_cgraph* { return result;
return build_graph(x);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
} }
}; };
#endif // __ESRGAN_HPP__ #endif // __ESRGAN_HPP__

View File

@ -4,7 +4,7 @@
#include <memory> #include <memory>
#include <vector> #include <vector>
#include "ggml_extend.hpp" #include "common_dit.hpp"
#include "model.h" #include "model.h"
#include "rope.hpp" #include "rope.hpp"
@ -19,7 +19,7 @@ namespace Flux {
blocks["out_layer"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_dim, hidden_dim, bias)); blocks["out_layer"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_dim, hidden_dim, bias));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
// x: [..., in_dim] // x: [..., in_dim]
// return: [..., hidden_dim] // return: [..., hidden_dim]
auto in_layer = std::dynamic_pointer_cast<Linear>(blocks["in_layer"]); auto in_layer = std::dynamic_pointer_cast<Linear>(blocks["in_layer"]);
@ -37,7 +37,7 @@ namespace Flux {
int64_t hidden_size; int64_t hidden_size;
float eps; float eps;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
ggml_type wtype = GGML_TYPE_F32; ggml_type wtype = GGML_TYPE_F32;
params["scale"] = ggml_new_tensor_1d(ctx, wtype, hidden_size); params["scale"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
} }
@ -48,10 +48,10 @@ namespace Flux {
: hidden_size(hidden_size), : hidden_size(hidden_size),
eps(eps) {} eps(eps) {}
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
struct ggml_tensor* w = params["scale"]; ggml_tensor* w = params["scale"];
x = ggml_rms_norm(ctx->ggml_ctx, x, eps); x = ggml_rms_norm(ctx->ggml_ctx, x, eps);
x = ggml_mul(ctx->ggml_ctx, x, w); x = ggml_mul(ctx->ggml_ctx, x, w);
return x; return x;
} }
}; };
@ -63,7 +63,7 @@ namespace Flux {
blocks["key_norm"] = std::shared_ptr<GGMLBlock>(new RMSNorm(dim)); blocks["key_norm"] = std::shared_ptr<GGMLBlock>(new RMSNorm(dim));
} }
struct ggml_tensor* query_norm(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* query_norm(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [..., dim] // x: [..., dim]
// return: [..., dim] // return: [..., dim]
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["query_norm"]); auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["query_norm"]);
@ -72,7 +72,7 @@ namespace Flux {
return x; return x;
} }
struct ggml_tensor* key_norm(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* key_norm(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [..., dim] // x: [..., dim]
// return: [..., dim] // return: [..., dim]
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["key_norm"]); auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["key_norm"]);
@ -98,32 +98,34 @@ namespace Flux {
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim, proj_bias)); blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim, proj_bias));
} }
std::vector<struct ggml_tensor*> pre_attention(GGMLRunnerContext* ctx, struct ggml_tensor* x) { std::vector<ggml_tensor*> pre_attention(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto qkv_proj = std::dynamic_pointer_cast<Linear>(blocks["qkv"]); auto qkv_proj = std::dynamic_pointer_cast<Linear>(blocks["qkv"]);
auto norm = std::dynamic_pointer_cast<QKNorm>(blocks["norm"]); auto norm = std::dynamic_pointer_cast<QKNorm>(blocks["norm"]);
auto qkv = qkv_proj->forward(ctx, x); auto qkv = qkv_proj->forward(ctx, x);
auto qkv_vec = ggml_ext_chunk(ctx->ggml_ctx, qkv, 3, 0, true); int64_t head_dim = qkv->ne[0] / 3 / num_heads;
int64_t head_dim = qkv_vec[0]->ne[0] / num_heads; auto q = ggml_view_4d(ctx->ggml_ctx, qkv, head_dim, num_heads, qkv->ne[1], qkv->ne[2],
auto q = ggml_reshape_4d(ctx->ggml_ctx, qkv_vec[0], head_dim, num_heads, qkv_vec[0]->ne[1], qkv_vec[0]->ne[2]); qkv->nb[0] * head_dim, qkv->nb[1], qkv->nb[2], 0);
auto k = ggml_reshape_4d(ctx->ggml_ctx, qkv_vec[1], head_dim, num_heads, qkv_vec[1]->ne[1], qkv_vec[1]->ne[2]); auto k = ggml_view_4d(ctx->ggml_ctx, qkv, head_dim, num_heads, qkv->ne[1], qkv->ne[2],
auto v = ggml_reshape_4d(ctx->ggml_ctx, qkv_vec[2], head_dim, num_heads, qkv_vec[2]->ne[1], qkv_vec[2]->ne[2]); qkv->nb[0] * head_dim, qkv->nb[1], qkv->nb[2], (qkv->nb[0]) * qkv->ne[0] / 3);
auto v = ggml_view_4d(ctx->ggml_ctx, qkv, head_dim, num_heads, qkv->ne[1], qkv->ne[2],
qkv->nb[0] * head_dim, qkv->nb[1], qkv->nb[2], (qkv->nb[0]) * 2 * qkv->ne[0] / 3);
q = norm->query_norm(ctx, q); q = norm->query_norm(ctx, q);
k = norm->key_norm(ctx, k); k = norm->key_norm(ctx, k);
return {q, k, v}; return {q, k, v};
} }
struct ggml_tensor* post_attention(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* post_attention(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]); auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
x = proj->forward(ctx, x); // [N, n_token, dim] x = proj->forward(ctx, x); // [N, n_token, dim]
return x; return x;
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mask) { ggml_tensor* mask) {
// x: [N, n_token, dim] // x: [N, n_token, dim]
// pe: [n_token, d_head/2, 2, 2] // pe: [n_token, d_head/2, 2, 2]
// return [N, n_token, dim] // return [N, n_token, dim]
@ -145,7 +147,7 @@ namespace Flux {
blocks["2"] = std::make_shared<Linear>(intermediate_size, hidden_size, bias); blocks["2"] = std::make_shared<Linear>(intermediate_size, hidden_size, bias);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["0"]); auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["0"]);
auto mlp_2 = std::dynamic_pointer_cast<Linear>(blocks["2"]); auto mlp_2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);
@ -168,7 +170,7 @@ namespace Flux {
blocks["down_proj"] = std::make_shared<Linear>(intermediate_size, hidden_size, bias); blocks["down_proj"] = std::make_shared<Linear>(intermediate_size, hidden_size, bias);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto gate_proj = std::dynamic_pointer_cast<Linear>(blocks["gate_proj"]); auto gate_proj = std::dynamic_pointer_cast<Linear>(blocks["gate_proj"]);
auto up_proj = std::dynamic_pointer_cast<Linear>(blocks["up_proj"]); auto up_proj = std::dynamic_pointer_cast<Linear>(blocks["up_proj"]);
auto down_proj = std::dynamic_pointer_cast<Linear>(blocks["down_proj"]); auto down_proj = std::dynamic_pointer_cast<Linear>(blocks["down_proj"]);
@ -210,7 +212,7 @@ namespace Flux {
blocks["lin"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim * multiplier, bias)); blocks["lin"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim * multiplier, bias));
} }
std::vector<ModulationOut> forward(GGMLRunnerContext* ctx, struct ggml_tensor* vec) { std::vector<ModulationOut> forward(GGMLRunnerContext* ctx, ggml_tensor* vec) {
// x: [N, dim] // x: [N, dim]
// return: [ModulationOut, ModulationOut] // return: [ModulationOut, ModulationOut]
auto lin = std::dynamic_pointer_cast<Linear>(blocks["lin"]); auto lin = std::dynamic_pointer_cast<Linear>(blocks["lin"]);
@ -230,11 +232,11 @@ namespace Flux {
} }
}; };
__STATIC_INLINE__ struct ggml_tensor* modulate(struct ggml_context* ctx, __STATIC_INLINE__ ggml_tensor* modulate(ggml_context* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* shift, ggml_tensor* shift,
struct ggml_tensor* scale, ggml_tensor* scale,
bool skip_reshape = false) { bool skip_reshape = false) {
// x: [N, L, C] // x: [N, L, C]
// scale: [N, C] // scale: [N, C]
// shift: [N, C] // shift: [N, C]
@ -292,7 +294,7 @@ namespace Flux {
} }
} }
std::vector<ModulationOut> get_distil_img_mod(GGMLRunnerContext* ctx, struct ggml_tensor* vec) { std::vector<ModulationOut> get_distil_img_mod(GGMLRunnerContext* ctx, ggml_tensor* vec) {
// TODO: not hardcoded? // TODO: not hardcoded?
const int single_blocks_count = 38; const int single_blocks_count = 38;
const int double_blocks_count = 19; const int double_blocks_count = 19;
@ -301,7 +303,7 @@ namespace Flux {
return {ModulationOut(ctx, vec, offset), ModulationOut(ctx, vec, offset + 3)}; return {ModulationOut(ctx, vec, offset), ModulationOut(ctx, vec, offset + 3)};
} }
std::vector<ModulationOut> get_distil_txt_mod(GGMLRunnerContext* ctx, struct ggml_tensor* vec) { std::vector<ModulationOut> get_distil_txt_mod(GGMLRunnerContext* ctx, ggml_tensor* vec) {
// TODO: not hardcoded? // TODO: not hardcoded?
const int single_blocks_count = 38; const int single_blocks_count = 38;
const int double_blocks_count = 19; const int double_blocks_count = 19;
@ -310,14 +312,14 @@ namespace Flux {
return {ModulationOut(ctx, vec, offset), ModulationOut(ctx, vec, offset + 3)}; return {ModulationOut(ctx, vec, offset), ModulationOut(ctx, vec, offset + 3)};
} }
std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx, std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
struct ggml_tensor* img, ggml_tensor* img,
struct ggml_tensor* txt, ggml_tensor* txt,
struct ggml_tensor* vec, ggml_tensor* vec,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mask = nullptr, ggml_tensor* mask = nullptr,
std::vector<ModulationOut> img_mods = {}, std::vector<ModulationOut> img_mods = {},
std::vector<ModulationOut> txt_mods = {}) { std::vector<ModulationOut> txt_mods = {}) {
// img: [N, n_img_token, hidden_size] // img: [N, n_img_token, hidden_size]
// txt: [N, n_txt_token, hidden_size] // txt: [N, n_txt_token, hidden_size]
// pe: [n_img_token + n_txt_token, d_head/2, 2, 2] // pe: [n_img_token + n_txt_token, d_head/2, 2, 2]
@ -455,17 +457,17 @@ namespace Flux {
} }
} }
ModulationOut get_distil_mod(GGMLRunnerContext* ctx, struct ggml_tensor* vec) { ModulationOut get_distil_mod(GGMLRunnerContext* ctx, ggml_tensor* vec) {
int64_t offset = 3 * idx; int64_t offset = 3 * idx;
return ModulationOut(ctx, vec, offset); return ModulationOut(ctx, vec, offset);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* vec, ggml_tensor* vec,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mask = nullptr, ggml_tensor* mask = nullptr,
std::vector<ModulationOut> mods = {}) { std::vector<ModulationOut> mods = {}) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
// pe: [n_token, d_head/2, 2, 2] // pe: [n_token, d_head/2, 2, 2]
// return: [N, n_token, hidden_size] // return: [N, n_token, hidden_size]
@ -491,15 +493,14 @@ namespace Flux {
auto x_mod = Flux::modulate(ctx->ggml_ctx, pre_norm->forward(ctx, x), mod.shift, mod.scale); auto x_mod = Flux::modulate(ctx->ggml_ctx, pre_norm->forward(ctx, x), mod.shift, mod.scale);
auto qkv_mlp = linear1->forward(ctx, x_mod); // [N, n_token, hidden_size * 3 + mlp_hidden_dim*mlp_mult_factor] auto qkv_mlp = linear1->forward(ctx, x_mod); // [N, n_token, hidden_size * 3 + mlp_hidden_dim*mlp_mult_factor]
auto q = ggml_view_3d(ctx->ggml_ctx, qkv_mlp, hidden_size, qkv_mlp->ne[1], qkv_mlp->ne[2], qkv_mlp->nb[1], qkv_mlp->nb[2], 0);
auto k = ggml_view_3d(ctx->ggml_ctx, qkv_mlp, hidden_size, qkv_mlp->ne[1], qkv_mlp->ne[2], qkv_mlp->nb[1], qkv_mlp->nb[2], hidden_size * qkv_mlp->nb[0]);
auto v = ggml_view_3d(ctx->ggml_ctx, qkv_mlp, hidden_size, qkv_mlp->ne[1], qkv_mlp->ne[2], qkv_mlp->nb[1], qkv_mlp->nb[2], hidden_size * 2 * qkv_mlp->nb[0]);
int64_t head_dim = hidden_size / num_heads; int64_t head_dim = hidden_size / num_heads;
q = ggml_reshape_4d(ctx->ggml_ctx, ggml_cont(ctx->ggml_ctx, q), head_dim, num_heads, q->ne[1], q->ne[2]); // [N, n_token, n_head, d_head] auto q = ggml_view_4d(ctx->ggml_ctx, qkv_mlp, head_dim, num_heads, qkv_mlp->ne[1], qkv_mlp->ne[2],
k = ggml_reshape_4d(ctx->ggml_ctx, ggml_cont(ctx->ggml_ctx, k), head_dim, num_heads, k->ne[1], k->ne[2]); // [N, n_token, n_head, d_head] qkv_mlp->nb[0] * head_dim, qkv_mlp->nb[1], qkv_mlp->nb[2], 0);
v = ggml_reshape_4d(ctx->ggml_ctx, ggml_cont(ctx->ggml_ctx, v), head_dim, num_heads, v->ne[1], v->ne[2]); // [N, n_token, n_head, d_head] auto k = ggml_view_4d(ctx->ggml_ctx, qkv_mlp, head_dim, num_heads, qkv_mlp->ne[1], qkv_mlp->ne[2],
qkv_mlp->nb[0] * head_dim, qkv_mlp->nb[1], qkv_mlp->nb[2], (qkv_mlp->nb[0]) * hidden_size);
auto v = ggml_view_4d(ctx->ggml_ctx, qkv_mlp, head_dim, num_heads, qkv_mlp->ne[1], qkv_mlp->ne[2],
qkv_mlp->nb[0] * head_dim, qkv_mlp->nb[1], qkv_mlp->nb[2], (qkv_mlp->nb[0]) * 2 * hidden_size);
q = norm->query_norm(ctx, q); q = norm->query_norm(ctx, q);
k = norm->key_norm(ctx, k); k = norm->key_norm(ctx, k);
@ -538,7 +539,7 @@ namespace Flux {
} }
} }
ModulationOut get_distil_mod(GGMLRunnerContext* ctx, struct ggml_tensor* vec) { ModulationOut get_distil_mod(GGMLRunnerContext* ctx, ggml_tensor* vec) {
int64_t offset = vec->ne[2] - 2; int64_t offset = vec->ne[2] - 2;
int64_t stride = vec->nb[1] * vec->ne[1]; int64_t stride = vec->nb[1] * vec->ne[1];
auto shift = ggml_view_2d(ctx->ggml_ctx, vec, vec->ne[0], vec->ne[1], vec->nb[1], stride * (offset + 0)); // [N, dim] auto shift = ggml_view_2d(ctx->ggml_ctx, vec, vec->ne[0], vec->ne[1], vec->nb[1], stride * (offset + 0)); // [N, dim]
@ -547,15 +548,15 @@ namespace Flux {
return {shift, scale, nullptr}; return {shift, scale, nullptr};
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* c) { ggml_tensor* c) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
// c: [N, hidden_size] // c: [N, hidden_size]
// return: [N, n_token, patch_size * patch_size * out_channels] // return: [N, n_token, patch_size * patch_size * out_channels]
auto norm_final = std::dynamic_pointer_cast<LayerNorm>(blocks["norm_final"]); auto norm_final = std::dynamic_pointer_cast<LayerNorm>(blocks["norm_final"]);
auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]); auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]);
struct ggml_tensor *shift, *scale; ggml_tensor *shift, *scale;
if (prune_mod) { if (prune_mod) {
auto mod = get_distil_mod(ctx, c); auto mod = get_distil_mod(ctx, c);
shift = mod.shift; shift = mod.shift;
@ -588,7 +589,7 @@ namespace Flux {
blocks["out_proj"] = std::shared_ptr<GGMLBlock>(new Linear(inner_size, hidden_size, true)); blocks["out_proj"] = std::shared_ptr<GGMLBlock>(new Linear(inner_size, hidden_size, true));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto in_proj = std::dynamic_pointer_cast<Linear>(blocks["in_proj"]); auto in_proj = std::dynamic_pointer_cast<Linear>(blocks["in_proj"]);
auto out_proj = std::dynamic_pointer_cast<Linear>(blocks["out_proj"]); auto out_proj = std::dynamic_pointer_cast<Linear>(blocks["out_proj"]);
@ -611,9 +612,9 @@ namespace Flux {
blocks["embedder.0"] = std::make_shared<Linear>(in_channels + max_freqs * max_freqs, hidden_size_input); blocks["embedder.0"] = std::make_shared<Linear>(in_channels + max_freqs * max_freqs, hidden_size_input);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* dct) { ggml_tensor* dct) {
// x: (B, P^2, C) // x: (B, P^2, C)
// dct: (1, P^2, max_freqs^2) // dct: (1, P^2, max_freqs^2)
// return: (B, P^2, hidden_size_input) // return: (B, P^2, hidden_size_input)
@ -638,9 +639,9 @@ namespace Flux {
blocks["norm"] = std::make_shared<RMSNorm>(hidden_size_x); blocks["norm"] = std::make_shared<RMSNorm>(hidden_size_x);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* s) { ggml_tensor* s) {
// x: (batch_size, n_token, hidden_size_x) // x: (batch_size, n_token, hidden_size_x)
// s: (batch_size, hidden_size_s) // s: (batch_size, hidden_size_s)
// return: (batch_size, n_token, hidden_size_x) // return: (batch_size, n_token, hidden_size_x)
@ -688,8 +689,8 @@ namespace Flux {
blocks["linear"] = std::make_shared<Linear>(hidden_size, out_channels); blocks["linear"] = std::make_shared<Linear>(hidden_size, out_channels);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x) { ggml_tensor* x) {
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["norm"]); auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["norm"]);
auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]); auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]);
@ -707,8 +708,8 @@ namespace Flux {
blocks["conv"] = std::make_shared<Conv2d>(hidden_size, out_channels, std::pair{3, 3}, std::pair{1, 1}, std::pair{1, 1}); blocks["conv"] = std::make_shared<Conv2d>(hidden_size, out_channels, std::pair{3, 3}, std::pair{1, 1}, std::pair{1, 1});
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x) { ggml_tensor* x) {
// x: [N, C, H, W] // x: [N, C, H, W]
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["norm"]); auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["norm"]);
auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]); auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]);
@ -846,79 +847,15 @@ namespace Flux {
} }
} }
struct ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx, ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
struct ggml_tensor* x) { ggml_tensor* img,
int64_t W = x->ne[0]; ggml_tensor* txt,
int64_t H = x->ne[1]; ggml_tensor* timesteps,
ggml_tensor* y,
int pad_h = (params.patch_size - H % params.patch_size) % params.patch_size; ggml_tensor* guidance,
int pad_w = (params.patch_size - W % params.patch_size) % params.patch_size; ggml_tensor* pe,
x = ggml_ext_pad(ctx->ggml_ctx, x, pad_w, pad_h, 0, 0, ctx->circular_x_enabled, ctx->circular_y_enabled); ggml_tensor* mod_index_arange = nullptr,
return x; std::vector<int> skip_layers = {}) {
}
struct ggml_tensor* patchify(struct ggml_context* ctx,
struct ggml_tensor* x) {
// x: [N, C, H, W]
// return: [N, h*w, C * patch_size * patch_size]
int64_t N = x->ne[3];
int64_t C = x->ne[2];
int64_t H = x->ne[1];
int64_t W = x->ne[0];
int64_t p = params.patch_size;
int64_t h = H / params.patch_size;
int64_t w = W / params.patch_size;
GGML_ASSERT(h * p == H && w * p == W);
x = ggml_reshape_4d(ctx, x, p, w, p, h * C * N); // [N*C*h, p, w, p]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, w, p, p]
x = ggml_reshape_4d(ctx, x, p * p, w * h, C, N); // [N, C, h*w, p*p]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N, h*w, C, p*p]
x = ggml_reshape_3d(ctx, x, p * p * C, w * h, N); // [N, h*w, C*p*p]
return x;
}
struct ggml_tensor* process_img(GGMLRunnerContext* ctx,
struct ggml_tensor* x) {
// img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size)
x = pad_to_patch_size(ctx, x);
x = patchify(ctx->ggml_ctx, x);
return x;
}
struct ggml_tensor* unpatchify(struct ggml_context* ctx,
struct ggml_tensor* x,
int64_t h,
int64_t w) {
// x: [N, h*w, C*patch_size*patch_size]
// return: [N, C, H, W]
int64_t N = x->ne[2];
int64_t C = x->ne[0] / params.patch_size / params.patch_size;
int64_t H = h * params.patch_size;
int64_t W = w * params.patch_size;
int64_t p = params.patch_size;
GGML_ASSERT(C * p * p == x->ne[0]);
x = ggml_reshape_4d(ctx, x, p * p, C, w * h, N); // [N, h*w, C, p*p]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N, C, h*w, p*p]
x = ggml_reshape_4d(ctx, x, p, p, w, h * C * N); // [N*C*h, w, p, p]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, p, w, p]
x = ggml_reshape_4d(ctx, x, W, H, C, N); // [N, C, h*p, w*p]
return x;
}
struct ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
struct ggml_tensor* img,
struct ggml_tensor* txt,
struct ggml_tensor* timesteps,
struct ggml_tensor* y,
struct ggml_tensor* guidance,
struct ggml_tensor* pe,
struct ggml_tensor* mod_index_arange = nullptr,
std::vector<int> skip_layers = {}) {
auto img_in = std::dynamic_pointer_cast<Linear>(blocks["img_in"]); auto img_in = std::dynamic_pointer_cast<Linear>(blocks["img_in"]);
auto txt_in = std::dynamic_pointer_cast<Linear>(blocks["txt_in"]); auto txt_in = std::dynamic_pointer_cast<Linear>(blocks["txt_in"]);
auto final_layer = std::dynamic_pointer_cast<LastLayer>(blocks["final_layer"]); auto final_layer = std::dynamic_pointer_cast<LastLayer>(blocks["final_layer"]);
@ -927,8 +864,8 @@ namespace Flux {
img = img_in->forward(ctx, img); img = img_in->forward(ctx, img);
} }
struct ggml_tensor* vec; ggml_tensor* vec;
struct ggml_tensor* txt_img_mask = nullptr; ggml_tensor* txt_img_mask = nullptr;
if (params.is_chroma) { if (params.is_chroma) {
int64_t mod_index_length = 344; int64_t mod_index_length = 344;
auto approx = std::dynamic_pointer_cast<ChromaApproximator>(blocks["distilled_guidance_layer"]); auto approx = std::dynamic_pointer_cast<ChromaApproximator>(blocks["distilled_guidance_layer"]);
@ -1030,27 +967,27 @@ namespace Flux {
return img; return img;
} }
struct ggml_tensor* _apply_x0_residual(GGMLRunnerContext* ctx, ggml_tensor* _apply_x0_residual(GGMLRunnerContext* ctx,
struct ggml_tensor* predicted, ggml_tensor* predicted,
struct ggml_tensor* noisy, ggml_tensor* noisy,
struct ggml_tensor* timesteps) { ggml_tensor* timesteps) {
auto x = ggml_sub(ctx->ggml_ctx, noisy, predicted); auto x = ggml_sub(ctx->ggml_ctx, noisy, predicted);
x = ggml_div(ctx->ggml_ctx, x, timesteps); x = ggml_div(ctx->ggml_ctx, x, timesteps);
return x; return x;
} }
struct ggml_tensor* forward_chroma_radiance(GGMLRunnerContext* ctx, ggml_tensor* forward_chroma_radiance(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* timestep, ggml_tensor* timestep,
struct ggml_tensor* context, ggml_tensor* context,
struct ggml_tensor* c_concat, ggml_tensor* c_concat,
struct ggml_tensor* y, ggml_tensor* y,
struct ggml_tensor* guidance, ggml_tensor* guidance,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mod_index_arange = nullptr, ggml_tensor* mod_index_arange = nullptr,
struct ggml_tensor* dct = nullptr, ggml_tensor* dct = nullptr,
std::vector<ggml_tensor*> ref_latents = {}, std::vector<ggml_tensor*> ref_latents = {},
std::vector<int> skip_layers = {}) { std::vector<int> skip_layers = {}) {
GGML_ASSERT(x->ne[3] == 1); GGML_ASSERT(x->ne[3] == 1);
int64_t W = x->ne[0]; int64_t W = x->ne[0];
@ -1060,7 +997,7 @@ namespace Flux {
int pad_h = (patch_size - H % patch_size) % patch_size; int pad_h = (patch_size - H % patch_size) % patch_size;
int pad_w = (patch_size - W % patch_size) % patch_size; int pad_w = (patch_size - W % patch_size) % patch_size;
auto img = pad_to_patch_size(ctx, x); auto img = DiT::pad_to_patch_size(ctx, x, params.patch_size, params.patch_size);
auto orig_img = img; auto orig_img = img;
if (params.chroma_radiance_params.fake_patch_size_x2) { if (params.chroma_radiance_params.fake_patch_size_x2) {
@ -1082,7 +1019,7 @@ namespace Flux {
auto nerf_image_embedder = std::dynamic_pointer_cast<NerfEmbedder>(blocks["nerf_image_embedder"]); auto nerf_image_embedder = std::dynamic_pointer_cast<NerfEmbedder>(blocks["nerf_image_embedder"]);
auto nerf_final_layer_conv = std::dynamic_pointer_cast<NerfFinalLayerConv>(blocks["nerf_final_layer_conv"]); auto nerf_final_layer_conv = std::dynamic_pointer_cast<NerfFinalLayerConv>(blocks["nerf_final_layer_conv"]);
auto nerf_pixels = patchify(ctx->ggml_ctx, orig_img); // [N, num_patches, C * patch_size * patch_size] auto nerf_pixels = DiT::patchify(ctx->ggml_ctx, orig_img, patch_size, patch_size); // [N, num_patches, C * patch_size * patch_size]
int64_t num_patches = nerf_pixels->ne[1]; int64_t num_patches = nerf_pixels->ne[1];
nerf_pixels = ggml_reshape_3d(ctx->ggml_ctx, nerf_pixels = ggml_reshape_3d(ctx->ggml_ctx,
nerf_pixels, nerf_pixels,
@ -1102,7 +1039,7 @@ namespace Flux {
img_dct = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, img_dct, 1, 0, 2, 3)); // [N*num_patches, nerf_hidden_size, patch_size*patch_size] img_dct = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, img_dct, 1, 0, 2, 3)); // [N*num_patches, nerf_hidden_size, patch_size*patch_size]
img_dct = ggml_reshape_3d(ctx->ggml_ctx, img_dct, img_dct->ne[0] * img_dct->ne[1], num_patches, img_dct->ne[2] / num_patches); // [N, num_patches, nerf_hidden_size*patch_size*patch_size] img_dct = ggml_reshape_3d(ctx->ggml_ctx, img_dct, img_dct->ne[0] * img_dct->ne[1], num_patches, img_dct->ne[2] / num_patches); // [N, num_patches, nerf_hidden_size*patch_size*patch_size]
img_dct = unpatchify(ctx->ggml_ctx, img_dct, (H + pad_h) / patch_size, (W + pad_w) / patch_size); // [N, nerf_hidden_size, H, W] img_dct = DiT::unpatchify(ctx->ggml_ctx, img_dct, (H + pad_h) / patch_size, (W + pad_w) / patch_size, patch_size, patch_size); // [N, nerf_hidden_size, H, W]
out = nerf_final_layer_conv->forward(ctx, img_dct); // [N, C, H, W] out = nerf_final_layer_conv->forward(ctx, img_dct); // [N, C, H, W]
@ -1113,18 +1050,18 @@ namespace Flux {
return out; return out;
} }
struct ggml_tensor* forward_flux_chroma(GGMLRunnerContext* ctx, ggml_tensor* forward_flux_chroma(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* timestep, ggml_tensor* timestep,
struct ggml_tensor* context, ggml_tensor* context,
struct ggml_tensor* c_concat, ggml_tensor* c_concat,
struct ggml_tensor* y, ggml_tensor* y,
struct ggml_tensor* guidance, ggml_tensor* guidance,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mod_index_arange = nullptr, ggml_tensor* mod_index_arange = nullptr,
struct ggml_tensor* dct = nullptr, ggml_tensor* dct = nullptr,
std::vector<ggml_tensor*> ref_latents = {}, std::vector<ggml_tensor*> ref_latents = {},
std::vector<int> skip_layers = {}) { std::vector<int> skip_layers = {}) {
GGML_ASSERT(x->ne[3] == 1); GGML_ASSERT(x->ne[3] == 1);
int64_t W = x->ne[0]; int64_t W = x->ne[0];
@ -1134,7 +1071,7 @@ namespace Flux {
int pad_h = (patch_size - H % patch_size) % patch_size; int pad_h = (patch_size - H % patch_size) % patch_size;
int pad_w = (patch_size - W % patch_size) % patch_size; int pad_w = (patch_size - W % patch_size) % patch_size;
auto img = process_img(ctx, x); auto img = DiT::pad_and_patchify(ctx, x, patch_size, patch_size);
int64_t img_tokens = img->ne[1]; int64_t img_tokens = img->ne[1];
if (params.version == VERSION_FLUX_FILL) { if (params.version == VERSION_FLUX_FILL) {
@ -1142,8 +1079,8 @@ namespace Flux {
ggml_tensor* masked = ggml_view_4d(ctx->ggml_ctx, c_concat, c_concat->ne[0], c_concat->ne[1], C, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], 0); ggml_tensor* masked = ggml_view_4d(ctx->ggml_ctx, c_concat, c_concat->ne[0], c_concat->ne[1], C, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], 0);
ggml_tensor* mask = ggml_view_4d(ctx->ggml_ctx, c_concat, c_concat->ne[0], c_concat->ne[1], 8 * 8, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * C); ggml_tensor* mask = ggml_view_4d(ctx->ggml_ctx, c_concat, c_concat->ne[0], c_concat->ne[1], 8 * 8, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * C);
masked = process_img(ctx, masked); masked = DiT::pad_and_patchify(ctx, masked, patch_size, patch_size);
mask = process_img(ctx, mask); mask = DiT::pad_and_patchify(ctx, mask, patch_size, patch_size);
img = ggml_concat(ctx->ggml_ctx, img, ggml_concat(ctx->ggml_ctx, masked, mask, 0), 0); img = ggml_concat(ctx->ggml_ctx, img, ggml_concat(ctx->ggml_ctx, masked, mask, 0), 0);
} else if (params.version == VERSION_FLEX_2) { } else if (params.version == VERSION_FLEX_2) {
@ -1152,21 +1089,21 @@ namespace Flux {
ggml_tensor* mask = ggml_view_4d(ctx->ggml_ctx, c_concat, c_concat->ne[0], c_concat->ne[1], 1, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * C); ggml_tensor* mask = ggml_view_4d(ctx->ggml_ctx, c_concat, c_concat->ne[0], c_concat->ne[1], 1, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * C);
ggml_tensor* control = ggml_view_4d(ctx->ggml_ctx, c_concat, c_concat->ne[0], c_concat->ne[1], C, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * (C + 1)); ggml_tensor* control = ggml_view_4d(ctx->ggml_ctx, c_concat, c_concat->ne[0], c_concat->ne[1], C, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * (C + 1));
masked = process_img(ctx, masked); masked = DiT::pad_and_patchify(ctx, masked, patch_size, patch_size);
mask = process_img(ctx, mask); mask = DiT::pad_and_patchify(ctx, mask, patch_size, patch_size);
control = process_img(ctx, control); control = DiT::pad_and_patchify(ctx, control, patch_size, patch_size);
img = ggml_concat(ctx->ggml_ctx, img, ggml_concat(ctx->ggml_ctx, ggml_concat(ctx->ggml_ctx, masked, mask, 0), control, 0), 0); img = ggml_concat(ctx->ggml_ctx, img, ggml_concat(ctx->ggml_ctx, ggml_concat(ctx->ggml_ctx, masked, mask, 0), control, 0), 0);
} else if (params.version == VERSION_FLUX_CONTROLS) { } else if (params.version == VERSION_FLUX_CONTROLS) {
GGML_ASSERT(c_concat != nullptr); GGML_ASSERT(c_concat != nullptr);
auto control = process_img(ctx, c_concat); auto control = DiT::pad_and_patchify(ctx, c_concat, patch_size, patch_size);
img = ggml_concat(ctx->ggml_ctx, img, control, 0); img = ggml_concat(ctx->ggml_ctx, img, control, 0);
} }
if (ref_latents.size() > 0) { if (ref_latents.size() > 0) {
for (ggml_tensor* ref : ref_latents) { for (ggml_tensor* ref : ref_latents) {
ref = process_img(ctx, ref); ref = DiT::pad_and_patchify(ctx, ref, patch_size, patch_size);
img = ggml_concat(ctx->ggml_ctx, img, ref, 1); img = ggml_concat(ctx->ggml_ctx, img, ref, 1);
} }
} }
@ -1178,23 +1115,22 @@ namespace Flux {
out = ggml_cont(ctx->ggml_ctx, out); out = ggml_cont(ctx->ggml_ctx, out);
} }
// rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=2, pw=2) out = DiT::unpatchify_and_crop(ctx->ggml_ctx, out, H, W, patch_size, patch_size); // [N, C, H, W]
out = unpatchify(ctx->ggml_ctx, out, (H + pad_h) / patch_size, (W + pad_w) / patch_size); // [N, C, H + pad_h, W + pad_w]
return out; return out;
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* timestep, ggml_tensor* timestep,
struct ggml_tensor* context, ggml_tensor* context,
struct ggml_tensor* c_concat, ggml_tensor* c_concat,
struct ggml_tensor* y, ggml_tensor* y,
struct ggml_tensor* guidance, ggml_tensor* guidance,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mod_index_arange = nullptr, ggml_tensor* mod_index_arange = nullptr,
struct ggml_tensor* dct = nullptr, ggml_tensor* dct = nullptr,
std::vector<ggml_tensor*> ref_latents = {}, std::vector<ggml_tensor*> ref_latents = {},
std::vector<int> skip_layers = {}) { std::vector<int> skip_layers = {}) {
// Forward pass of DiT. // Forward pass of DiT.
// x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images) // x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
// timestep: (N,) tensor of diffusion timesteps // timestep: (N,) tensor of diffusion timesteps
@ -1242,6 +1178,7 @@ namespace Flux {
std::vector<float> pe_vec; std::vector<float> pe_vec;
std::vector<float> mod_index_arange_vec; std::vector<float> mod_index_arange_vec;
std::vector<float> dct_vec; std::vector<float> dct_vec;
sd::Tensor<float> guidance_tensor;
SDVersion version; SDVersion version;
bool use_mask = false; bool use_mask = false;
@ -1363,7 +1300,7 @@ namespace Flux {
return "flux"; return "flux";
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
flux.get_param_tensors(tensors, prefix); flux.get_param_tensors(tensors, prefix);
} }
@ -1417,29 +1354,42 @@ namespace Flux {
return dct; return dct;
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* x, ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps_tensor,
struct ggml_tensor* context, const sd::Tensor<float>& context_tensor = {},
struct ggml_tensor* c_concat, const sd::Tensor<float>& c_concat_tensor = {},
struct ggml_tensor* y, const sd::Tensor<float>& y_tensor = {},
struct ggml_tensor* guidance, const sd::Tensor<float>& guidance_tensor = {},
std::vector<ggml_tensor*> ref_latents = {}, const std::vector<sd::Tensor<float>>& ref_latents_tensor = {},
bool increase_ref_index = false, bool increase_ref_index = false,
std::vector<int> skip_layers = {}) { std::vector<int> skip_layers = {}) {
GGML_ASSERT(x->ne[3] == 1); ggml_tensor* x = make_input(x_tensor);
struct ggml_cgraph* gf = new_graph_custom(FLUX_GRAPH_SIZE); ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
struct ggml_tensor* mod_index_arange = nullptr; ggml_tensor* c_concat = make_optional_input(c_concat_tensor);
struct ggml_tensor* dct = nullptr; // for chroma radiance ggml_tensor* y = make_optional_input(y_tensor);
if (flux_params.guidance_embed || flux_params.is_chroma) {
x = to_backend(x); if (!guidance_tensor.empty()) {
context = to_backend(context); this->guidance_tensor = guidance_tensor;
if (c_concat != nullptr) { if (flux_params.is_chroma) {
c_concat = to_backend(c_concat); this->guidance_tensor.fill_(0.f);
}
}
}
ggml_tensor* guidance = make_optional_input(this->guidance_tensor);
std::vector<ggml_tensor*> ref_latents;
ref_latents.reserve(ref_latents_tensor.size());
for (const auto& ref_latent_tensor : ref_latents_tensor) {
ref_latents.push_back(make_input(ref_latent_tensor));
} }
if (flux_params.is_chroma) {
guidance = ggml_set_f32(guidance, 0);
GGML_ASSERT(x->ne[3] == 1);
ggml_cgraph* gf = new_graph_custom(FLUX_GRAPH_SIZE);
ggml_tensor* mod_index_arange = nullptr;
ggml_tensor* dct = nullptr; // for chroma radiance
if (flux_params.is_chroma) {
if (!use_mask) { if (!use_mask) {
y = nullptr; y = nullptr;
} }
@ -1449,16 +1399,6 @@ namespace Flux {
mod_index_arange = ggml_new_tensor_1d(compute_ctx, GGML_TYPE_F32, mod_index_arange_vec.size()); mod_index_arange = ggml_new_tensor_1d(compute_ctx, GGML_TYPE_F32, mod_index_arange_vec.size());
set_backend_tensor_data(mod_index_arange, mod_index_arange_vec.data()); set_backend_tensor_data(mod_index_arange, mod_index_arange_vec.data());
} }
y = to_backend(y);
timesteps = to_backend(timesteps);
if (flux_params.guidance_embed || flux_params.is_chroma) {
guidance = to_backend(guidance);
}
for (int i = 0; i < ref_latents.size(); i++) {
ref_latents[i] = to_backend(ref_latents[i]);
}
std::set<int> txt_arange_dims; std::set<int> txt_arange_dims;
if (sd_version_is_flux2(version)) { if (sd_version_is_flux2(version)) {
txt_arange_dims = {3}; txt_arange_dims = {3};
@ -1501,89 +1441,98 @@ namespace Flux {
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_tensor* out = flux.forward(&runner_ctx, ggml_tensor* out = flux.forward(&runner_ctx,
x, x,
timesteps, timesteps,
context, context,
c_concat, c_concat,
y, y,
guidance, guidance,
pe, pe,
mod_index_arange, mod_index_arange,
dct, dct,
ref_latents, ref_latents,
skip_layers); skip_layers);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
bool compute(int n_threads, sd::Tensor<float> compute(int n_threads,
struct ggml_tensor* x, const sd::Tensor<float>& x,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps,
struct ggml_tensor* context, const sd::Tensor<float>& context = {},
struct ggml_tensor* c_concat, const sd::Tensor<float>& c_concat = {},
struct ggml_tensor* y, const sd::Tensor<float>& y = {},
struct ggml_tensor* guidance, const sd::Tensor<float>& guidance = {},
std::vector<ggml_tensor*> ref_latents = {}, const std::vector<sd::Tensor<float>>& ref_latents = {},
bool increase_ref_index = false, bool increase_ref_index = false,
struct ggml_tensor** output = nullptr, std::vector<int> skip_layers = std::vector<int>()) {
struct ggml_context* output_ctx = nullptr,
std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size] // context: [N, max_position, hidden_size]
// y: [N, adm_in_channels] or [1, adm_in_channels] // y: [N, adm_in_channels] or [1, adm_in_channels]
// guidance: [N, ] // guidance: [N, ]
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, c_concat, y, guidance, ref_latents, increase_ref_index, skip_layers); return build_graph(x, timesteps, context, c_concat, y, guidance, ref_latents, increase_ref_index, skip_layers);
}; };
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx); auto result = restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
return result;
} }
void test() { void test() {
struct ggml_init_params params; ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); ggml_context* ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr); GGML_ASSERT(ctx != nullptr);
{ {
// cpu f16: // cpu f16:
// cuda f16: nan // cuda f16: nan
// cuda q8_0: pass // cuda q8_0: pass
auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 128, 1); sd::Tensor<float> x({16, 16, 128, 1});
// ggml_set_f32(x, 0.01f); // ggml_set_f32(x, 0.01f);
// auto x = load_tensor_from_file(work_ctx, "chroma_x.bin"); // auto x = load_tensor_from_file(ctx, "chroma_x.bin");
// print_ggml_tensor(x); // print_ggml_tensor(x);
std::vector<float> timesteps_vec(1, 1.f); std::vector<float> timesteps_vec(1, 1.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec); auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
std::vector<float> guidance_vec(1, 0.f); std::vector<float> guidance_vec(1, 0.f);
auto guidance = vector_to_ggml_tensor(work_ctx, guidance_vec); auto guidance = sd::Tensor<float>::from_vector(guidance_vec);
auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 15360, 256, 1); sd::Tensor<float> context({15360, 256, 1});
// ggml_set_f32(context, 0.01f); // ggml_set_f32(context, 0.01f);
// auto context = load_tensor_from_file(work_ctx, "chroma_context.bin"); // auto context = load_tensor_from_file(ctx, "chroma_context.bin");
// print_ggml_tensor(context); // print_ggml_tensor(context);
// auto y = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 768, 1); // auto y = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 768, 1);
// ggml_set_f32(y, 0.01f); // ggml_set_f32(y, 0.01f);
auto y = nullptr; auto y = nullptr;
// print_ggml_tensor(y); // print_ggml_tensor(y);
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, nullptr, y, guidance, {}, false, &out, work_ctx); auto out_opt = compute(8,
int64_t t1 = ggml_time_ms(); x,
timesteps,
context,
{},
{},
guidance,
{},
false);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("flux test done in %lldms", t1 - t0); LOG_DEBUG("flux test done in %lldms", t1 - t0);
} }
} }

File diff suppressed because it is too large Load Diff

View File

@ -1,234 +1,300 @@
#include <cstddef> #include <algorithm>
#include <cstdint> #include <cstddef>
#include "ggml.h" #include <cstdint>
#include "ggml.h"
const float wan_21_latent_rgb_proj[16][3] = { #include "tensor.hpp"
{0.015123f, -0.148418f, 0.479828f},
{0.003652f, -0.010680f, -0.037142f}, const float wan_21_latent_rgb_proj[16][3] = {
{0.212264f, 0.063033f, 0.016779f}, {0.015123f, -0.148418f, 0.479828f},
{0.232999f, 0.406476f, 0.220125f}, {0.003652f, -0.010680f, -0.037142f},
{-0.051864f, -0.082384f, -0.069396f}, {0.212264f, 0.063033f, 0.016779f},
{0.085005f, -0.161492f, 0.010689f}, {0.232999f, 0.406476f, 0.220125f},
{-0.245369f, -0.506846f, -0.117010f}, {-0.051864f, -0.082384f, -0.069396f},
{-0.151145f, 0.017721f, 0.007207f}, {0.085005f, -0.161492f, 0.010689f},
{-0.293239f, -0.207936f, -0.421135f}, {-0.245369f, -0.506846f, -0.117010f},
{-0.187721f, 0.050783f, 0.177649f}, {-0.151145f, 0.017721f, 0.007207f},
{-0.013067f, 0.265964f, 0.166578f}, {-0.293239f, -0.207936f, -0.421135f},
{0.028327f, 0.109329f, 0.108642f}, {-0.187721f, 0.050783f, 0.177649f},
{-0.205343f, 0.043991f, 0.148914f}, {-0.013067f, 0.265964f, 0.166578f},
{0.014307f, -0.048647f, -0.007219f}, {0.028327f, 0.109329f, 0.108642f},
{0.217150f, 0.053074f, 0.319923f}, {-0.205343f, 0.043991f, 0.148914f},
{0.155357f, 0.083156f, 0.064780f}}; {0.014307f, -0.048647f, -0.007219f},
float wan_21_latent_rgb_bias[3] = {-0.270270f, -0.234976f, -0.456853f}; {0.217150f, 0.053074f, 0.319923f},
{0.155357f, 0.083156f, 0.064780f}};
const float wan_22_latent_rgb_proj[48][3] = { float wan_21_latent_rgb_bias[3] = {-0.270270f, -0.234976f, -0.456853f};
{0.017126f, -0.027230f, -0.019257f},
{-0.113739f, -0.028715f, -0.022885f}, const float wan_22_latent_rgb_proj[48][3] = {
{-0.000106f, 0.021494f, 0.004629f}, {0.017126f, -0.027230f, -0.019257f},
{-0.013273f, -0.107137f, -0.033638f}, {-0.113739f, -0.028715f, -0.022885f},
{-0.000381f, 0.000279f, 0.025877f}, {-0.000106f, 0.021494f, 0.004629f},
{-0.014216f, -0.003975f, 0.040528f}, {-0.013273f, -0.107137f, -0.033638f},
{0.001638f, -0.000748f, 0.011022f}, {-0.000381f, 0.000279f, 0.025877f},
{0.029238f, -0.006697f, 0.035933f}, {-0.014216f, -0.003975f, 0.040528f},
{0.021641f, -0.015874f, 0.040531f}, {0.001638f, -0.000748f, 0.011022f},
{-0.101984f, -0.070160f, -0.028855f}, {0.029238f, -0.006697f, 0.035933f},
{0.033207f, -0.021068f, 0.002663f}, {0.021641f, -0.015874f, 0.040531f},
{-0.104711f, 0.121673f, 0.102981f}, {-0.101984f, -0.070160f, -0.028855f},
{0.082647f, -0.004991f, 0.057237f}, {0.033207f, -0.021068f, 0.002663f},
{-0.027375f, 0.031581f, 0.006868f}, {-0.104711f, 0.121673f, 0.102981f},
{-0.045434f, 0.029444f, 0.019287f}, {0.082647f, -0.004991f, 0.057237f},
{-0.046572f, -0.012537f, 0.006675f}, {-0.027375f, 0.031581f, 0.006868f},
{0.074709f, 0.033690f, 0.025289f}, {-0.045434f, 0.029444f, 0.019287f},
{-0.008251f, -0.002745f, -0.006999f}, {-0.046572f, -0.012537f, 0.006675f},
{0.012685f, -0.061856f, -0.048658f}, {0.074709f, 0.033690f, 0.025289f},
{0.042304f, -0.007039f, 0.000295f}, {-0.008251f, -0.002745f, -0.006999f},
{-0.007644f, -0.060843f, -0.033142f}, {0.012685f, -0.061856f, -0.048658f},
{0.159909f, 0.045628f, 0.367541f}, {0.042304f, -0.007039f, 0.000295f},
{0.095171f, 0.086438f, 0.010271f}, {-0.007644f, -0.060843f, -0.033142f},
{0.006812f, 0.019643f, 0.029637f}, {0.159909f, 0.045628f, 0.367541f},
{0.003467f, -0.010705f, 0.014252f}, {0.095171f, 0.086438f, 0.010271f},
{-0.099681f, -0.066272f, -0.006243f}, {0.006812f, 0.019643f, 0.029637f},
{0.047357f, 0.037040f, 0.000185f}, {0.003467f, -0.010705f, 0.014252f},
{-0.041797f, -0.089225f, -0.032257f}, {-0.099681f, -0.066272f, -0.006243f},
{0.008928f, 0.017028f, 0.018684f}, {0.047357f, 0.037040f, 0.000185f},
{-0.042255f, 0.016045f, 0.006849f}, {-0.041797f, -0.089225f, -0.032257f},
{0.011268f, 0.036462f, 0.037387f}, {0.008928f, 0.017028f, 0.018684f},
{0.011553f, -0.016375f, -0.048589f}, {-0.042255f, 0.016045f, 0.006849f},
{0.046266f, -0.027189f, 0.056979f}, {0.011268f, 0.036462f, 0.037387f},
{0.009640f, -0.017576f, 0.030324f}, {0.011553f, -0.016375f, -0.048589f},
{-0.045794f, -0.036083f, -0.010616f}, {0.046266f, -0.027189f, 0.056979f},
{0.022418f, 0.039783f, -0.032939f}, {0.009640f, -0.017576f, 0.030324f},
{-0.052714f, -0.015525f, 0.007438f}, {-0.045794f, -0.036083f, -0.010616f},
{0.193004f, 0.223541f, 0.264175f}, {0.022418f, 0.039783f, -0.032939f},
{-0.059406f, -0.008188f, 0.022867f}, {-0.052714f, -0.015525f, 0.007438f},
{-0.156742f, -0.263791f, -0.007385f}, {0.193004f, 0.223541f, 0.264175f},
{-0.015717f, 0.016570f, 0.033969f}, {-0.059406f, -0.008188f, 0.022867f},
{0.037969f, 0.109835f, 0.200449f}, {-0.156742f, -0.263791f, -0.007385f},
{-0.000782f, -0.009566f, -0.008058f}, {-0.015717f, 0.016570f, 0.033969f},
{0.010709f, 0.052960f, -0.044195f}, {0.037969f, 0.109835f, 0.200449f},
{0.017271f, 0.045839f, 0.034569f}, {-0.000782f, -0.009566f, -0.008058f},
{0.009424f, 0.013088f, -0.001714f}, {0.010709f, 0.052960f, -0.044195f},
{-0.024805f, -0.059378f, -0.033756f}, {0.017271f, 0.045839f, 0.034569f},
{-0.078293f, 0.029070f, 0.026129f}}; {0.009424f, 0.013088f, -0.001714f},
float wan_22_latent_rgb_bias[3] = {0.013160f, -0.096492f, -0.071323f}; {-0.024805f, -0.059378f, -0.033756f},
{-0.078293f, 0.029070f, 0.026129f}};
const float flux_latent_rgb_proj[16][3] = { float wan_22_latent_rgb_bias[3] = {0.013160f, -0.096492f, -0.071323f};
{-0.041168f, 0.019917f, 0.097253f},
{0.028096f, 0.026730f, 0.129576f}, const float flux_latent_rgb_proj[16][3] = {
{0.065618f, -0.067950f, -0.014651f}, {-0.041168f, 0.019917f, 0.097253f},
{-0.012998f, -0.014762f, 0.081251f}, {0.028096f, 0.026730f, 0.129576f},
{0.078567f, 0.059296f, -0.024687f}, {0.065618f, -0.067950f, -0.014651f},
{-0.015987f, -0.003697f, 0.005012f}, {-0.012998f, -0.014762f, 0.081251f},
{0.033605f, 0.138999f, 0.068517f}, {0.078567f, 0.059296f, -0.024687f},
{-0.024450f, -0.063567f, -0.030101f}, {-0.015987f, -0.003697f, 0.005012f},
{-0.040194f, -0.016710f, 0.127185f}, {0.033605f, 0.138999f, 0.068517f},
{0.112681f, 0.088764f, -0.041940f}, {-0.024450f, -0.063567f, -0.030101f},
{-0.023498f, 0.093664f, 0.025543f}, {-0.040194f, -0.016710f, 0.127185f},
{0.082899f, 0.048320f, 0.007491f}, {0.112681f, 0.088764f, -0.041940f},
{0.075712f, 0.074139f, 0.081965f}, {-0.023498f, 0.093664f, 0.025543f},
{-0.143501f, 0.018263f, -0.136138f}, {0.082899f, 0.048320f, 0.007491f},
{-0.025767f, -0.082035f, -0.040023f}, {0.075712f, 0.074139f, 0.081965f},
{-0.111849f, -0.055589f, -0.032361f}}; {-0.143501f, 0.018263f, -0.136138f},
float flux_latent_rgb_bias[3] = {0.024600f, -0.006937f, -0.008089f}; {-0.025767f, -0.082035f, -0.040023f},
{-0.111849f, -0.055589f, -0.032361f}};
const float flux2_latent_rgb_proj[32][3] = { float flux_latent_rgb_bias[3] = {0.024600f, -0.006937f, -0.008089f};
{0.000736f, -0.008385f, -0.019710f},
{-0.001352f, -0.016392f, 0.020693f}, const float flux2_latent_rgb_proj[32][3] = {
{-0.006376f, 0.002428f, 0.036736f}, {0.000736f, -0.008385f, -0.019710f},
{0.039384f, 0.074167f, 0.119789f}, {-0.001352f, -0.016392f, 0.020693f},
{0.007464f, -0.005705f, -0.004734f}, {-0.006376f, 0.002428f, 0.036736f},
{-0.004086f, 0.005287f, -0.000409f}, {0.039384f, 0.074167f, 0.119789f},
{-0.032835f, 0.050802f, -0.028120f}, {0.007464f, -0.005705f, -0.004734f},
{-0.003158f, -0.000835f, 0.000406f}, {-0.004086f, 0.005287f, -0.000409f},
{-0.112840f, -0.084337f, -0.023083f}, {-0.032835f, 0.050802f, -0.028120f},
{0.001462f, -0.006656f, 0.000549f}, {-0.003158f, -0.000835f, 0.000406f},
{-0.009980f, -0.007480f, 0.009702f}, {-0.112840f, -0.084337f, -0.023083f},
{0.032540f, 0.000214f, -0.061388f}, {0.001462f, -0.006656f, 0.000549f},
{0.011023f, 0.000694f, 0.007143f}, {-0.009980f, -0.007480f, 0.009702f},
{-0.001468f, -0.006723f, -0.001678f}, {0.032540f, 0.000214f, -0.061388f},
{-0.005921f, -0.010320f, -0.003907f}, {0.011023f, 0.000694f, 0.007143f},
{-0.028434f, 0.027584f, 0.018457f}, {-0.001468f, -0.006723f, -0.001678f},
{0.014349f, 0.011523f, 0.000441f}, {-0.005921f, -0.010320f, -0.003907f},
{0.009874f, 0.003081f, 0.001507f}, {-0.028434f, 0.027584f, 0.018457f},
{0.002218f, 0.005712f, 0.001563f}, {0.014349f, 0.011523f, 0.000441f},
{0.053010f, -0.019844f, 0.008683f}, {0.009874f, 0.003081f, 0.001507f},
{-0.002507f, 0.005384f, 0.000938f}, {0.002218f, 0.005712f, 0.001563f},
{-0.002177f, -0.011366f, 0.003559f}, {0.053010f, -0.019844f, 0.008683f},
{-0.000261f, 0.015121f, -0.003240f}, {-0.002507f, 0.005384f, 0.000938f},
{-0.003944f, -0.002083f, 0.005043f}, {-0.002177f, -0.011366f, 0.003559f},
{-0.009138f, 0.011336f, 0.003781f}, {-0.000261f, 0.015121f, -0.003240f},
{0.011429f, 0.003985f, -0.003855f}, {-0.003944f, -0.002083f, 0.005043f},
{0.010518f, -0.005586f, 0.010131f}, {-0.009138f, 0.011336f, 0.003781f},
{0.007883f, 0.002912f, -0.001473f}, {0.011429f, 0.003985f, -0.003855f},
{-0.003318f, -0.003160f, 0.003684f}, {0.010518f, -0.005586f, 0.010131f},
{-0.034560f, -0.008740f, 0.012996f}, {0.007883f, 0.002912f, -0.001473f},
{0.000166f, 0.001079f, -0.012153f}, {-0.003318f, -0.003160f, 0.003684f},
{0.017772f, 0.000937f, -0.011953f}}; {-0.034560f, -0.008740f, 0.012996f},
float flux2_latent_rgb_bias[3] = {-0.028738f, -0.098463f, -0.107619f}; {0.000166f, 0.001079f, -0.012153f},
{0.017772f, 0.000937f, -0.011953f}};
// This one was taken straight from float flux2_latent_rgb_bias[3] = {-0.028738f, -0.098463f, -0.107619f};
// https://github.com/Stability-AI/sd3.5/blob/8565799a3b41eb0c7ba976d18375f0f753f56402/sd3_impls.py#L288-L303
// (MiT Licence) // This one was taken straight from
const float sd3_latent_rgb_proj[16][3] = { // https://github.com/Stability-AI/sd3.5/blob/8565799a3b41eb0c7ba976d18375f0f753f56402/sd3_impls.py#L288-L303
{-0.0645f, 0.0177f, 0.1052f}, // (MiT Licence)
{0.0028f, 0.0312f, 0.0650f}, const float sd3_latent_rgb_proj[16][3] = {
{0.1848f, 0.0762f, 0.0360f}, {-0.0645f, 0.0177f, 0.1052f},
{0.0944f, 0.0360f, 0.0889f}, {0.0028f, 0.0312f, 0.0650f},
{0.0897f, 0.0506f, -0.0364f}, {0.1848f, 0.0762f, 0.0360f},
{-0.0020f, 0.1203f, 0.0284f}, {0.0944f, 0.0360f, 0.0889f},
{0.0855f, 0.0118f, 0.0283f}, {0.0897f, 0.0506f, -0.0364f},
{-0.0539f, 0.0658f, 0.1047f}, {-0.0020f, 0.1203f, 0.0284f},
{-0.0057f, 0.0116f, 0.0700f}, {0.0855f, 0.0118f, 0.0283f},
{-0.0412f, 0.0281f, -0.0039f}, {-0.0539f, 0.0658f, 0.1047f},
{0.1106f, 0.1171f, 0.1220f}, {-0.0057f, 0.0116f, 0.0700f},
{-0.0248f, 0.0682f, -0.0481f}, {-0.0412f, 0.0281f, -0.0039f},
{0.0815f, 0.0846f, 0.1207f}, {0.1106f, 0.1171f, 0.1220f},
{-0.0120f, -0.0055f, -0.0867f}, {-0.0248f, 0.0682f, -0.0481f},
{-0.0749f, -0.0634f, -0.0456f}, {0.0815f, 0.0846f, 0.1207f},
{-0.1418f, -0.1457f, -0.1259f}, {-0.0120f, -0.0055f, -0.0867f},
}; {-0.0749f, -0.0634f, -0.0456f},
float sd3_latent_rgb_bias[3] = {0, 0, 0}; {-0.1418f, -0.1457f, -0.1259f},
};
const float sdxl_latent_rgb_proj[4][3] = { float sd3_latent_rgb_bias[3] = {0, 0, 0};
{0.258303f, 0.277640f, 0.329699f},
{-0.299701f, 0.105446f, 0.014194f}, const float sdxl_latent_rgb_proj[4][3] = {
{0.050522f, 0.186163f, -0.143257f}, {0.258303f, 0.277640f, 0.329699f},
{-0.211938f, -0.149892f, -0.080036f}}; {-0.299701f, 0.105446f, 0.014194f},
float sdxl_latent_rgb_bias[3] = {0.144381f, -0.033313f, 0.007061f}; {0.050522f, 0.186163f, -0.143257f},
{-0.211938f, -0.149892f, -0.080036f}};
const float sd_latent_rgb_proj[4][3] = { float sdxl_latent_rgb_bias[3] = {0.144381f, -0.033313f, 0.007061f};
{0.337366f, 0.216344f, 0.257386f},
{0.165636f, 0.386828f, 0.046994f}, const float sd_latent_rgb_proj[4][3] = {
{-0.267803f, 0.237036f, 0.223517f}, {0.337366f, 0.216344f, 0.257386f},
{-0.178022f, -0.200862f, -0.678514f}}; {0.165636f, 0.386828f, 0.046994f},
float sd_latent_rgb_bias[3] = {-0.017478f, -0.055834f, -0.105825f}; {-0.267803f, 0.237036f, 0.223517f},
{-0.178022f, -0.200862f, -0.678514f}};
void preview_latent_video(uint8_t* buffer, struct ggml_tensor* latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) { float sd_latent_rgb_bias[3] = {-0.017478f, -0.055834f, -0.105825f};
size_t buffer_head = 0;
void preview_latent_video(uint8_t* buffer, ggml_tensor* latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) {
uint32_t latent_width = static_cast<uint32_t>(latents->ne[0]); size_t buffer_head = 0;
uint32_t latent_height = static_cast<uint32_t>(latents->ne[1]);
uint32_t dim = static_cast<uint32_t>(latents->ne[ggml_n_dims(latents) - 1]); uint32_t latent_width = static_cast<uint32_t>(latents->ne[0]);
uint32_t frames = 1; uint32_t latent_height = static_cast<uint32_t>(latents->ne[1]);
if (ggml_n_dims(latents) == 4) { uint32_t dim = static_cast<uint32_t>(latents->ne[ggml_n_dims(latents) - 1]);
frames = static_cast<uint32_t>(latents->ne[2]); uint32_t frames = 1;
} if (ggml_n_dims(latents) == 4) {
frames = static_cast<uint32_t>(latents->ne[2]);
uint32_t rgb_width = latent_width * patch_size; }
uint32_t rgb_height = latent_height * patch_size;
uint32_t rgb_width = latent_width * patch_size;
uint32_t unpatched_dim = dim / (patch_size * patch_size); uint32_t rgb_height = latent_height * patch_size;
for (uint32_t k = 0; k < frames; k++) { uint32_t unpatched_dim = dim / (patch_size * patch_size);
for (uint32_t rgb_x = 0; rgb_x < rgb_width; rgb_x++) {
for (uint32_t rgb_y = 0; rgb_y < rgb_height; rgb_y++) { for (uint32_t k = 0; k < frames; k++) {
int latent_x = rgb_x / patch_size; for (uint32_t rgb_x = 0; rgb_x < rgb_width; rgb_x++) {
int latent_y = rgb_y / patch_size; for (uint32_t rgb_y = 0; rgb_y < rgb_height; rgb_y++) {
int latent_x = rgb_x / patch_size;
int channel_offset = 0; int latent_y = rgb_y / patch_size;
if (patch_size > 1) {
channel_offset = ((rgb_y % patch_size) * patch_size + (rgb_x % patch_size)); int channel_offset = 0;
} if (patch_size > 1) {
channel_offset = ((rgb_y % patch_size) * patch_size + (rgb_x % patch_size));
size_t latent_id = (latent_x * latents->nb[0] + latent_y * latents->nb[1] + k * latents->nb[2]); }
// should be incremented by 1 for each pixel size_t latent_id = (latent_x * latents->nb[0] + latent_y * latents->nb[1] + k * latents->nb[2]);
size_t pixel_id = k * rgb_width * rgb_height + rgb_y * rgb_width + rgb_x;
// should be incremented by 1 for each pixel
float r = 0, g = 0, b = 0; size_t pixel_id = k * rgb_width * rgb_height + rgb_y * rgb_width + rgb_x;
if (latent_rgb_proj != nullptr) {
for (uint32_t d = 0; d < unpatched_dim; d++) { float r = 0, g = 0, b = 0;
float value = *(float*)((char*)latents->data + latent_id + (d * patch_size * patch_size + channel_offset) * latents->nb[ggml_n_dims(latents) - 1]); if (latent_rgb_proj != nullptr) {
r += value * latent_rgb_proj[d][0]; for (uint32_t d = 0; d < unpatched_dim; d++) {
g += value * latent_rgb_proj[d][1]; float value = *(float*)((char*)latents->data + latent_id + (d * patch_size * patch_size + channel_offset) * latents->nb[ggml_n_dims(latents) - 1]);
b += value * latent_rgb_proj[d][2]; r += value * latent_rgb_proj[d][0];
} g += value * latent_rgb_proj[d][1];
} else { b += value * latent_rgb_proj[d][2];
// interpret first 3 channels as RGB }
r = *(float*)((char*)latents->data + latent_id + 0 * latents->nb[ggml_n_dims(latents) - 1]); } else {
g = *(float*)((char*)latents->data + latent_id + 1 * latents->nb[ggml_n_dims(latents) - 1]); // interpret first 3 channels as RGB
b = *(float*)((char*)latents->data + latent_id + 2 * latents->nb[ggml_n_dims(latents) - 1]); r = *(float*)((char*)latents->data + latent_id + 0 * latents->nb[ggml_n_dims(latents) - 1]);
} g = *(float*)((char*)latents->data + latent_id + 1 * latents->nb[ggml_n_dims(latents) - 1]);
if (latent_rgb_bias != nullptr) { b = *(float*)((char*)latents->data + latent_id + 2 * latents->nb[ggml_n_dims(latents) - 1]);
// bias }
r += latent_rgb_bias[0]; if (latent_rgb_bias != nullptr) {
g += latent_rgb_bias[1]; // bias
b += latent_rgb_bias[2]; r += latent_rgb_bias[0];
} g += latent_rgb_bias[1];
// change range b += latent_rgb_bias[2];
r = r * .5f + .5f; }
g = g * .5f + .5f; // change range
b = b * .5f + .5f; r = r * .5f + .5f;
g = g * .5f + .5f;
// clamp rgb values to [0,1] range b = b * .5f + .5f;
r = r >= 0 ? r <= 1 ? r : 1 : 0;
g = g >= 0 ? g <= 1 ? g : 1 : 0; // clamp rgb values to [0,1] range
b = b >= 0 ? b <= 1 ? b : 1 : 0; r = r >= 0 ? r <= 1 ? r : 1 : 0;
g = g >= 0 ? g <= 1 ? g : 1 : 0;
buffer[pixel_id * 3 + 0] = (uint8_t)(r * 255); b = b >= 0 ? b <= 1 ? b : 1 : 0;
buffer[pixel_id * 3 + 1] = (uint8_t)(g * 255);
buffer[pixel_id * 3 + 2] = (uint8_t)(b * 255); buffer[pixel_id * 3 + 0] = (uint8_t)(r * 255);
} buffer[pixel_id * 3 + 1] = (uint8_t)(g * 255);
} buffer[pixel_id * 3 + 2] = (uint8_t)(b * 255);
} }
} }
}
}
// A 5-D latent tensor is treated as video (width, height, frames, channels, ...);
// any other rank is a single image.
static inline bool preview_latent_tensor_is_video(const sd::Tensor<float>& latents) {
    const int rank = latents.dim();
    return rank == 5;
}
// Render a cheap RGB preview of a latent tensor into `buffer` (3 bytes per
// pixel, frame-major). Each latent cell expands into patch_size x patch_size
// output pixels, the sub-pixel selecting a channel offset within the patched
// channel dimension. When `latent_rgb_proj` is non-null the (unpatched)
// channels are projected onto RGB with it; otherwise the first three channels
// are read as RGB directly. `latent_rgb_bias` (optional) is added before the
// [-1,1] -> [0,1] remap and clamp.
void preview_latent_video(uint8_t* buffer, const sd::Tensor<float>& latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) {
    const uint32_t lat_w = static_cast<uint32_t>(latents.shape()[0]);
    const uint32_t lat_h = static_cast<uint32_t>(latents.shape()[1]);

    const bool is_video   = preview_latent_tensor_is_video(latents);
    const uint32_t frames = is_video ? static_cast<uint32_t>(latents.shape()[2]) : 1;
    // channel axis sits after the frame axis for video, right after H otherwise
    const uint32_t channels = static_cast<uint32_t>(latents.shape()[is_video ? 3 : 2]);

    const uint32_t out_w = lat_w * patch_size;
    const uint32_t out_h = lat_h * patch_size;
    // channels per output pixel after undoing the patch packing
    const uint32_t channels_unpatched = channels / (patch_size * patch_size);

    for (uint32_t frame = 0; frame < frames; frame++) {
        for (uint32_t px = 0; px < out_w; px++) {
            for (uint32_t py = 0; py < out_h; py++) {
                const uint32_t lx = px / patch_size;
                const uint32_t ly = py / patch_size;

                // which slot of the patched channel block this sub-pixel reads
                uint32_t patch_slot = 0;
                if (patch_size > 1) {
                    patch_slot = (py % patch_size) * patch_size + (px % patch_size);
                }

                // fetch one latent value at the given channel, honoring layout
                auto fetch = [&](uint32_t ch) -> float {
                    return is_video
                               ? latents.values()[lx + lat_w * (ly + lat_h * (frame + frames * ch))]
                               : latents.values()[lx + lat_w * (ly + lat_h * ch)];
                };

                float rgb[3] = {0.f, 0.f, 0.f};
                if (latent_rgb_proj != nullptr) {
                    // project all unpatched channels onto RGB
                    for (uint32_t d = 0; d < channels_unpatched; d++) {
                        const float v = fetch(d * patch_size * patch_size + patch_slot);
                        for (int c = 0; c < 3; c++) {
                            rgb[c] += v * latent_rgb_proj[d][c];
                        }
                    }
                } else {
                    // no projection: interpret the first three channels as RGB
                    for (int c = 0; c < 3; c++) {
                        rgb[c] = fetch(static_cast<uint32_t>(c));
                    }
                }

                const size_t pixel = frame * out_w * out_h + py * out_w + px;
                for (int c = 0; c < 3; c++) {
                    float v = rgb[c];
                    if (latent_rgb_bias != nullptr) {
                        v += latent_rgb_bias[c];
                    }
                    // remap [-1,1] -> [0,1], clamping (NaN collapses to 0)
                    v = std::min(1.0f, std::max(0.0f, v * .5f + .5f));
                    buffer[pixel * 3 + c] = (uint8_t)(v * 255);
                }
            }
        }
    }
}

View File

@ -19,6 +19,7 @@
#include "json.hpp" #include "json.hpp"
#include "rope.hpp" #include "rope.hpp"
#include "tokenize_util.h" #include "tokenize_util.h"
#include "vocab/vocab.h"
namespace LLM { namespace LLM {
constexpr int LLM_GRAPH_SIZE = 10240; constexpr int LLM_GRAPH_SIZE = 10240;
@ -193,6 +194,7 @@ namespace LLM {
bool padding = false) { bool padding = false) {
if (add_bos_token) { if (add_bos_token) {
tokens.insert(tokens.begin(), BOS_TOKEN_ID); tokens.insert(tokens.begin(), BOS_TOKEN_ID);
weights.insert(weights.begin(), 1.f);
} }
if (max_length > 0 && padding) { if (max_length > 0 && padding) {
size_t n = static_cast<size_t>(std::ceil(tokens.size() * 1.f / max_length)); size_t n = static_cast<size_t>(std::ceil(tokens.size() * 1.f / max_length));
@ -365,7 +367,7 @@ namespace LLM {
if (merges_utf8_str.size() > 0) { if (merges_utf8_str.size() > 0) {
load_from_merges(merges_utf8_str); load_from_merges(merges_utf8_str);
} else { } else {
load_from_merges(ModelLoader::load_qwen2_merges()); load_from_merges(load_qwen2_merges());
} }
} }
}; };
@ -466,7 +468,7 @@ namespace LLM {
if (merges_utf8_str.size() > 0 && vocab_utf8_str.size() > 0) { if (merges_utf8_str.size() > 0 && vocab_utf8_str.size() > 0) {
load_from_merges(merges_utf8_str, vocab_utf8_str); load_from_merges(merges_utf8_str, vocab_utf8_str);
} else { } else {
load_from_merges(ModelLoader::load_mistral_merges(), ModelLoader::load_mistral_vocab_json()); load_from_merges(load_mistral_merges(), load_mistral_vocab_json());
} }
} }
}; };
@ -521,7 +523,7 @@ namespace LLM {
blocks["down_proj"] = std::shared_ptr<GGMLBlock>(new Linear(intermediate_size, hidden_size, bias)); blocks["down_proj"] = std::shared_ptr<GGMLBlock>(new Linear(intermediate_size, hidden_size, bias));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
auto gate_proj = std::dynamic_pointer_cast<Linear>(blocks["gate_proj"]); auto gate_proj = std::dynamic_pointer_cast<Linear>(blocks["gate_proj"]);
auto up_proj = std::dynamic_pointer_cast<Linear>(blocks["up_proj"]); auto up_proj = std::dynamic_pointer_cast<Linear>(blocks["up_proj"]);
@ -581,7 +583,7 @@ namespace LLM {
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N*grid_t*grid_h*grid_w, in_channels, temporal_patch_size*patch_size*patch_size] // x: [N*grid_t*grid_h*grid_w, in_channels, temporal_patch_size*patch_size*patch_size]
// return: [N*grid_t*grid_h*grid_w, embed_dim] // return: [N*grid_t*grid_h*grid_w, embed_dim]
x = ggml_reshape_4d(ctx->ggml_ctx, x = ggml_reshape_4d(ctx->ggml_ctx,
@ -630,7 +632,7 @@ namespace LLM {
blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, dim)); blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, dim));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto ln_q = std::dynamic_pointer_cast<RMSNorm>(blocks["ln_q"]); auto ln_q = std::dynamic_pointer_cast<RMSNorm>(blocks["ln_q"]);
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]); auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
auto mlp_2 = std::dynamic_pointer_cast<Linear>(blocks["mlp.2"]); auto mlp_2 = std::dynamic_pointer_cast<Linear>(blocks["mlp.2"]);
@ -667,10 +669,10 @@ namespace LLM {
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, hidden_size)); blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, hidden_size));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mask = nullptr) { ggml_tensor* mask = nullptr) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
int64_t n_token = x->ne[1]; int64_t n_token = x->ne[1];
int64_t N = x->ne[2]; int64_t N = x->ne[2];
@ -717,10 +719,10 @@ namespace LLM {
blocks["norm2"] = std::shared_ptr<GGMLBlock>(new RMSNorm(hidden_size, eps)); blocks["norm2"] = std::shared_ptr<GGMLBlock>(new RMSNorm(hidden_size, eps));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mask = nullptr) { ggml_tensor* mask = nullptr) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
auto attn = std::dynamic_pointer_cast<VisionAttention>(blocks["attn"]); auto attn = std::dynamic_pointer_cast<VisionAttention>(blocks["attn"]);
auto mlp = std::dynamic_pointer_cast<MLP>(blocks["mlp"]); auto mlp = std::dynamic_pointer_cast<MLP>(blocks["mlp"]);
@ -777,12 +779,12 @@ namespace LLM {
blocks["merger"] = std::shared_ptr<GGMLBlock>(new PatchMerger(out_hidden_size, hidden_size, spatial_merge_size)); blocks["merger"] = std::shared_ptr<GGMLBlock>(new PatchMerger(out_hidden_size, hidden_size, spatial_merge_size));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* pixel_values, ggml_tensor* pixel_values,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* window_index, ggml_tensor* window_index,
struct ggml_tensor* window_inverse_index, ggml_tensor* window_inverse_index,
struct ggml_tensor* window_mask) { ggml_tensor* window_mask) {
// pixel_values: [grid_t*(H/mh/ph)*(W/mw/pw)*mh*mw, C*pt*ph*pw] // pixel_values: [grid_t*(H/mh/ph)*(W/mw/pw)*mh*mw, C*pt*ph*pw]
// window_index: [grid_t*(H/mh/ph)*(W/mw/pw)] // window_index: [grid_t*(H/mh/ph)*(W/mw/pw)]
// window_inverse_index: [grid_t*(H/mh/ph)*(W/mw/pw)] // window_inverse_index: [grid_t*(H/mh/ph)*(W/mw/pw)]
@ -835,10 +837,10 @@ namespace LLM {
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* input_pos, ggml_tensor* input_pos,
struct ggml_tensor* attention_mask = nullptr) { ggml_tensor* attention_mask = nullptr) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
int64_t n_token = x->ne[1]; int64_t n_token = x->ne[1];
int64_t N = x->ne[2]; int64_t N = x->ne[2];
@ -897,10 +899,10 @@ namespace LLM {
blocks["post_attention_layernorm"] = std::make_shared<RMSNorm>(params.hidden_size, params.rms_norm_eps); blocks["post_attention_layernorm"] = std::make_shared<RMSNorm>(params.hidden_size, params.rms_norm_eps);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* input_pos, ggml_tensor* input_pos,
struct ggml_tensor* attention_mask = nullptr) { ggml_tensor* attention_mask = nullptr) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
auto self_attn = std::dynamic_pointer_cast<Attention>(blocks["self_attn"]); auto self_attn = std::dynamic_pointer_cast<Attention>(blocks["self_attn"]);
auto mlp = std::dynamic_pointer_cast<MLP>(blocks["mlp"]); auto mlp = std::dynamic_pointer_cast<MLP>(blocks["mlp"]);
@ -935,12 +937,12 @@ namespace LLM {
blocks["norm"] = std::shared_ptr<GGMLBlock>(new RMSNorm(params.hidden_size, params.rms_norm_eps)); blocks["norm"] = std::shared_ptr<GGMLBlock>(new RMSNorm(params.hidden_size, params.rms_norm_eps));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* input_ids, ggml_tensor* input_ids,
struct ggml_tensor* input_pos, ggml_tensor* input_pos,
struct ggml_tensor* attention_mask, ggml_tensor* attention_mask,
std::vector<std::pair<int, ggml_tensor*>> image_embeds, std::vector<std::pair<int, ggml_tensor*>> image_embeds,
std::set<int> out_layers) { std::set<int> out_layers) {
// input_ids: [N, n_token] // input_ids: [N, n_token]
// return: [N, n_token, hidden_size] // return: [N, n_token, hidden_size]
@ -1036,12 +1038,12 @@ namespace LLM {
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* input_ids, ggml_tensor* input_ids,
struct ggml_tensor* input_pos, ggml_tensor* input_pos,
struct ggml_tensor* attention_mask, ggml_tensor* attention_mask,
std::vector<std::pair<int, ggml_tensor*>> image_embeds, std::vector<std::pair<int, ggml_tensor*>> image_embeds,
std::set<int> out_layers) { std::set<int> out_layers) {
// input_ids: [N, n_token] // input_ids: [N, n_token]
auto model = std::dynamic_pointer_cast<TextModel>(blocks["model"]); auto model = std::dynamic_pointer_cast<TextModel>(blocks["model"]);
@ -1049,12 +1051,12 @@ namespace LLM {
return x; return x;
} }
struct ggml_tensor* vision_forward(GGMLRunnerContext* ctx, ggml_tensor* vision_forward(GGMLRunnerContext* ctx,
struct ggml_tensor* pixel_values, ggml_tensor* pixel_values,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* window_index, ggml_tensor* window_index,
struct ggml_tensor* window_inverse_index, ggml_tensor* window_inverse_index,
struct ggml_tensor* window_mask) { ggml_tensor* window_mask) {
GGML_ASSERT(enable_vision); GGML_ASSERT(enable_vision);
auto vision_model = std::dynamic_pointer_cast<VisionModel>(blocks["visual"]); auto vision_model = std::dynamic_pointer_cast<VisionModel>(blocks["visual"]);
return vision_model->forward(ctx, pixel_values, pe, window_index, window_inverse_index, window_mask); return vision_model->forward(ctx, pixel_values, pe, window_index, window_inverse_index, window_mask);
@ -1155,40 +1157,41 @@ namespace LLM {
return llm_arch_to_str[static_cast<int>(params.arch)]; return llm_arch_to_str[static_cast<int>(params.arch)];
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
model.get_param_tensors(tensors, prefix); model.get_param_tensors(tensors, prefix);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* input_ids, ggml_tensor* input_ids,
struct ggml_tensor* input_pos, ggml_tensor* input_pos,
struct ggml_tensor* attention_mask, ggml_tensor* attention_mask,
std::vector<std::pair<int, ggml_tensor*>> image_embeds, std::vector<std::pair<int, ggml_tensor*>> image_embeds,
std::set<int> out_layers) { std::set<int> out_layers) {
auto hidden_states = model.forward(ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers); // [N, n_token, hidden_size] auto hidden_states = model.forward(ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers); // [N, n_token, hidden_size]
return hidden_states; return hidden_states;
} }
struct ggml_tensor* vision_forward(GGMLRunnerContext* ctx, ggml_tensor* vision_forward(GGMLRunnerContext* ctx,
struct ggml_tensor* pixel_values, ggml_tensor* pixel_values,
struct ggml_tensor* input_pos, ggml_tensor* input_pos,
struct ggml_tensor* window_index, ggml_tensor* window_index,
struct ggml_tensor* window_inverse_index, ggml_tensor* window_inverse_index,
struct ggml_tensor* window_mask) { ggml_tensor* window_mask) {
auto hidden_states = model.vision_forward(ctx, pixel_values, input_pos, window_index, window_inverse_index, window_mask); auto hidden_states = model.vision_forward(ctx, pixel_values, input_pos, window_index, window_inverse_index, window_mask);
return hidden_states; return hidden_states;
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids, ggml_cgraph* build_graph(const sd::Tensor<int32_t>& input_ids_tensor,
struct ggml_tensor* attention_mask, const sd::Tensor<float>& attention_mask_tensor,
std::vector<std::pair<int, ggml_tensor*>> image_embeds, const std::vector<std::pair<int, sd::Tensor<float>>>& image_embeds_tensor,
std::set<int> out_layers) { std::set<int> out_layers) {
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx); ggml_cgraph* gf = ggml_new_graph(compute_ctx);
ggml_tensor* input_ids = make_input(input_ids_tensor);
input_ids = to_backend(input_ids); std::vector<std::pair<int, ggml_tensor*>> image_embeds;
image_embeds.reserve(image_embeds_tensor.size());
for (auto& image_embed : image_embeds) { for (const auto& [idx, embed_tensor] : image_embeds_tensor) {
image_embed.second = to_backend(image_embed.second); ggml_tensor* embed = make_input(embed_tensor);
image_embeds.emplace_back(idx, embed);
} }
int64_t n_tokens = input_ids->ne[0]; int64_t n_tokens = input_ids->ne[0];
@ -1212,8 +1215,9 @@ namespace LLM {
input_pos_vec.size()); input_pos_vec.size());
set_backend_tensor_data(input_pos, input_pos_vec.data()); set_backend_tensor_data(input_pos, input_pos_vec.data());
if (attention_mask != nullptr) { ggml_tensor* attention_mask = nullptr;
attention_mask = to_backend(attention_mask); if (!attention_mask_tensor.empty()) {
attention_mask = make_input(attention_mask_tensor);
} else { } else {
attention_mask_vec.resize(n_tokens * n_tokens); attention_mask_vec.resize(n_tokens * n_tokens);
for (int i0 = 0; i0 < n_tokens; i0++) { for (int i0 = 0; i0 < n_tokens; i0++) {
@ -1231,24 +1235,22 @@ namespace LLM {
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers); ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers);
ggml_build_forward_expand(gf, hidden_states); ggml_build_forward_expand(gf, hidden_states);
return gf; return gf;
} }
bool compute(const int n_threads, sd::Tensor<float> compute(const int n_threads,
struct ggml_tensor* input_ids, const sd::Tensor<int32_t>& input_ids,
struct ggml_tensor* attention_mask, const sd::Tensor<float>& attention_mask,
std::vector<std::pair<int, ggml_tensor*>> image_embeds, const std::vector<std::pair<int, sd::Tensor<float>>>& image_embeds,
std::set<int> out_layers, std::set<int> out_layers) {
ggml_tensor** output, auto get_graph = [&]() -> ggml_cgraph* {
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(input_ids, attention_mask, image_embeds, out_layers); return build_graph(input_ids, attention_mask, image_embeds, out_layers);
}; };
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx); return take_or_empty(GGMLRunner::compute<float>(get_graph, n_threads, true));
} }
int64_t get_num_image_tokens(int64_t t, int64_t h, int64_t w) { int64_t get_num_image_tokens(int64_t t, int64_t h, int64_t w) {
@ -1260,7 +1262,7 @@ namespace LLM {
return grid_t * grid_h * grid_w; return grid_t * grid_h * grid_w;
} }
struct ggml_tensor* process_image(struct ggml_context* ctx, struct ggml_tensor* image) { ggml_tensor* process_image(ggml_context* ctx, ggml_tensor* image) {
// image: [C, H, W] // image: [C, H, W]
// return: [grid_t*(H/mh/ph)*(W/mw/pw)*mh*mw, C*pt*ph*pw], grid_t == 1 // return: [grid_t*(H/mh/ph)*(W/mw/pw)*mh*mw, C*pt*ph*pw], grid_t == 1
int64_t C = image->ne[2]; int64_t C = image->ne[2];
@ -1287,8 +1289,9 @@ namespace LLM {
return image; return image;
} }
struct ggml_cgraph* build_encode_image_graph(struct ggml_tensor* image) { ggml_cgraph* build_encode_image_graph(const sd::Tensor<float>& image_tensor) {
struct ggml_cgraph* gf = new_graph_custom(LLM_GRAPH_SIZE); ggml_cgraph* gf = new_graph_custom(LLM_GRAPH_SIZE);
ggml_tensor* image = make_input(image_tensor);
GGML_ASSERT(image->ne[1] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0); GGML_ASSERT(image->ne[1] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
GGML_ASSERT(image->ne[0] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0); GGML_ASSERT(image->ne[0] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
@ -1300,8 +1303,6 @@ namespace LLM {
int llm_grid_w = grid_w / params.vision.spatial_merge_size; int llm_grid_w = grid_w / params.vision.spatial_merge_size;
int vit_merger_window_size = params.vision.window_size / params.vision.patch_size / params.vision.spatial_merge_size; int vit_merger_window_size = params.vision.window_size / params.vision.patch_size / params.vision.spatial_merge_size;
image = to_backend(image);
auto pixel_values = process_image(compute_ctx, image); auto pixel_values = process_image(compute_ctx, image);
// window index // window index
@ -1398,26 +1399,24 @@ namespace LLM {
// pe->data = nullptr; // pe->data = nullptr;
set_backend_tensor_data(pe, pe_vec.data()); set_backend_tensor_data(pe, pe_vec.data());
auto runnter_ctx = get_context(); auto runnter_ctx = get_context();
struct ggml_tensor* hidden_states = vision_forward(&runnter_ctx, ggml_tensor* hidden_states = vision_forward(&runnter_ctx,
pixel_values, pixel_values,
pe, pe,
window_index, window_index,
window_inverse_index, window_inverse_index,
window_mask); window_mask);
ggml_build_forward_expand(gf, hidden_states); ggml_build_forward_expand(gf, hidden_states);
return gf; return gf;
} }
void encode_image(const int n_threads, sd::Tensor<float> encode_image(const int n_threads,
struct ggml_tensor* image, const sd::Tensor<float>& image) {
ggml_tensor** output, auto get_graph = [&]() -> ggml_cgraph* {
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* {
return build_encode_image_graph(image); return build_encode_image_graph(image);
}; };
GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx); return take_or_empty(GGMLRunner::compute<float>(get_graph, n_threads, false));
} }
}; };
@ -1439,7 +1438,7 @@ namespace LLM {
} }
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
model.get_param_tensors(tensors, prefix); model.get_param_tensors(tensors, prefix);
} }
@ -1491,44 +1490,46 @@ namespace LLM {
} }
void test() { void test() {
struct ggml_init_params params; ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); ggml_context* ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr); GGML_ASSERT(ctx != nullptr);
bool test_mistral = false; bool test_mistral = false;
bool test_qwen3 = true; bool test_qwen3 = true;
bool test_vit = false; bool test_vit = false;
bool test_decoder_with_vit = false; bool test_decoder_with_vit = false;
if (test_decoder_with_vit) { if (test_decoder_with_vit) {
ggml_tensor* image_embed = nullptr; sd::Tensor<float> image_embed;
{ {
auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin"); auto image = sd::load_tensor_from_file_as_tensor<float>("qwen2vl_normalized.bin");
print_ggml_tensor(image, false, "image"); print_sd_tensor(image, false, "image");
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
model.encode_image(8, image, &out, work_ctx); auto out_opt = model.encode_image(8, image);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
print_ggml_tensor(out, false, "image_embed"); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out, false, "image_embed");
image_embed = out; image_embed = out;
LOG_DEBUG("llm encode_image test done in %lldms", t1 - t0); LOG_DEBUG("llm encode_image test done in %lldms", t1 - t0);
} }
std::string placeholder = "<|image_pad|>"; std::string placeholder = "<|image_pad|>";
std::string img_prompt = "Picture 1: <|vision_start|>"; // [24669, 220, 16, 25, 220, 151652] std::string img_prompt = "Picture 1: <|vision_start|>"; // [24669, 220, 16, 25, 220, 151652]
int64_t num_image_tokens = image_embed->ne[1]; int64_t num_image_tokens = image_embed.shape()[1];
img_prompt.reserve(num_image_tokens * placeholder.size()); img_prompt.reserve(num_image_tokens * placeholder.size());
for (int i = 0; i < num_image_tokens; i++) { for (int i = 0; i < num_image_tokens; i++) {
img_prompt += placeholder; img_prompt += placeholder;
} }
img_prompt += "<|vision_end|>"; img_prompt += "<|vision_end|>";
std::vector<std::pair<int, ggml_tensor*>> image_embeds; std::vector<std::pair<int, sd::Tensor<float>>> image_embeds;
image_embeds.emplace_back(64, image_embed); image_embeds.emplace_back(64, image_embed);
std::pair<int, int> prompt_attn_range; std::pair<int, int> prompt_attn_range;
@ -1546,29 +1547,33 @@ namespace LLM {
printf("%d ", token); printf("%d ", token);
} }
printf("\n"); printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens); auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
model.compute(8, input_ids, nullptr, image_embeds, {}, &out, work_ctx); auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), image_embeds, {});
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0); LOG_DEBUG("llm test done in %lldms", t1 - t0);
} else if (test_vit) { } else if (test_vit) {
// auto image = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 280, 280, 3); // auto image = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 280, 280, 3);
// ggml_set_f32(image, 0.f); // ggml_set_f32(image, 0.f);
auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin"); auto image = sd::load_tensor_from_file_as_tensor<float>("qwen2vl_normalized.bin");
print_ggml_tensor(image, false, "image"); print_sd_tensor(image, false, "image");
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
model.encode_image(8, image, &out, work_ctx); auto out_opt = model.encode_image(8, image);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
print_ggml_tensor(out, false, "out"); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out, false, "out");
// auto ref_out = load_tensor_from_file(work_ctx, "qwen2vl.bin"); // auto ref_out = load_tensor_from_file(ctx, "qwen2vl.bin");
// ggml_ext_tensor_diff(ref_out, out, 0.01f); // ggml_ext_tensor_diff(ref_out, out, 0.01f);
LOG_DEBUG("llm test done in %lldms", t1 - t0); LOG_DEBUG("llm test done in %lldms", t1 - t0);
@ -1586,14 +1591,16 @@ namespace LLM {
printf("%d ", token); printf("%d ", token);
} }
printf("\n"); printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens); auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
model.compute(8, input_ids, nullptr, {}, {10, 20, 30}, &out, work_ctx); auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), {}, {10, 20, 30});
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0); LOG_DEBUG("llm test done in %lldms", t1 - t0);
} else if (test_qwen3) { } else if (test_qwen3) {
std::pair<int, int> prompt_attn_range; std::pair<int, int> prompt_attn_range;
@ -1609,14 +1616,16 @@ namespace LLM {
printf("%d ", token); printf("%d ", token);
} }
printf("\n"); printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens); auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
model.compute(8, input_ids, nullptr, {}, {35}, &out, work_ctx); auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), {}, {35});
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0); LOG_DEBUG("llm test done in %lldms", t1 - t0);
} else { } else {
std::pair<int, int> prompt_attn_range; std::pair<int, int> prompt_attn_range;
@ -1632,14 +1641,16 @@ namespace LLM {
printf("%d ", token); printf("%d ", token);
} }
printf("\n"); printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens); auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
model.compute(8, input_ids, nullptr, {}, {}, &out, work_ctx); auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), {}, {});
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0); LOG_DEBUG("llm test done in %lldms", t1 - t0);
} }
} }

View File

@ -9,7 +9,7 @@
struct LoraModel : public GGMLRunner { struct LoraModel : public GGMLRunner {
std::string lora_id; std::string lora_id;
float multiplier = 1.0f; float multiplier = 1.0f;
std::unordered_map<std::string, struct ggml_tensor*> lora_tensors; std::unordered_map<std::string, ggml_tensor*> lora_tensors;
std::map<ggml_tensor*, ggml_tensor*> original_tensor_to_final_tensor; std::map<ggml_tensor*, ggml_tensor*> original_tensor_to_final_tensor;
std::set<std::string> applied_lora_tensors; std::set<std::string> applied_lora_tensors;
std::string file_path; std::string file_path;
@ -76,13 +76,13 @@ struct LoraModel : public GGMLRunner {
} }
for (const auto& pair : tensors_to_create) { for (const auto& pair : tensors_to_create) {
const auto& name = pair.first; const auto& name = pair.first;
const auto& ts = pair.second; const auto& ts = pair.second;
struct ggml_tensor* real = ggml_new_tensor(params_ctx, ggml_tensor* real = ggml_new_tensor(params_ctx,
ts.type, ts.type,
ts.n_dims, ts.n_dims,
ts.ne); ts.ne);
lora_tensors[name] = real; lora_tensors[name] = real;
} }
alloc_params_buffer(); alloc_params_buffer();
@ -337,10 +337,10 @@ struct LoraModel : public GGMLRunner {
} }
scale_value *= multiplier; scale_value *= multiplier;
struct ggml_tensor* updown_1 = ggml_ext_merge_lora(ctx, hada_1_down, hada_1_up, hada_1_mid); ggml_tensor* updown_1 = ggml_ext_merge_lora(ctx, hada_1_down, hada_1_up, hada_1_mid);
struct ggml_tensor* updown_2 = ggml_ext_merge_lora(ctx, hada_2_down, hada_2_up, hada_2_mid); ggml_tensor* updown_2 = ggml_ext_merge_lora(ctx, hada_2_down, hada_2_up, hada_2_mid);
auto curr_updown = ggml_mul_inplace(ctx, updown_1, updown_2); auto curr_updown = ggml_mul_inplace(ctx, updown_1, updown_2);
curr_updown = ggml_ext_scale(ctx, curr_updown, scale_value, true); curr_updown = ggml_ext_scale(ctx, curr_updown, scale_value, true);
if (updown == nullptr) { if (updown == nullptr) {
updown = curr_updown; updown = curr_updown;
} else { } else {
@ -747,9 +747,9 @@ struct LoraModel : public GGMLRunner {
return out_diff; return out_diff;
} }
struct ggml_cgraph* build_lora_graph(const std::map<std::string, ggml_tensor*>& model_tensors, SDVersion version) { ggml_cgraph* build_lora_graph(const std::map<std::string, ggml_tensor*>& model_tensors, SDVersion version) {
size_t lora_graph_size = LORA_GRAPH_BASE_SIZE + lora_tensors.size() * 10; size_t lora_graph_size = LORA_GRAPH_BASE_SIZE + lora_tensors.size() * 10;
struct ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, lora_graph_size, false); ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, lora_graph_size, false);
preprocess_lora_tensors(model_tensors); preprocess_lora_tensors(model_tensors);
@ -788,11 +788,11 @@ struct LoraModel : public GGMLRunner {
return gf; return gf;
} }
void apply(std::map<std::string, struct ggml_tensor*> model_tensors, SDVersion version, int n_threads) { void apply(std::map<std::string, ggml_tensor*> model_tensors, SDVersion version, int n_threads) {
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_lora_graph(model_tensors, version); return build_lora_graph(model_tensors, version);
}; };
GGMLRunner::compute(get_graph, n_threads, false); GGMLRunner::compute<float>(get_graph, n_threads, false, true);
stat(); stat();
for (auto item : original_tensor_to_final_tensor) { for (auto item : original_tensor_to_final_tensor) {
ggml_tensor* original_tensor = item.first; ggml_tensor* original_tensor = item.first;

View File

@ -1,8 +1,7 @@
#ifndef __LTXV_HPP__ #ifndef __LTXV_HPP__
#define __LTXV_HPP__ #define __LTXV_HPP__
#include "common.hpp" #include "common_block.hpp"
#include "ggml_extend.hpp"
namespace LTXV { namespace LTXV {
@ -27,9 +26,9 @@ namespace LTXV {
bias)); bias));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
bool causal = true) { bool causal = true) {
// x: [N*IC, ID, IH, IW] // x: [N*IC, ID, IH, IW]
// result: [N*OC, OD, OH, OW] // result: [N*OC, OD, OH, OW]
auto conv = std::dynamic_pointer_cast<Conv3d>(blocks["conv"]); auto conv = std::dynamic_pointer_cast<Conv3d>(blocks["conv"]);

View File

@ -27,7 +27,7 @@ public:
blocks["fc2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_features, out_features, bias)); blocks["fc2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_features, out_features, bias));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, n_token, in_features] // x: [N, n_token, in_features]
auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]); auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]);
auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]); auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
@ -72,7 +72,7 @@ public:
bias)); bias));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, C, H, W] // x: [N, C, H, W]
// return: [N, H*W, embed_dim] // return: [N, H*W, embed_dim]
auto proj = std::dynamic_pointer_cast<Conv2d>(blocks["proj"]); auto proj = std::dynamic_pointer_cast<Conv2d>(blocks["proj"]);
@ -111,7 +111,7 @@ public:
blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, out_channels, true, true)); blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, out_channels, true, true));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* t) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* t) {
// t: [N, ] // t: [N, ]
// return: [N, hidden_size] // return: [N, hidden_size]
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]); auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
@ -135,7 +135,7 @@ public:
blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, hidden_size, true, true)); blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, hidden_size, true, true));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, input_dim] // x: [N, input_dim]
// return: [N, hidden_size] // return: [N, hidden_size]
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]); auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
@ -175,7 +175,7 @@ public:
} }
} }
std::vector<struct ggml_tensor*> pre_attention(GGMLRunnerContext* ctx, struct ggml_tensor* x) { std::vector<ggml_tensor*> pre_attention(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto qkv_proj = std::dynamic_pointer_cast<Linear>(blocks["qkv"]); auto qkv_proj = std::dynamic_pointer_cast<Linear>(blocks["qkv"]);
auto qkv = qkv_proj->forward(ctx, x); auto qkv = qkv_proj->forward(ctx, x);
@ -198,7 +198,7 @@ public:
return {q, k, v}; return {q, k, v};
} }
struct ggml_tensor* post_attention(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* post_attention(GGMLRunnerContext* ctx, ggml_tensor* x) {
GGML_ASSERT(!pre_only); GGML_ASSERT(!pre_only);
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]); auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
@ -208,8 +208,8 @@ public:
} }
// x: [N, n_token, dim] // x: [N, n_token, dim]
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x) { ggml_tensor* x) {
auto qkv = pre_attention(ctx, x); auto qkv = pre_attention(ctx, x);
x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, dim] x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, dim]
x = post_attention(ctx, x); // [N, n_token, dim] x = post_attention(ctx, x); // [N, n_token, dim]
@ -217,10 +217,10 @@ public:
} }
}; };
__STATIC_INLINE__ struct ggml_tensor* modulate(struct ggml_context* ctx, __STATIC_INLINE__ ggml_tensor* modulate(ggml_context* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* shift, ggml_tensor* shift,
struct ggml_tensor* scale) { ggml_tensor* scale) {
// x: [N, L, C] // x: [N, L, C]
// scale: [N, C] // scale: [N, C]
// shift: [N, C] // shift: [N, C]
@ -274,8 +274,8 @@ public:
} }
std::tuple<std::vector<ggml_tensor*>, std::vector<ggml_tensor*>, std::vector<ggml_tensor*>> pre_attention_x(GGMLRunnerContext* ctx, std::tuple<std::vector<ggml_tensor*>, std::vector<ggml_tensor*>, std::vector<ggml_tensor*>> pre_attention_x(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* c) { ggml_tensor* c) {
GGML_ASSERT(self_attn); GGML_ASSERT(self_attn);
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
// c: [N, hidden_size] // c: [N, hidden_size]
@ -309,9 +309,9 @@ public:
return {qkv, qkv2, {x, gate_msa, shift_mlp, scale_mlp, gate_mlp, gate_msa2}}; return {qkv, qkv2, {x, gate_msa, shift_mlp, scale_mlp, gate_mlp, gate_msa2}};
} }
std::pair<std::vector<struct ggml_tensor*>, std::vector<struct ggml_tensor*>> pre_attention(GGMLRunnerContext* ctx, std::pair<std::vector<ggml_tensor*>, std::vector<ggml_tensor*>> pre_attention(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* c) { ggml_tensor* c) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
// c: [N, hidden_size] // c: [N, hidden_size]
auto norm1 = std::dynamic_pointer_cast<LayerNorm>(blocks["norm1"]); auto norm1 = std::dynamic_pointer_cast<LayerNorm>(blocks["norm1"]);
@ -346,15 +346,15 @@ public:
} }
} }
struct ggml_tensor* post_attention_x(GGMLRunnerContext* ctx, ggml_tensor* post_attention_x(GGMLRunnerContext* ctx,
struct ggml_tensor* attn_out, ggml_tensor* attn_out,
struct ggml_tensor* attn2_out, ggml_tensor* attn2_out,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* gate_msa, ggml_tensor* gate_msa,
struct ggml_tensor* shift_mlp, ggml_tensor* shift_mlp,
struct ggml_tensor* scale_mlp, ggml_tensor* scale_mlp,
struct ggml_tensor* gate_mlp, ggml_tensor* gate_mlp,
struct ggml_tensor* gate_msa2) { ggml_tensor* gate_msa2) {
// attn_out: [N, n_token, hidden_size] // attn_out: [N, n_token, hidden_size]
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
// gate_msa: [N, hidden_size] // gate_msa: [N, hidden_size]
@ -384,13 +384,13 @@ public:
return x; return x;
} }
struct ggml_tensor* post_attention(GGMLRunnerContext* ctx, ggml_tensor* post_attention(GGMLRunnerContext* ctx,
struct ggml_tensor* attn_out, ggml_tensor* attn_out,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* gate_msa, ggml_tensor* gate_msa,
struct ggml_tensor* shift_mlp, ggml_tensor* shift_mlp,
struct ggml_tensor* scale_mlp, ggml_tensor* scale_mlp,
struct ggml_tensor* gate_mlp) { ggml_tensor* gate_mlp) {
// attn_out: [N, n_token, hidden_size] // attn_out: [N, n_token, hidden_size]
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
// gate_msa: [N, hidden_size] // gate_msa: [N, hidden_size]
@ -416,9 +416,9 @@ public:
return x; return x;
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* c) { ggml_tensor* c) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
// c: [N, hidden_size] // c: [N, hidden_size]
// return: [N, n_token, hidden_size] // return: [N, n_token, hidden_size]
@ -463,11 +463,11 @@ public:
} }
}; };
__STATIC_INLINE__ std::pair<struct ggml_tensor*, struct ggml_tensor*> __STATIC_INLINE__ std::pair<ggml_tensor*, ggml_tensor*>
block_mixing(GGMLRunnerContext* ctx, block_mixing(GGMLRunnerContext* ctx,
struct ggml_tensor* context, ggml_tensor* context,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* c, ggml_tensor* c,
std::shared_ptr<DismantledBlock> context_block, std::shared_ptr<DismantledBlock> context_block,
std::shared_ptr<DismantledBlock> x_block) { std::shared_ptr<DismantledBlock> x_block) {
// context: [N, n_context, hidden_size] // context: [N, n_context, hidden_size]
@ -489,7 +489,7 @@ block_mixing(GGMLRunnerContext* ctx,
x_qkv = x_qkv_intermediates.first; x_qkv = x_qkv_intermediates.first;
x_intermediates = x_qkv_intermediates.second; x_intermediates = x_qkv_intermediates.second;
} }
std::vector<struct ggml_tensor*> qkv; std::vector<ggml_tensor*> qkv;
for (int i = 0; i < 3; i++) { for (int i = 0; i < 3; i++) {
qkv.push_back(ggml_concat(ctx->ggml_ctx, context_qkv[i], x_qkv[i], 1)); qkv.push_back(ggml_concat(ctx->ggml_ctx, context_qkv[i], x_qkv[i], 1));
} }
@ -563,10 +563,10 @@ public:
blocks["x_block"] = std::shared_ptr<GGMLBlock>(new DismantledBlock(hidden_size, num_heads, mlp_ratio, qk_norm, qkv_bias, false, self_attn_x)); blocks["x_block"] = std::shared_ptr<GGMLBlock>(new DismantledBlock(hidden_size, num_heads, mlp_ratio, qk_norm, qkv_bias, false, self_attn_x));
} }
std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx, std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
struct ggml_tensor* context, ggml_tensor* context,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* c) { ggml_tensor* c) {
auto context_block = std::dynamic_pointer_cast<DismantledBlock>(blocks["context_block"]); auto context_block = std::dynamic_pointer_cast<DismantledBlock>(blocks["context_block"]);
auto x_block = std::dynamic_pointer_cast<DismantledBlock>(blocks["x_block"]); auto x_block = std::dynamic_pointer_cast<DismantledBlock>(blocks["x_block"]);
@ -586,9 +586,9 @@ public:
blocks["adaLN_modulation.1"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, 2 * hidden_size)); blocks["adaLN_modulation.1"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, 2 * hidden_size));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* c) { ggml_tensor* c) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
// c: [N, hidden_size] // c: [N, hidden_size]
// return: [N, n_token, patch_size * patch_size * out_channels] // return: [N, n_token, patch_size * patch_size * out_channels]
@ -626,7 +626,7 @@ protected:
int64_t hidden_size; int64_t hidden_size;
std::string qk_norm; std::string qk_norm;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, std::string prefix = "") override {
enum ggml_type wtype = GGML_TYPE_F32; enum ggml_type wtype = GGML_TYPE_F32;
params["pos_embed"] = ggml_new_tensor_3d(ctx, wtype, hidden_size, num_patchs, 1); params["pos_embed"] = ggml_new_tensor_3d(ctx, wtype, hidden_size, num_patchs, 1);
} }
@ -705,8 +705,8 @@ public:
blocks["final_layer"] = std::shared_ptr<GGMLBlock>(new FinalLayer(hidden_size, patch_size, out_channels)); blocks["final_layer"] = std::shared_ptr<GGMLBlock>(new FinalLayer(hidden_size, patch_size, out_channels));
} }
struct ggml_tensor* ggml_tensor*
cropped_pos_embed(struct ggml_context* ctx, cropped_pos_embed(ggml_context* ctx,
int64_t h, int64_t h,
int64_t w) { int64_t w) {
auto pos_embed = params["pos_embed"]; auto pos_embed = params["pos_embed"];
@ -745,33 +745,11 @@ public:
return spatial_pos_embed; return spatial_pos_embed;
} }
struct ggml_tensor* unpatchify(struct ggml_context* ctx, ggml_tensor* forward_core_with_concat(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t h, ggml_tensor* c_mod,
int64_t w) { ggml_tensor* context,
// x: [N, H*W, patch_size * patch_size * C] std::vector<int> skip_layers = std::vector<int>()) {
// return: [N, C, H, W]
int64_t n = x->ne[2];
int64_t c = out_channels;
int64_t p = patch_size;
h = (h + 1) / p;
w = (w + 1) / p;
GGML_ASSERT(h * w == x->ne[1]);
x = ggml_reshape_4d(ctx, x, c, p * p, w * h, n); // [N, H*W, P*P, C]
x = ggml_cont(ctx, ggml_permute(ctx, x, 2, 0, 1, 3)); // [N, C, H*W, P*P]
x = ggml_reshape_4d(ctx, x, p, p, w, h * c * n); // [N*C*H, W, P, P]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*H, P, W, P]
x = ggml_reshape_4d(ctx, x, p * w, p * h, c, n); // [N, C, H*P, W*P]
return x;
}
struct ggml_tensor* forward_core_with_concat(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* c_mod,
struct ggml_tensor* context,
std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, H*W, hidden_size] // x: [N, H*W, hidden_size]
// context: [N, n_context, d_context] // context: [N, n_context, d_context]
// c: [N, hidden_size] // c: [N, hidden_size]
@ -796,12 +774,12 @@ public:
return x; return x;
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* t, ggml_tensor* t,
struct ggml_tensor* y = nullptr, ggml_tensor* y = nullptr,
struct ggml_tensor* context = nullptr, ggml_tensor* context = nullptr,
std::vector<int> skip_layers = std::vector<int>()) { std::vector<int> skip_layers = std::vector<int>()) {
// Forward pass of DiT. // Forward pass of DiT.
// x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images) // x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
// t: (N,) tensor of diffusion timesteps // t: (N,) tensor of diffusion timesteps
@ -811,11 +789,11 @@ public:
auto x_embedder = std::dynamic_pointer_cast<PatchEmbed>(blocks["x_embedder"]); auto x_embedder = std::dynamic_pointer_cast<PatchEmbed>(blocks["x_embedder"]);
auto t_embedder = std::dynamic_pointer_cast<TimestepEmbedder>(blocks["t_embedder"]); auto t_embedder = std::dynamic_pointer_cast<TimestepEmbedder>(blocks["t_embedder"]);
int64_t w = x->ne[0]; int64_t W = x->ne[0];
int64_t h = x->ne[1]; int64_t H = x->ne[1];
auto patch_embed = x_embedder->forward(ctx, x); // [N, H*W, hidden_size] auto patch_embed = x_embedder->forward(ctx, x); // [N, H*W, hidden_size]
auto pos_embed = cropped_pos_embed(ctx->ggml_ctx, h, w); // [1, H*W, hidden_size] auto pos_embed = cropped_pos_embed(ctx->ggml_ctx, H, W); // [1, H*W, hidden_size]
x = ggml_add(ctx->ggml_ctx, patch_embed, pos_embed); // [N, H*W, hidden_size] x = ggml_add(ctx->ggml_ctx, patch_embed, pos_embed); // [N, H*W, hidden_size]
auto c = t_embedder->forward(ctx, t); // [N, hidden_size] auto c = t_embedder->forward(ctx, t); // [N, hidden_size]
@ -834,7 +812,7 @@ public:
x = forward_core_with_concat(ctx, x, c, context, skip_layers); // (N, H*W, patch_size ** 2 * out_channels) x = forward_core_with_concat(ctx, x, c, context, skip_layers); // (N, H*W, patch_size ** 2 * out_channels)
x = unpatchify(ctx->ggml_ctx, x, h, w); // [N, C, H, W] x = DiT::unpatchify_and_crop(ctx->ggml_ctx, x, H, W, patch_size, patch_size, /*patch_last*/ false); // [N, C, H, W]
return x; return x;
} }
@ -854,89 +832,93 @@ struct MMDiTRunner : public GGMLRunner {
return "mmdit"; return "mmdit";
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
mmdit.get_param_tensors(tensors, prefix); mmdit.get_param_tensors(tensors, prefix);
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* x, ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps_tensor,
struct ggml_tensor* context, const sd::Tensor<float>& context_tensor = {},
struct ggml_tensor* y, const sd::Tensor<float>& y_tensor = {},
std::vector<int> skip_layers = std::vector<int>()) { std::vector<int> skip_layers = std::vector<int>()) {
struct ggml_cgraph* gf = new_graph_custom(MMDIT_GRAPH_SIZE); ggml_cgraph* gf = new_graph_custom(MMDIT_GRAPH_SIZE);
x = to_backend(x); ggml_tensor* x = make_input(x_tensor);
context = to_backend(context); ggml_tensor* timesteps = make_input(timesteps_tensor);
y = to_backend(y); ggml_tensor* context = make_optional_input(context_tensor);
timesteps = to_backend(timesteps); ggml_tensor* y = make_optional_input(y_tensor);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_tensor* out = mmdit.forward(&runner_ctx, ggml_tensor* out = mmdit.forward(&runner_ctx,
x, x,
timesteps, timesteps,
y, y,
context, context,
skip_layers); skip_layers);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
bool compute(int n_threads, sd::Tensor<float> compute(int n_threads,
struct ggml_tensor* x, const sd::Tensor<float>& x,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps,
struct ggml_tensor* context, const sd::Tensor<float>& context = {},
struct ggml_tensor* y, const sd::Tensor<float>& y = {},
struct ggml_tensor** output = nullptr, std::vector<int> skip_layers = std::vector<int>()) {
struct ggml_context* output_ctx = nullptr,
std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 154, 4096]) or [1, max_position, hidden_size] // context: [N, max_position, hidden_size]([N, 154, 4096]) or [1, max_position, hidden_size]
// y: [N, adm_in_channels] or [1, adm_in_channels] // y: [N, adm_in_channels] or [1, adm_in_channels]
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, y, skip_layers); return build_graph(x, timesteps, context, y, skip_layers);
}; };
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx); return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
} }
void test() { void test() {
struct ggml_init_params params; ggml_init_params params;
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); ggml_context* ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr); GGML_ASSERT(ctx != nullptr);
{ {
// cpu f16: pass // cpu f16: pass
// cpu f32: pass // cpu f32: pass
// cuda f16: pass // cuda f16: pass
// cuda f32: pass // cuda f32: pass
auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 128, 128, 16, 1); sd::Tensor<float> x({128, 128, 16, 1});
std::vector<float> timesteps_vec(1, 999.f); std::vector<float> timesteps_vec(1, 999.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec); auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
ggml_set_f32(x, 0.01f); x.fill_(0.01f);
// print_ggml_tensor(x); // print_ggml_tensor(x);
auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 4096, 154, 1); sd::Tensor<float> context({4096, 154, 1});
ggml_set_f32(context, 0.01f); context.fill_(0.01f);
// print_ggml_tensor(context); // print_ggml_tensor(context);
auto y = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 2048, 1); sd::Tensor<float> y({2048, 1});
ggml_set_f32(y, 0.01f); y.fill_(0.01f);
// print_ggml_tensor(y); // print_ggml_tensor(y);
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, y, &out, work_ctx); auto out_opt = compute(8,
int64_t t1 = ggml_time_ms(); x,
timesteps,
context,
y);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("mmdit test done in %lldms", t1 - t0); LOG_DEBUG("mmdit test done in %lldms", t1 - t0);
} }
} }

View File

@ -16,10 +16,6 @@
#include "model.h" #include "model.h"
#include "stable-diffusion.h" #include "stable-diffusion.h"
#include "util.h" #include "util.h"
#include "vocab.hpp"
#include "vocab_mistral.hpp"
#include "vocab_qwen.hpp"
#include "vocab_umt5.hpp"
#include "ggml-alloc.h" #include "ggml-alloc.h"
#include "ggml-backend.h" #include "ggml-backend.h"
@ -166,43 +162,7 @@ uint16_t f8_e4m3_to_f16(uint8_t f8) {
} }
uint16_t f8_e5m2_to_f16(uint8_t fp8) { uint16_t f8_e5m2_to_f16(uint8_t fp8) {
uint8_t sign = (fp8 >> 7) & 0x1; return static_cast<uint16_t>(fp8) << 8;
uint8_t exponent = (fp8 >> 2) & 0x1F;
uint8_t mantissa = fp8 & 0x3;
uint16_t fp16_sign = sign << 15;
uint16_t fp16_exponent;
uint16_t fp16_mantissa;
if (exponent == 0 && mantissa == 0) { // zero
return fp16_sign;
}
if (exponent == 0x1F) { // NAN and INF
fp16_exponent = 0x1F;
fp16_mantissa = mantissa ? (mantissa << 8) : 0;
return fp16_sign | (fp16_exponent << 10) | fp16_mantissa;
}
if (exponent == 0) { // subnormal numbers
fp16_mantissa = (mantissa << 8);
return fp16_sign | fp16_mantissa;
}
// normal numbers
int16_t true_exponent = (int16_t)exponent - 15 + 15;
if (true_exponent <= 0) {
fp16_exponent = 0;
fp16_mantissa = (mantissa << 8);
} else if (true_exponent >= 0x1F) {
fp16_exponent = 0x1F;
fp16_mantissa = 0;
} else {
fp16_exponent = (uint16_t)true_exponent;
fp16_mantissa = mantissa << 8;
}
return fp16_sign | (fp16_exponent << 10) | fp16_mantissa;
} }
void f8_e4m3_to_f16_vec(uint8_t* src, uint16_t* dst, int64_t n) { void f8_e4m3_to_f16_vec(uint8_t* src, uint16_t* dst, int64_t n) {
@ -291,7 +251,7 @@ void ModelLoader::add_tensor_storage(const TensorStorage& tensor_storage) {
} }
bool is_zip_file(const std::string& file_path) { bool is_zip_file(const std::string& file_path) {
struct zip_t* zip = zip_open(file_path.c_str(), 0, 'r'); zip_t* zip = zip_open(file_path.c_str(), 0, 'r');
if (zip == nullptr) { if (zip == nullptr) {
return false; return false;
} }
@ -457,9 +417,9 @@ bool ModelLoader::init_from_gguf_file(const std::string& file_path, const std::s
size_t total_size = 0; size_t total_size = 0;
size_t data_offset = gguf_get_data_offset(ctx_gguf_); size_t data_offset = gguf_get_data_offset(ctx_gguf_);
for (int i = 0; i < n_tensors; i++) { for (int i = 0; i < n_tensors; i++) {
std::string name = gguf_get_tensor_name(ctx_gguf_, i); std::string name = gguf_get_tensor_name(ctx_gguf_, i);
struct ggml_tensor* dummy = ggml_get_tensor(ctx_meta_, name.c_str()); ggml_tensor* dummy = ggml_get_tensor(ctx_meta_, name.c_str());
size_t offset = data_offset + gguf_get_tensor_offset(ctx_gguf_, i); size_t offset = data_offset + gguf_get_tensor_offset(ctx_gguf_, i);
// LOG_DEBUG("%s", name.c_str()); // LOG_DEBUG("%s", name.c_str());
@ -816,7 +776,7 @@ struct PickleTensorReader {
} }
} }
void read_string(const std::string& str, struct zip_t* zip, std::string dir) { void read_string(const std::string& str, zip_t* zip, std::string dir) {
if (str == "storage") { if (str == "storage") {
read_global_type = true; read_global_type = true;
} else if (str != "state_dict") { } else if (str != "state_dict") {
@ -999,7 +959,7 @@ bool ModelLoader::init_from_ckpt_file(const std::string& file_path, const std::s
file_paths_.push_back(file_path); file_paths_.push_back(file_path);
size_t file_index = file_paths_.size() - 1; size_t file_index = file_paths_.size() - 1;
struct zip_t* zip = zip_open(file_path.c_str(), 0, 'r'); zip_t* zip = zip_open(file_path.c_str(), 0, 'r');
if (zip == nullptr) { if (zip == nullptr) {
LOG_ERROR("failed to open '%s'", file_path.c_str()); LOG_ERROR("failed to open '%s'", file_path.c_str());
return false; return false;
@ -1061,6 +1021,9 @@ SDVersion ModelLoader::get_sd_version() {
if (tensor_storage.name.find("model.diffusion_model.transformer_blocks.0.img_mod.1.weight") != std::string::npos) { if (tensor_storage.name.find("model.diffusion_model.transformer_blocks.0.img_mod.1.weight") != std::string::npos) {
return VERSION_QWEN_IMAGE; return VERSION_QWEN_IMAGE;
} }
if (tensor_storage.name.find("llm_adapter.blocks.0.cross_attn.q_proj.weight") != std::string::npos) {
return VERSION_ANIMA;
}
if (tensor_storage.name.find("model.diffusion_model.double_stream_modulation_img.lin.weight") != std::string::npos) { if (tensor_storage.name.find("model.diffusion_model.double_stream_modulation_img.lin.weight") != std::string::npos) {
is_flux2 = true; is_flux2 = true;
} }
@ -1105,10 +1068,12 @@ SDVersion ModelLoader::get_sd_version() {
tensor_storage.name.find("unet.mid_block.resnets.1.") != std::string::npos) { tensor_storage.name.find("unet.mid_block.resnets.1.") != std::string::npos) {
has_middle_block_1 = true; has_middle_block_1 = true;
} }
if (tensor_storage.name.find("model.diffusion_model.output_blocks.3.1.transformer_blocks.1") != std::string::npos) { if (tensor_storage.name.find("model.diffusion_model.output_blocks.3.1.transformer_blocks.1") != std::string::npos ||
tensor_storage.name.find("unet.up_blocks.1.attentions.0.transformer_blocks.1") != std::string::npos) {
has_output_block_311 = true; has_output_block_311 = true;
} }
if (tensor_storage.name.find("model.diffusion_model.output_blocks.7.1") != std::string::npos) { if (tensor_storage.name.find("model.diffusion_model.output_blocks.7.1") != std::string::npos ||
tensor_storage.name.find("unet.up_blocks.2.attentions.1") != std::string::npos) {
has_output_block_71 = true; has_output_block_71 = true;
} }
if (tensor_storage.name == "cond_stage_model.transformer.text_model.embeddings.token_embedding.weight" || if (tensor_storage.name == "cond_stage_model.transformer.text_model.embeddings.token_embedding.weight" ||
@ -1340,36 +1305,6 @@ void ModelLoader::set_wtype_override(ggml_type wtype, std::string tensor_type_ru
} }
} }
std::string ModelLoader::load_merges() {
std::string merges_utf8_str(reinterpret_cast<const char*>(merges_utf8_c_str), sizeof(merges_utf8_c_str));
return merges_utf8_str;
}
std::string ModelLoader::load_qwen2_merges() {
std::string merges_utf8_str(reinterpret_cast<const char*>(qwen2_merges_utf8_c_str), sizeof(qwen2_merges_utf8_c_str));
return merges_utf8_str;
}
std::string ModelLoader::load_mistral_merges() {
std::string merges_utf8_str(reinterpret_cast<const char*>(mistral_merges_utf8_c_str), sizeof(mistral_merges_utf8_c_str));
return merges_utf8_str;
}
std::string ModelLoader::load_mistral_vocab_json() {
std::string json_str(reinterpret_cast<const char*>(mistral_vocab_json_utf8_c_str), sizeof(mistral_vocab_json_utf8_c_str));
return json_str;
}
std::string ModelLoader::load_t5_tokenizer_json() {
std::string json_str(reinterpret_cast<const char*>(t5_tokenizer_json_str), sizeof(t5_tokenizer_json_str));
return json_str;
}
std::string ModelLoader::load_umt5_tokenizer_json() {
std::string json_str(reinterpret_cast<const char*>(umt5_tokenizer_json_str), sizeof(umt5_tokenizer_json_str));
return json_str;
}
bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads_p, bool enable_mmap) { bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads_p, bool enable_mmap) {
int64_t process_time_ms = 0; int64_t process_time_ms = 0;
std::atomic<int64_t> read_time_ms(0); std::atomic<int64_t> read_time_ms(0);
@ -1442,7 +1377,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
for (int i = 0; i < n_threads; ++i) { for (int i = 0; i < n_threads; ++i) {
workers.emplace_back([&, file_path, is_zip]() { workers.emplace_back([&, file_path, is_zip]() {
std::ifstream file; std::ifstream file;
struct zip_t* zip = nullptr; zip_t* zip = nullptr;
if (is_zip) { if (is_zip) {
zip = zip_open(file_path.c_str(), 0, 'r'); zip = zip_open(file_path.c_str(), 0, 'r');
if (zip == nullptr) { if (zip == nullptr) {
@ -1630,7 +1565,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
return success; return success;
} }
bool ModelLoader::load_tensors(std::map<std::string, struct ggml_tensor*>& tensors, bool ModelLoader::load_tensors(std::map<std::string, ggml_tensor*>& tensors,
std::set<std::string> ignore_tensors, std::set<std::string> ignore_tensors,
int n_threads, int n_threads,
bool enable_mmap) { bool enable_mmap) {
@ -1644,7 +1579,7 @@ bool ModelLoader::load_tensors(std::map<std::string, struct ggml_tensor*>& tenso
tensor_names_in_file.insert(name); tensor_names_in_file.insert(name);
} }
struct ggml_tensor* real; ggml_tensor* real;
if (tensors.find(name) != tensors.end()) { if (tensors.find(name) != tensors.end()) {
real = tensors[name]; real = tensors[name];
} else { } else {

View File

@ -45,6 +45,7 @@ enum SDVersion {
VERSION_WAN2_2_I2V, VERSION_WAN2_2_I2V,
VERSION_WAN2_2_TI2V, VERSION_WAN2_2_TI2V,
VERSION_QWEN_IMAGE, VERSION_QWEN_IMAGE,
VERSION_ANIMA,
VERSION_FLUX2, VERSION_FLUX2,
VERSION_FLUX2_KLEIN, VERSION_FLUX2_KLEIN,
VERSION_Z_IMAGE, VERSION_Z_IMAGE,
@ -122,6 +123,13 @@ static inline bool sd_version_is_qwen_image(SDVersion version) {
return false; return false;
} }
static inline bool sd_version_is_anima(SDVersion version) {
if (version == VERSION_ANIMA) {
return true;
}
return false;
}
static inline bool sd_version_is_z_image(SDVersion version) { static inline bool sd_version_is_z_image(SDVersion version) {
if (version == VERSION_Z_IMAGE) { if (version == VERSION_Z_IMAGE) {
return true; return true;
@ -146,6 +154,7 @@ static inline bool sd_version_is_dit(SDVersion version) {
sd_version_is_sd3(version) || sd_version_is_sd3(version) ||
sd_version_is_wan(version) || sd_version_is_wan(version) ||
sd_version_is_qwen_image(version) || sd_version_is_qwen_image(version) ||
sd_version_is_anima(version) ||
sd_version_is_z_image(version)) { sd_version_is_z_image(version)) {
return true; return true;
} }
@ -314,7 +323,7 @@ public:
String2TensorStorage& get_tensor_storage_map() { return tensor_storage_map; } String2TensorStorage& get_tensor_storage_map() { return tensor_storage_map; }
void set_wtype_override(ggml_type wtype, std::string tensor_type_rules = ""); void set_wtype_override(ggml_type wtype, std::string tensor_type_rules = "");
bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads = 0, bool use_mmap = false); bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads = 0, bool use_mmap = false);
bool load_tensors(std::map<std::string, struct ggml_tensor*>& tensors, bool load_tensors(std::map<std::string, ggml_tensor*>& tensors,
std::set<std::string> ignore_tensors = {}, std::set<std::string> ignore_tensors = {},
int n_threads = 0, int n_threads = 0,
bool use_mmap = false); bool use_mmap = false);
@ -331,13 +340,6 @@ public:
bool tensor_should_be_converted(const TensorStorage& tensor_storage, ggml_type type); bool tensor_should_be_converted(const TensorStorage& tensor_storage, ggml_type type);
int64_t get_params_mem_size(ggml_backend_t backend, ggml_type type = GGML_TYPE_COUNT); int64_t get_params_mem_size(ggml_backend_t backend, ggml_type type = GGML_TYPE_COUNT);
~ModelLoader() = default; ~ModelLoader() = default;
static std::string load_merges();
static std::string load_qwen2_merges();
static std::string load_mistral_merges();
static std::string load_mistral_vocab_json();
static std::string load_t5_tokenizer_json();
static std::string load_umt5_tokenizer_json();
}; };
#endif // __MODEL_H__ #endif // __MODEL_H__

View File

@ -653,6 +653,14 @@ std::string convert_diffusers_dit_to_original_lumina2(std::string name) {
return name; return name;
} }
std::string convert_other_dit_to_original_anima(std::string name) {
static const std::string anima_net_prefix = "net.";
if (!starts_with(name, anima_net_prefix)) {
name = anima_net_prefix + name;
}
return name;
}
std::string convert_diffusion_model_name(std::string name, std::string prefix, SDVersion version) { std::string convert_diffusion_model_name(std::string name, std::string prefix, SDVersion version) {
if (sd_version_is_sd1(version) || sd_version_is_sd2(version)) { if (sd_version_is_sd1(version) || sd_version_is_sd2(version)) {
name = convert_diffusers_unet_to_original_sd1(name); name = convert_diffusers_unet_to_original_sd1(name);
@ -664,6 +672,8 @@ std::string convert_diffusion_model_name(std::string name, std::string prefix, S
name = convert_diffusers_dit_to_original_flux(name); name = convert_diffusers_dit_to_original_flux(name);
} else if (sd_version_is_z_image(version)) { } else if (sd_version_is_z_image(version)) {
name = convert_diffusers_dit_to_original_lumina2(name); name = convert_diffusers_dit_to_original_lumina2(name);
} else if (sd_version_is_anima(version)) {
name = convert_other_dit_to_original_anima(name);
} }
return name; return name;
} }
@ -1110,7 +1120,11 @@ std::string convert_tensor_name(std::string name, SDVersion version) {
for (const auto& prefix : first_stage_model_prefix_vec) { for (const auto& prefix : first_stage_model_prefix_vec) {
if (starts_with(name, prefix)) { if (starts_with(name, prefix)) {
name = convert_first_stage_model_name(name.substr(prefix.size()), prefix); name = convert_first_stage_model_name(name.substr(prefix.size()), prefix);
name = prefix + name; if (version == VERSION_SDXS) {
name = "tae." + name;
} else {
name = prefix + name;
}
break; break;
} }
} }

View File

@ -21,14 +21,14 @@ public:
blocks["layernorm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(in_dim)); blocks["layernorm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(in_dim));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, channels, h, w] // x: [N, channels, h, w]
auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]); auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]);
auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]); auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
auto layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["layernorm"]); auto layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["layernorm"]);
struct ggml_tensor* r = x; ggml_tensor* r = x;
// x = ggml_ext_layer_norm(ctx, x, ln_w, ln_b); // x = ggml_ext_layer_norm(ctx, x, ln_w, ln_b);
x = layer_norm->forward(ctx, x); x = layer_norm->forward(ctx, x);
// x = ggml_add(ctx, ggml_mul_mat(ctx, fc1_w, x), fc1_b); // x = ggml_add(ctx, ggml_mul_mat(ctx, fc1_w, x), fc1_b);
@ -54,8 +54,8 @@ public:
blocks["1"] = std::shared_ptr<GGMLBlock>(new Mlp(dim, inner_dim, dim, false)); blocks["1"] = std::shared_ptr<GGMLBlock>(new Mlp(dim, inner_dim, dim, false));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x) { ggml_tensor* x) {
auto norm = std::dynamic_pointer_cast<LayerNorm>(blocks["0"]); auto norm = std::dynamic_pointer_cast<LayerNorm>(blocks["0"]);
auto ff = std::dynamic_pointer_cast<Mlp>(blocks["1"]); auto ff = std::dynamic_pointer_cast<Mlp>(blocks["1"]);
@ -81,9 +81,9 @@ public:
blocks["to_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim, false)); blocks["to_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim, false));
} }
struct ggml_tensor* reshape_tensor(struct ggml_context* ctx, ggml_tensor* reshape_tensor(ggml_context* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int heads) { int heads) {
int64_t ne[4]; int64_t ne[4];
for (int i = 0; i < 4; ++i) for (int i = 0; i < 4; ++i)
ne[i] = x->ne[i]; ne[i] = x->ne[i];
@ -92,17 +92,17 @@ public:
return x; return x;
} }
std::vector<struct ggml_tensor*> chunk_half(struct ggml_context* ctx, std::vector<ggml_tensor*> chunk_half(ggml_context* ctx,
struct ggml_tensor* x) { ggml_tensor* x) {
auto tlo = ggml_view_4d(ctx, x, x->ne[0] / 2, x->ne[1], x->ne[2], x->ne[3], x->nb[1], x->nb[2], x->nb[3], 0); auto tlo = ggml_view_4d(ctx, x, x->ne[0] / 2, x->ne[1], x->ne[2], x->ne[3], x->nb[1], x->nb[2], x->nb[3], 0);
auto tli = ggml_view_4d(ctx, x, x->ne[0] / 2, x->ne[1], x->ne[2], x->ne[3], x->nb[1], x->nb[2], x->nb[3], x->nb[0] * x->ne[0] / 2); auto tli = ggml_view_4d(ctx, x, x->ne[0] / 2, x->ne[1], x->ne[2], x->ne[3], x->nb[1], x->nb[2], x->nb[3], x->nb[0] * x->ne[0] / 2);
return {ggml_cont(ctx, tlo), return {ggml_cont(ctx, tlo),
ggml_cont(ctx, tli)}; ggml_cont(ctx, tli)};
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* latents) { ggml_tensor* latents) {
// x (torch.Tensor): image features // x (torch.Tensor): image features
// shape (b, n1, D) // shape (b, n1, D)
// latent (torch.Tensor): latent features // latent (torch.Tensor): latent features
@ -176,9 +176,9 @@ public:
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* latents, ggml_tensor* latents,
struct ggml_tensor* x) { ggml_tensor* x) {
// x: [N, channels, h, w] // x: [N, channels, h, w]
auto proj_in = std::dynamic_pointer_cast<Linear>(blocks["proj_in"]); auto proj_in = std::dynamic_pointer_cast<Linear>(blocks["proj_in"]);
auto proj_out = std::dynamic_pointer_cast<Linear>(blocks["proj_out"]); auto proj_out = std::dynamic_pointer_cast<Linear>(blocks["proj_out"]);
@ -225,19 +225,19 @@ public:
4)); 4));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* last_hidden_state) { ggml_tensor* last_hidden_state) {
// x: [N, channels, h, w] // x: [N, channels, h, w]
auto token_proj = std::dynamic_pointer_cast<Mlp>(blocks["token_proj"]); auto token_proj = std::dynamic_pointer_cast<Mlp>(blocks["token_proj"]);
auto token_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["token_norm"]); auto token_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["token_norm"]);
auto perceiver_resampler = std::dynamic_pointer_cast<FacePerceiverResampler>(blocks["perceiver_resampler"]); auto perceiver_resampler = std::dynamic_pointer_cast<FacePerceiverResampler>(blocks["perceiver_resampler"]);
x = token_proj->forward(ctx, x); x = token_proj->forward(ctx, x);
int64_t nel = ggml_nelements(x); int64_t nel = ggml_nelements(x);
x = ggml_reshape_3d(ctx->ggml_ctx, x, cross_attention_dim, num_tokens, nel / (cross_attention_dim * num_tokens)); x = ggml_reshape_3d(ctx->ggml_ctx, x, cross_attention_dim, num_tokens, nel / (cross_attention_dim * num_tokens));
x = token_norm->forward(ctx, x); x = token_norm->forward(ctx, x);
struct ggml_tensor* out = perceiver_resampler->forward(ctx, x, last_hidden_state); ggml_tensor* out = perceiver_resampler->forward(ctx, x, last_hidden_state);
if (use_residul) if (use_residul)
out = ggml_add(ctx->ggml_ctx, x, out); out = ggml_add(ctx->ggml_ctx, x, out);
return out; return out;
@ -256,9 +256,9 @@ public:
blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(embed_dim)); blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(embed_dim));
} }
struct ggml_tensor* fuse_fn(GGMLRunnerContext* ctx, ggml_tensor* fuse_fn(GGMLRunnerContext* ctx,
struct ggml_tensor* prompt_embeds, ggml_tensor* prompt_embeds,
struct ggml_tensor* id_embeds) { ggml_tensor* id_embeds) {
auto mlp1 = std::dynamic_pointer_cast<FuseBlock>(blocks["mlp1"]); auto mlp1 = std::dynamic_pointer_cast<FuseBlock>(blocks["mlp1"]);
auto mlp2 = std::dynamic_pointer_cast<FuseBlock>(blocks["mlp2"]); auto mlp2 = std::dynamic_pointer_cast<FuseBlock>(blocks["mlp2"]);
auto layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["layer_norm"]); auto layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["layer_norm"]);
@ -273,24 +273,24 @@ public:
return stacked_id_embeds; return stacked_id_embeds;
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* prompt_embeds, ggml_tensor* prompt_embeds,
struct ggml_tensor* id_embeds, ggml_tensor* id_embeds,
struct ggml_tensor* class_tokens_mask, ggml_tensor* class_tokens_mask,
struct ggml_tensor* class_tokens_mask_pos, ggml_tensor* class_tokens_mask_pos,
struct ggml_tensor* left, ggml_tensor* left,
struct ggml_tensor* right) { ggml_tensor* right) {
// x: [N, channels, h, w] // x: [N, channels, h, w]
struct ggml_tensor* valid_id_embeds = id_embeds; ggml_tensor* valid_id_embeds = id_embeds;
// # slice out the image token embeddings // # slice out the image token embeddings
ggml_set_name(class_tokens_mask_pos, "class_tokens_mask_pos"); ggml_set_name(class_tokens_mask_pos, "class_tokens_mask_pos");
ggml_set_name(prompt_embeds, "prompt_embeds"); ggml_set_name(prompt_embeds, "prompt_embeds");
struct ggml_tensor* image_token_embeds = ggml_get_rows(ctx->ggml_ctx, prompt_embeds, class_tokens_mask_pos); ggml_tensor* image_token_embeds = ggml_get_rows(ctx->ggml_ctx, prompt_embeds, class_tokens_mask_pos);
ggml_set_name(image_token_embeds, "image_token_embeds"); ggml_set_name(image_token_embeds, "image_token_embeds");
valid_id_embeds = ggml_reshape_2d(ctx->ggml_ctx, valid_id_embeds, valid_id_embeds->ne[0], valid_id_embeds = ggml_reshape_2d(ctx->ggml_ctx, valid_id_embeds, valid_id_embeds->ne[0],
ggml_nelements(valid_id_embeds) / valid_id_embeds->ne[0]); ggml_nelements(valid_id_embeds) / valid_id_embeds->ne[0]);
struct ggml_tensor* stacked_id_embeds = fuse_fn(ctx, image_token_embeds, valid_id_embeds); ggml_tensor* stacked_id_embeds = fuse_fn(ctx, image_token_embeds, valid_id_embeds);
if (left && right) { if (left && right) {
stacked_id_embeds = ggml_concat(ctx->ggml_ctx, left, stacked_id_embeds, 1); stacked_id_embeds = ggml_concat(ctx->ggml_ctx, left, stacked_id_embeds, 1);
@ -301,10 +301,10 @@ public:
stacked_id_embeds = ggml_concat(ctx->ggml_ctx, stacked_id_embeds, right, 1); stacked_id_embeds = ggml_concat(ctx->ggml_ctx, stacked_id_embeds, right, 1);
} }
class_tokens_mask = ggml_cont(ctx->ggml_ctx, ggml_transpose(ctx->ggml_ctx, class_tokens_mask)); class_tokens_mask = ggml_cont(ctx->ggml_ctx, ggml_transpose(ctx->ggml_ctx, class_tokens_mask));
class_tokens_mask = ggml_repeat(ctx->ggml_ctx, class_tokens_mask, prompt_embeds); class_tokens_mask = ggml_repeat(ctx->ggml_ctx, class_tokens_mask, prompt_embeds);
prompt_embeds = ggml_mul(ctx->ggml_ctx, prompt_embeds, class_tokens_mask); prompt_embeds = ggml_mul(ctx->ggml_ctx, prompt_embeds, class_tokens_mask);
struct ggml_tensor* updated_prompt_embeds = ggml_add(ctx->ggml_ctx, prompt_embeds, stacked_id_embeds); ggml_tensor* updated_prompt_embeds = ggml_add(ctx->ggml_ctx, prompt_embeds, stacked_id_embeds);
ggml_set_name(updated_prompt_embeds, "updated_prompt_embeds"); ggml_set_name(updated_prompt_embeds, "updated_prompt_embeds");
return updated_prompt_embeds; return updated_prompt_embeds;
} }
@ -317,22 +317,22 @@ struct PhotoMakerIDEncoderBlock : public CLIPVisionModelProjection {
blocks["fuse_module"] = std::shared_ptr<GGMLBlock>(new FuseModule(2048)); blocks["fuse_module"] = std::shared_ptr<GGMLBlock>(new FuseModule(2048));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* id_pixel_values, ggml_tensor* id_pixel_values,
struct ggml_tensor* prompt_embeds, ggml_tensor* prompt_embeds,
struct ggml_tensor* class_tokens_mask, ggml_tensor* class_tokens_mask,
struct ggml_tensor* class_tokens_mask_pos, ggml_tensor* class_tokens_mask_pos,
struct ggml_tensor* left, ggml_tensor* left,
struct ggml_tensor* right) { ggml_tensor* right) {
// x: [N, channels, h, w] // x: [N, channels, h, w]
auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]); auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]);
auto visual_projection = std::dynamic_pointer_cast<CLIPProjection>(blocks["visual_projection"]); auto visual_projection = std::dynamic_pointer_cast<CLIPProjection>(blocks["visual_projection"]);
auto visual_projection_2 = std::dynamic_pointer_cast<Linear>(blocks["visual_projection_2"]); auto visual_projection_2 = std::dynamic_pointer_cast<Linear>(blocks["visual_projection_2"]);
auto fuse_module = std::dynamic_pointer_cast<FuseModule>(blocks["fuse_module"]); auto fuse_module = std::dynamic_pointer_cast<FuseModule>(blocks["fuse_module"]);
struct ggml_tensor* shared_id_embeds = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size] ggml_tensor* shared_id_embeds = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size]
struct ggml_tensor* id_embeds = visual_projection->forward(ctx, shared_id_embeds); // [N, proj_dim(768)] ggml_tensor* id_embeds = visual_projection->forward(ctx, shared_id_embeds); // [N, proj_dim(768)]
struct ggml_tensor* id_embeds_2 = visual_projection_2->forward(ctx, shared_id_embeds); // [N, 1280] ggml_tensor* id_embeds_2 = visual_projection_2->forward(ctx, shared_id_embeds); // [N, 1280]
id_embeds = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds, 2, 0, 1, 3)); id_embeds = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds, 2, 0, 1, 3));
id_embeds_2 = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds_2, 2, 0, 1, 3)); id_embeds_2 = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds_2, 2, 0, 1, 3));
@ -340,12 +340,12 @@ struct PhotoMakerIDEncoderBlock : public CLIPVisionModelProjection {
id_embeds = ggml_concat(ctx->ggml_ctx, id_embeds, id_embeds_2, 2); // [batch_size, seq_length, 1, 2048] check whether concat at dim 2 is right id_embeds = ggml_concat(ctx->ggml_ctx, id_embeds, id_embeds_2, 2); // [batch_size, seq_length, 1, 2048] check whether concat at dim 2 is right
id_embeds = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds, 1, 2, 0, 3)); id_embeds = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds, 1, 2, 0, 3));
struct ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx, ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx,
prompt_embeds, prompt_embeds,
id_embeds, id_embeds,
class_tokens_mask, class_tokens_mask,
class_tokens_mask_pos, class_tokens_mask_pos,
left, right); left, right);
return updated_prompt_embeds; return updated_prompt_embeds;
} }
}; };
@ -365,29 +365,29 @@ struct PhotoMakerIDEncoder_CLIPInsightfaceExtendtokenBlock : public CLIPVisionMo
num_tokens)); num_tokens));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* id_pixel_values, ggml_tensor* id_pixel_values,
struct ggml_tensor* prompt_embeds, ggml_tensor* prompt_embeds,
struct ggml_tensor* class_tokens_mask, ggml_tensor* class_tokens_mask,
struct ggml_tensor* class_tokens_mask_pos, ggml_tensor* class_tokens_mask_pos,
struct ggml_tensor* id_embeds, ggml_tensor* id_embeds,
struct ggml_tensor* left, ggml_tensor* left,
struct ggml_tensor* right) { ggml_tensor* right) {
// x: [N, channels, h, w] // x: [N, channels, h, w]
auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]); auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]);
auto fuse_module = std::dynamic_pointer_cast<FuseModule>(blocks["fuse_module"]); auto fuse_module = std::dynamic_pointer_cast<FuseModule>(blocks["fuse_module"]);
auto qformer_perceiver = std::dynamic_pointer_cast<QFormerPerceiver>(blocks["qformer_perceiver"]); auto qformer_perceiver = std::dynamic_pointer_cast<QFormerPerceiver>(blocks["qformer_perceiver"]);
// struct ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size] // ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size]
struct ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values, false); // [N, hidden_size] ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values, false); // [N, hidden_size]
id_embeds = qformer_perceiver->forward(ctx, id_embeds, last_hidden_state); id_embeds = qformer_perceiver->forward(ctx, id_embeds, last_hidden_state);
struct ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx, ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx,
prompt_embeds, prompt_embeds,
id_embeds, id_embeds,
class_tokens_mask, class_tokens_mask,
class_tokens_mask_pos, class_tokens_mask_pos,
left, right); left, right);
return updated_prompt_embeds; return updated_prompt_embeds;
} }
}; };
@ -436,18 +436,17 @@ public:
return pm_version; return pm_version;
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
if (pm_version == PM_VERSION_1) if (pm_version == PM_VERSION_1)
id_encoder.get_param_tensors(tensors, prefix); id_encoder.get_param_tensors(tensors, prefix);
else if (pm_version == PM_VERSION_2) else if (pm_version == PM_VERSION_2)
id_encoder2.get_param_tensors(tensors, prefix); id_encoder2.get_param_tensors(tensors, prefix);
} }
struct ggml_cgraph* build_graph( // struct ggml_allocr* allocr, ggml_cgraph* build_graph(const sd::Tensor<float>& id_pixel_values_tensor,
struct ggml_tensor* id_pixel_values, const sd::Tensor<float>& prompt_embeds_tensor,
struct ggml_tensor* prompt_embeds, std::vector<bool>& class_tokens_mask,
std::vector<bool>& class_tokens_mask, const sd::Tensor<float>& id_embeds_tensor = {}) {
struct ggml_tensor* id_embeds) {
ctm.clear(); ctm.clear();
ctmf16.clear(); ctmf16.clear();
ctmpos.clear(); ctmpos.clear();
@ -458,20 +457,20 @@ public:
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx); ggml_cgraph* gf = ggml_new_graph(compute_ctx);
ggml_tensor* id_pixel_values = make_input(id_pixel_values_tensor);
ggml_tensor* prompt_embeds = make_input(prompt_embeds_tensor);
ggml_tensor* id_embeds = make_optional_input(id_embeds_tensor);
int64_t hidden_size = prompt_embeds->ne[0]; int64_t hidden_size = prompt_embeds->ne[0];
int64_t seq_length = prompt_embeds->ne[1]; int64_t seq_length = prompt_embeds->ne[1];
ggml_type type = GGML_TYPE_F32; ggml_type type = GGML_TYPE_F32;
struct ggml_tensor* class_tokens_mask_d = ggml_new_tensor_1d(runner_ctx.ggml_ctx, type, class_tokens_mask.size()); ggml_tensor* class_tokens_mask_d = ggml_new_tensor_1d(runner_ctx.ggml_ctx, type, class_tokens_mask.size());
struct ggml_tensor* id_pixel_values_d = to_backend(id_pixel_values); ggml_tensor* left = nullptr;
struct ggml_tensor* prompt_embeds_d = to_backend(prompt_embeds); ggml_tensor* right = nullptr;
struct ggml_tensor* id_embeds_d = to_backend(id_embeds);
struct ggml_tensor* left = nullptr;
struct ggml_tensor* right = nullptr;
for (int i = 0; i < class_tokens_mask.size(); i++) { for (int i = 0; i < class_tokens_mask.size(); i++) {
if (class_tokens_mask[i]) { if (class_tokens_mask[i]) {
// printf(" 1,"); // printf(" 1,");
@ -495,7 +494,7 @@ public:
right = ggml_new_tensor_3d(runner_ctx.ggml_ctx, type, right = ggml_new_tensor_3d(runner_ctx.ggml_ctx, type,
hidden_size, seq_length - ctmpos[ctmpos.size() - 1] - 1, 1); hidden_size, seq_length - ctmpos[ctmpos.size() - 1] - 1, 1);
} }
struct ggml_tensor* class_tokens_mask_pos = ggml_new_tensor_1d(runner_ctx.ggml_ctx, GGML_TYPE_I32, ctmpos.size()); ggml_tensor* class_tokens_mask_pos = ggml_new_tensor_1d(runner_ctx.ggml_ctx, GGML_TYPE_I32, ctmpos.size());
{ {
if (type == GGML_TYPE_F16) if (type == GGML_TYPE_F16)
@ -526,21 +525,21 @@ public:
} }
} }
} }
struct ggml_tensor* updated_prompt_embeds = nullptr; ggml_tensor* updated_prompt_embeds = nullptr;
if (pm_version == PM_VERSION_1) if (pm_version == PM_VERSION_1)
updated_prompt_embeds = id_encoder.forward(&runner_ctx, updated_prompt_embeds = id_encoder.forward(&runner_ctx,
id_pixel_values_d, id_pixel_values,
prompt_embeds_d, prompt_embeds,
class_tokens_mask_d, class_tokens_mask_d,
class_tokens_mask_pos, class_tokens_mask_pos,
left, right); left, right);
else if (pm_version == PM_VERSION_2) else if (pm_version == PM_VERSION_2)
updated_prompt_embeds = id_encoder2.forward(&runner_ctx, updated_prompt_embeds = id_encoder2.forward(&runner_ctx,
id_pixel_values_d, id_pixel_values,
prompt_embeds_d, prompt_embeds,
class_tokens_mask_d, class_tokens_mask_d,
class_tokens_mask_pos, class_tokens_mask_pos,
id_embeds_d, id_embeds,
left, right); left, right);
ggml_build_forward_expand(gf, updated_prompt_embeds); ggml_build_forward_expand(gf, updated_prompt_embeds);
@ -548,25 +547,21 @@ public:
return gf; return gf;
} }
bool compute(const int n_threads, sd::Tensor<float> compute(const int n_threads,
struct ggml_tensor* id_pixel_values, const sd::Tensor<float>& id_pixel_values,
struct ggml_tensor* prompt_embeds, const sd::Tensor<float>& prompt_embeds,
struct ggml_tensor* id_embeds, const sd::Tensor<float>& id_embeds,
std::vector<bool>& class_tokens_mask, std::vector<bool>& class_tokens_mask) {
struct ggml_tensor** updated_prompt_embeds, auto get_graph = [&]() -> ggml_cgraph* {
ggml_context* output_ctx) {
auto get_graph = [&]() -> struct ggml_cgraph* {
// return build_graph(compute_allocr, id_pixel_values, prompt_embeds, class_tokens_mask);
return build_graph(id_pixel_values, prompt_embeds, class_tokens_mask, id_embeds); return build_graph(id_pixel_values, prompt_embeds, class_tokens_mask, id_embeds);
}; };
// GGMLRunner::compute(get_graph, n_threads, updated_prompt_embeds); return take_or_empty(GGMLRunner::compute<float>(get_graph, n_threads, true));
return GGMLRunner::compute(get_graph, n_threads, true, updated_prompt_embeds, output_ctx);
} }
}; };
struct PhotoMakerIDEmbed : public GGMLRunner { struct PhotoMakerIDEmbed : public GGMLRunner {
std::map<std::string, struct ggml_tensor*> tensors; std::map<std::string, ggml_tensor*> tensors;
std::string file_path; std::string file_path;
ModelLoader* model_loader; ModelLoader* model_loader;
bool load_failed = false; bool load_failed = false;
@ -606,11 +601,11 @@ struct PhotoMakerIDEmbed : public GGMLRunner {
} }
if (dry_run) { if (dry_run) {
std::lock_guard<std::mutex> lock(tensor_mutex); std::lock_guard<std::mutex> lock(tensor_mutex);
struct ggml_tensor* real = ggml_new_tensor(params_ctx, ggml_tensor* real = ggml_new_tensor(params_ctx,
tensor_storage.type, tensor_storage.type,
tensor_storage.n_dims, tensor_storage.n_dims,
tensor_storage.ne); tensor_storage.ne);
tensors[name] = real; tensors[name] = real;
} else { } else {
auto real = tensors[name]; auto real = tensors[name];
*dst_tensor = real; *dst_tensor = real;
@ -629,8 +624,8 @@ struct PhotoMakerIDEmbed : public GGMLRunner {
return true; return true;
} }
struct ggml_tensor* get() { ggml_tensor* get() {
std::map<std::string, struct ggml_tensor*>::iterator pos; std::map<std::string, ggml_tensor*>::iterator pos;
pos = tensors.find("pmid.id_embeds"); pos = tensors.find("pmid.id_embeds");
if (pos != tensors.end()) if (pos != tensors.end())
return pos->second; return pos->second;

278
src/preprocessing.hpp Normal file
View File

@ -0,0 +1,278 @@
#ifndef __PREPROCESSING_HPP__
#define __PREPROCESSING_HPP__
#include <cmath>
#include <limits>
#include "ggml_extend.hpp"
#define M_PI_ 3.14159265358979323846f
static inline int64_t preprocessing_offset_4d(const sd::Tensor<float>& tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
    // Flattens a 4D index (i0 fastest-varying) into a linear offset into the
    // tensor's value buffer. Missing trailing dimensions are treated as size 1.
    const auto& shape = tensor.shape();
    const int64_t d0 = shape.size() > 0 ? shape[0] : 1;
    const int64_t d1 = shape.size() > 1 ? shape[1] : 1;
    const int64_t d2 = shape.size() > 2 ? shape[2] : 1;
    // Row-major accumulation over (i3, i2, i1, i0).
    return ((i3 * d2 + i2) * d1 + i1) * d0 + i0;
}
static inline float preprocessing_get_4d(const sd::Tensor<float>& tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
    // Reads the element at (i0, i1, i2, i3) from the flat value buffer.
    const auto offset = static_cast<size_t>(preprocessing_offset_4d(tensor, i0, i1, i2, i3));
    return tensor.values()[offset];
}
static inline void preprocessing_set_4d(sd::Tensor<float>& tensor, float value, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
    // Writes `value` to the element at (i0, i1, i2, i3) in the flat value buffer.
    const auto offset = static_cast<size_t>(preprocessing_offset_4d(tensor, i0, i1, i2, i3));
    tensor.values()[offset] = value;
}
static inline sd::Tensor<float> sd_image_to_preprocessing_tensor(sd_image_t image) {
    // Converts an interleaved 8-bit sd_image_t into a float tensor laid out
    // as {width, height, channel, 1}, using sd_image_get_f32 for each pixel
    // (the inverse conversion below assumes values in [0, 1]).
    const int64_t w = static_cast<int64_t>(image.width);
    const int64_t h = static_cast<int64_t>(image.height);
    const int64_t c = static_cast<int64_t>(image.channel);
    sd::Tensor<float> tensor({w, h, c, 1});
    for (uint32_t row = 0; row < image.height; ++row) {
        for (uint32_t col = 0; col < image.width; ++col) {
            for (uint32_t ch = 0; ch < image.channel; ++ch) {
                preprocessing_set_4d(tensor, sd_image_get_f32(image, col, row, ch), col, row, ch, 0);
            }
        }
    }
    return tensor;
}
static inline void preprocessing_tensor_to_sd_image(const sd::Tensor<float>& tensor, uint8_t* image_data) {
    // Writes a {width, height, channel, 1} float tensor into an interleaved
    // 8-bit pixel buffer, clamping each value to [0, 1] before scaling to 255.
    // `image_data` must hold at least width * height * channel bytes.
    GGML_ASSERT(tensor.dim() == 4);
    GGML_ASSERT(tensor.shape()[3] == 1);
    GGML_ASSERT(image_data != nullptr);
    const int w  = static_cast<int>(tensor.shape()[0]);
    const int h  = static_cast<int>(tensor.shape()[1]);
    const int ch = static_cast<int>(tensor.shape()[2]);
    for (int row = 0; row < h; ++row) {
        for (int col = 0; col < w; ++col) {
            for (int k = 0; k < ch; ++k) {
                float v = preprocessing_get_4d(tensor, col, row, k, 0);
                v       = std::min(1.0f, std::max(0.0f, v));  // clamp to [0, 1]
                image_data[(row * w + col) * ch + k] = static_cast<uint8_t>(std::round(v * 255.0f));
            }
        }
    }
}
static inline sd::Tensor<float> gaussian_kernel_tensor(int kernel_size, float sigma = 1.4f) {
    // Builds a 2D Gaussian kernel of shape {kernel_size, kernel_size, 1, 1}
    // centered on the middle element, weighted by 1 / (2*pi*sigma^2).
    //
    // kernel_size: kernel width/height in pixels (odd sizes center exactly).
    // sigma:       Gaussian standard deviation; defaults to the previously
    //              hard-coded 1.4, so existing callers are unaffected.
    sd::Tensor<float> kernel({kernel_size, kernel_size, 1, 1});
    const int ks_mid = kernel_size / 2;
    // Hoist 2*sigma^2 out of the loop and use plain multiplication instead of
    // std::pow(sigma, 2.0f), which is a general transcendental call.
    const float two_sigma_sq = 2.0f * sigma * sigma;
    const float normal       = 1.f / (M_PI_ * two_sigma_sq);  // == 1 / (2*pi*sigma^2)
    for (int y = 0; y < kernel_size; ++y) {
        const float gx = static_cast<float>(-ks_mid + y);
        for (int x = 0; x < kernel_size; ++x) {
            const float gy = static_cast<float>(-ks_mid + x);
            // Note: gx/gy naming is swapped relative to the loop axes, but the
            // kernel is symmetric in x/y so the result is identical.
            const float k = std::exp(-((gx * gx + gy * gy) / two_sigma_sq)) * normal;
            preprocessing_set_4d(kernel, k, x, y, 0, 0);
        }
    }
    return kernel;
}
static inline sd::Tensor<float> convolve_tensor(const sd::Tensor<float>& input, const sd::Tensor<float>& kernel, int padding) {
    // Per-channel 2D correlation of `input` with a single-plane `kernel`,
    // with `padding` pixels of implicit zero border on every side. Output has
    // the same shape as the input.
    GGML_ASSERT(input.dim() == 4);
    GGML_ASSERT(kernel.dim() == 4);
    GGML_ASSERT(input.shape()[3] == 1);
    GGML_ASSERT(kernel.shape()[2] == 1);
    GGML_ASSERT(kernel.shape()[3] == 1);
    sd::Tensor<float> output(input.shape());
    const int64_t in_w = input.shape()[0];
    const int64_t in_h = input.shape()[1];
    const int64_t n_ch = input.shape()[2];
    const int64_t k_w  = kernel.shape()[0];
    const int64_t k_h  = kernel.shape()[1];
    for (int64_t ch = 0; ch < n_ch; ++ch) {
        for (int64_t oy = 0; oy < in_h; ++oy) {
            for (int64_t ox = 0; ox < in_w; ++ox) {
                float acc = 0.0f;
                for (int64_t ky = 0; ky < k_h; ++ky) {
                    const int64_t sy = oy + ky - padding;
                    if (sy < 0 || sy >= in_h) {
                        continue;  // row outside the image contributes zero
                    }
                    for (int64_t kx = 0; kx < k_w; ++kx) {
                        const int64_t sx = ox + kx - padding;
                        if (sx < 0 || sx >= in_w) {
                            continue;  // column outside the image contributes zero
                        }
                        acc += preprocessing_get_4d(input, sx, sy, ch, 0) * preprocessing_get_4d(kernel, kx, ky, 0, 0);
                    }
                }
                preprocessing_set_4d(output, acc, ox, oy, ch, 0);
            }
        }
    }
    return output;
}
static inline sd::Tensor<float> grayscale_tensor(const sd::Tensor<float>& rgb_img) {
    // Convert an RGB(A) tensor (indexed x, y, channel, batch) to a
    // single-channel grayscale tensor using the ITU-R BT.601 luma weights.
    // Channels beyond the first three (e.g. alpha) are ignored.
    GGML_ASSERT(rgb_img.dim() == 4);
    GGML_ASSERT(rgb_img.shape()[2] >= 3);
    sd::Tensor<float> grayscale({rgb_img.shape()[0], rgb_img.shape()[1], 1, rgb_img.shape()[3]});
    // Fix: iterate over the batch dimension instead of always using batch
    // index 0, so batched inputs are fully converted. Behavior is identical
    // for the common batch==1 case.
    for (int64_t ib = 0; ib < rgb_img.shape()[3]; ++ib) {
        for (int64_t iy = 0; iy < rgb_img.shape()[1]; ++iy) {
            for (int64_t ix = 0; ix < rgb_img.shape()[0]; ++ix) {
                float r = preprocessing_get_4d(rgb_img, ix, iy, 0, ib);
                float g = preprocessing_get_4d(rgb_img, ix, iy, 1, ib);
                float b = preprocessing_get_4d(rgb_img, ix, iy, 2, ib);
                // BT.601 luma weights.
                float gray = 0.2989f * r + 0.5870f * g + 0.1140f * b;
                preprocessing_set_4d(grayscale, gray, ix, iy, 0, ib);
            }
        }
    }
    return grayscale;
}
static inline sd::Tensor<float> tensor_hypot(const sd::Tensor<float>& x, const sd::Tensor<float>& y) {
    // Element-wise sqrt(x^2 + y^2). Uses std::hypot, which avoids the
    // intermediate overflow/underflow that squaring the operands directly
    // can cause for large or tiny magnitudes.
    sd::tensor_check_same_shape(x, y);
    sd::Tensor<float> out(x.shape());
    for (int64_t i = 0; i < out.numel(); ++i) {
        out[i] = std::hypot(x[i], y[i]);
    }
    return out;
}
static inline sd::Tensor<float> tensor_arctan2(const sd::Tensor<float>& x, const sd::Tensor<float>& y) {
    // Element-wise atan2(y, x): the angle (radians) of the vector (x, y).
    sd::tensor_check_same_shape(x, y);
    sd::Tensor<float> angles(x.shape());
    const int64_t count = angles.numel();
    for (int64_t idx = 0; idx < count; ++idx) {
        angles[idx] = std::atan2(y[idx], x[idx]);
    }
    return angles;
}
static inline void normalize_tensor(sd::Tensor<float>* g) {
    // Scale the tensor in place so its maximum element becomes 1.
    // No-op for an empty tensor, a zero maximum, or a non-finite maximum.
    GGML_ASSERT(g != nullptr);
    if (g->empty()) {
        return;
    }
    float peak      = -std::numeric_limits<float>::infinity();
    const int64_t n = g->numel();
    for (int64_t idx = 0; idx < n; ++idx) {
        const float v = (*g)[idx];
        if (v > peak) {
            peak = v;
        }
    }
    if (peak == 0.0f || !std::isfinite(peak)) {
        return;
    }
    *g *= (1.0f / peak);
}
static inline sd::Tensor<float> non_max_supression(const sd::Tensor<float>& G, const sd::Tensor<float>& D) {
    // Canny non-maximum suppression: keep a gradient magnitude only if it is
    // a local maximum along its gradient direction, otherwise zero it.
    // G: gradient magnitudes; D: gradient directions in radians.
    // Border pixels are left at zero. (Name keeps the existing
    // "supression" spelling for caller compatibility.)
    GGML_ASSERT(G.shape() == D.shape());
    sd::Tensor<float> result = sd::Tensor<float>::zeros(G.shape());
    for (int64_t iy = 1; iy < result.shape()[1] - 1; ++iy) {
        for (int64_t ix = 1; ix < result.shape()[0] - 1; ++ix) {
            // Map the direction to [0, 180) degrees and bucket it into one of
            // four orientations (horizontal, two diagonals, vertical).
            float angle = preprocessing_get_4d(D, ix, iy, 0, 0) * 180.0f / M_PI_;
            angle       = angle < 0.0f ? angle + 180.0f : angle;
            float q     = 1.0f;
            float r     = 1.0f;
            // Fix: the range checks used ">=" where "<=" was intended (e.g.
            // "0 >= angle" instead of "0 <= angle"), which made the first
            // bucket match almost every angle and broke the suppression.
            if ((0.0f <= angle && angle < 22.5f) || (157.5f <= angle && angle <= 180.0f)) {
                q = preprocessing_get_4d(G, ix, iy + 1, 0, 0);
                r = preprocessing_get_4d(G, ix, iy - 1, 0, 0);
            } else if (22.5f <= angle && angle < 67.5f) {
                q = preprocessing_get_4d(G, ix + 1, iy - 1, 0, 0);
                r = preprocessing_get_4d(G, ix - 1, iy + 1, 0, 0);
            } else if (67.5f <= angle && angle < 112.5f) {
                q = preprocessing_get_4d(G, ix + 1, iy, 0, 0);
                r = preprocessing_get_4d(G, ix - 1, iy, 0, 0);
            } else if (112.5f <= angle && angle < 157.5f) {
                q = preprocessing_get_4d(G, ix - 1, iy - 1, 0, 0);
                r = preprocessing_get_4d(G, ix + 1, iy + 1, 0, 0);
            }
            // Keep the pixel only if it dominates both neighbors along the
            // gradient direction.
            float cur = preprocessing_get_4d(G, ix, iy, 0, 0);
            preprocessing_set_4d(result, (cur >= q && cur >= r) ? cur : 0.0f, ix, iy, 0, 0);
        }
    }
    return result;
}
static inline void threshold_hystersis(sd::Tensor<float>* img, float high_threshold, float low_threshold, float weak, float strong) {
    // Canny hysteresis thresholding, in place:
    //   1. classify pixels as strong (>= high), weak (in between), or leave
    //      them as-is;
    //   2. zero a 3-pixel border;
    //   3. promote weak pixels touching a strong neighbor, drop the rest.
    // (Name keeps the existing "hystersis" spelling for caller compatibility.)
    GGML_ASSERT(img != nullptr);
    if (img->empty()) {
        return;
    }
    float max_value = -std::numeric_limits<float>::infinity();
    for (int64_t i = 0; i < img->numel(); ++i) {
        max_value = std::max(max_value, (*img)[i]);
    }
    // Thresholds are relative: high w.r.t. the max value, low w.r.t. high.
    float ht = max_value * high_threshold;
    float lt = ht * low_threshold;
    for (int64_t i = 0; i < img->numel(); ++i) {
        float img_v = (*img)[i];
        if (img_v >= ht) {
            (*img)[i] = strong;
        } else if (img_v >= lt) {  // the "<= ht" half was redundant here
            (*img)[i] = weak;
        }
    }
    // Clear a border frame so edges hugging the frame cannot survive.
    for (int64_t iy = 0; iy < img->shape()[1]; ++iy) {
        for (int64_t ix = 0; ix < img->shape()[0]; ++ix) {
            if (!(ix >= 3 && ix <= img->shape()[0] - 3 && iy >= 3 && iy <= img->shape()[1] - 3)) {
                preprocessing_set_4d(*img, 0.0f, ix, iy, 0, 0);
            }
        }
    }
    for (int64_t iy = 1; iy < img->shape()[1] - 1; ++iy) {
        for (int64_t ix = 1; ix < img->shape()[0] - 1; ++ix) {
            float imd_v = preprocessing_get_4d(*img, ix, iy, 0, 0);
            if (imd_v == weak) {
                // Fix: scan the full 8-neighborhood; the original skipped the
                // (x - 1, y + 1) and (x + 1, y + 1) neighbors.
                bool has_strong_neighbor =
                    preprocessing_get_4d(*img, ix + 1, iy - 1, 0, 0) == strong ||
                    preprocessing_get_4d(*img, ix + 1, iy, 0, 0) == strong ||
                    preprocessing_get_4d(*img, ix + 1, iy + 1, 0, 0) == strong ||
                    preprocessing_get_4d(*img, ix, iy - 1, 0, 0) == strong ||
                    preprocessing_get_4d(*img, ix, iy + 1, 0, 0) == strong ||
                    preprocessing_get_4d(*img, ix - 1, iy - 1, 0, 0) == strong ||
                    preprocessing_get_4d(*img, ix - 1, iy, 0, 0) == strong ||
                    preprocessing_get_4d(*img, ix - 1, iy + 1, 0, 0) == strong;
                preprocessing_set_4d(*img, has_strong_neighbor ? strong : 0.0f, ix, iy, 0, 0);
            }
        }
    }
}
bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold, float weak, float strong, bool inverse) {
    // Run the Canny edge-detection pipeline on `img` in place:
    // grayscale -> 5x5 Gaussian blur -> Sobel gradients -> non-maximum
    // suppression -> hysteresis thresholding, then write the (optionally
    // inverted) edge map back into every channel of the image buffer.
    // Always returns true.
    const float sobel_x[9] = {
        -1, 0, 1,
        -2, 0, 2,
        -1, 0, 1};
    const float sobel_y[9] = {
        1, 2, 1,
        0, 0, 0,
        -1, -2, -1};
    sd::Tensor<float> blur_kernel = gaussian_kernel_tensor(5);
    sd::Tensor<float> kx_tensor({3, 3, 1, 1}, std::vector<float>(sobel_x, sobel_x + 9));
    sd::Tensor<float> ky_tensor({3, 3, 1, 1}, std::vector<float>(sobel_y, sobel_y + 9));

    sd::Tensor<float> image = sd_image_to_preprocessing_tensor(img);
    sd::Tensor<float> gray  = grayscale_tensor(image);
    gray                    = convolve_tensor(gray, blur_kernel, 2);

    // Gradient magnitude (normalized to [0, 1]) and direction.
    sd::Tensor<float> grad_x    = convolve_tensor(gray, kx_tensor, 1);
    sd::Tensor<float> grad_y    = convolve_tensor(gray, ky_tensor, 1);
    sd::Tensor<float> magnitude = tensor_hypot(grad_x, grad_y);
    normalize_tensor(&magnitude);
    sd::Tensor<float> direction = tensor_arctan2(grad_x, grad_y);

    gray = non_max_supression(magnitude, direction);
    threshold_hystersis(&gray, high_threshold, low_threshold, weak, strong);

    // Broadcast the single-channel edge map to every channel of the output.
    for (uint32_t iy = 0; iy < img.height; ++iy) {
        for (uint32_t ix = 0; ix < img.width; ++ix) {
            float edge = preprocessing_get_4d(gray, ix, iy, 0, 0);
            if (inverse) {
                edge = 1.0f - edge;
            }
            for (uint32_t c = 0; c < img.channel; ++c) {
                preprocessing_set_4d(image, edge, ix, iy, c, 0);
            }
        }
    }
    preprocessing_tensor_to_sd_image(image, img.data);
    return true;
}
#endif // __PREPROCESSING_HPP__

View File

@ -3,9 +3,8 @@
#include <memory> #include <memory>
#include "common.hpp" #include "common_block.hpp"
#include "flux.hpp" #include "flux.hpp"
#include "ggml_extend.hpp"
namespace Qwen { namespace Qwen {
constexpr int QWEN_IMAGE_GRAPH_SIZE = 20480; constexpr int QWEN_IMAGE_GRAPH_SIZE = 20480;
@ -27,9 +26,9 @@ namespace Qwen {
blocks["linear_2"] = std::shared_ptr<GGMLBlock>(new Linear(time_embed_dim, out_dim, sample_proj_bias)); blocks["linear_2"] = std::shared_ptr<GGMLBlock>(new Linear(time_embed_dim, out_dim, sample_proj_bias));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* sample, ggml_tensor* sample,
struct ggml_tensor* condition = nullptr) { ggml_tensor* condition = nullptr) {
if (condition != nullptr) { if (condition != nullptr) {
auto cond_proj = std::dynamic_pointer_cast<Linear>(blocks["cond_proj"]); auto cond_proj = std::dynamic_pointer_cast<Linear>(blocks["cond_proj"]);
sample = ggml_add(ctx->ggml_ctx, sample, cond_proj->forward(ctx, condition)); sample = ggml_add(ctx->ggml_ctx, sample, cond_proj->forward(ctx, condition));
@ -50,8 +49,8 @@ namespace Qwen {
blocks["timestep_embedder"] = std::shared_ptr<GGMLBlock>(new TimestepEmbedding(256, embedding_dim)); blocks["timestep_embedder"] = std::shared_ptr<GGMLBlock>(new TimestepEmbedding(256, embedding_dim));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* timesteps) { ggml_tensor* timesteps) {
// timesteps: [N,] // timesteps: [N,]
// return: [N, embedding_dim] // return: [N, embedding_dim]
auto timestep_embedder = std::dynamic_pointer_cast<TimestepEmbedding>(blocks["timestep_embedder"]); auto timestep_embedder = std::dynamic_pointer_cast<TimestepEmbedding>(blocks["timestep_embedder"]);
@ -108,10 +107,10 @@ namespace Qwen {
} }
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx, std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
struct ggml_tensor* img, ggml_tensor* img,
struct ggml_tensor* txt, ggml_tensor* txt,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mask = nullptr) { ggml_tensor* mask = nullptr) {
// img: [N, n_img_token, hidden_size] // img: [N, n_img_token, hidden_size]
// txt: [N, n_txt_token, hidden_size] // txt: [N, n_txt_token, hidden_size]
// pe: [n_img_token + n_txt_token, d_head/2, 2, 2] // pe: [n_img_token + n_txt_token, d_head/2, 2, 2]
@ -250,11 +249,11 @@ namespace Qwen {
} }
virtual std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx, virtual std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
struct ggml_tensor* img, ggml_tensor* img,
struct ggml_tensor* txt, ggml_tensor* txt,
struct ggml_tensor* t_emb, ggml_tensor* t_emb,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* modulate_index = nullptr) { ggml_tensor* modulate_index = nullptr) {
// img: [N, n_img_token, hidden_size] // img: [N, n_img_token, hidden_size]
// txt: [N, n_txt_token, hidden_size] // txt: [N, n_txt_token, hidden_size]
// pe: [n_img_token + n_txt_token, d_head/2, 2, 2] // pe: [n_img_token + n_txt_token, d_head/2, 2, 2]
@ -326,9 +325,9 @@ namespace Qwen {
blocks["linear"] = std::shared_ptr<GGMLBlock>(new Linear(conditioning_embedding_dim, embedding_dim * 2, bias)); blocks["linear"] = std::shared_ptr<GGMLBlock>(new Linear(conditioning_embedding_dim, embedding_dim * 2, bias));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* c) { ggml_tensor* c) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
// c: [N, hidden_size] // c: [N, hidden_size]
// return: [N, n_token, patch_size * patch_size * out_channels] // return: [N, n_token, patch_size * patch_size * out_channels]
@ -390,75 +389,12 @@ namespace Qwen {
blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, params.patch_size * params.patch_size * params.out_channels)); blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, params.patch_size * params.patch_size * params.out_channels));
} }
struct ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx, ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
struct ggml_tensor* x) { ggml_tensor* x,
int64_t W = x->ne[0]; ggml_tensor* timestep,
int64_t H = x->ne[1]; ggml_tensor* context,
ggml_tensor* pe,
int pad_h = (params.patch_size - H % params.patch_size) % params.patch_size; ggml_tensor* modulate_index = nullptr) {
int pad_w = (params.patch_size - W % params.patch_size) % params.patch_size;
x = ggml_ext_pad(ctx->ggml_ctx, x, pad_w, pad_h, 0, 0, ctx->circular_x_enabled, ctx->circular_y_enabled);
return x;
}
struct ggml_tensor* patchify(struct ggml_context* ctx,
struct ggml_tensor* x) {
// x: [N, C, H, W]
// return: [N, h*w, C * patch_size * patch_size]
int64_t N = x->ne[3];
int64_t C = x->ne[2];
int64_t H = x->ne[1];
int64_t W = x->ne[0];
int64_t p = params.patch_size;
int64_t h = H / params.patch_size;
int64_t w = W / params.patch_size;
GGML_ASSERT(h * p == H && w * p == W);
x = ggml_reshape_4d(ctx, x, p, w, p, h * C * N); // [N*C*h, p, w, p]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, w, p, p]
x = ggml_reshape_4d(ctx, x, p * p, w * h, C, N); // [N, C, h*w, p*p]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N, h*w, C, p*p]
x = ggml_reshape_3d(ctx, x, p * p * C, w * h, N); // [N, h*w, C*p*p]
return x;
}
struct ggml_tensor* process_img(GGMLRunnerContext* ctx,
struct ggml_tensor* x) {
x = pad_to_patch_size(ctx, x);
x = patchify(ctx->ggml_ctx, x);
return x;
}
struct ggml_tensor* unpatchify(struct ggml_context* ctx,
struct ggml_tensor* x,
int64_t h,
int64_t w) {
// x: [N, h*w, C*patch_size*patch_size]
// return: [N, C, H, W]
int64_t N = x->ne[2];
int64_t C = x->ne[0] / params.patch_size / params.patch_size;
int64_t H = h * params.patch_size;
int64_t W = w * params.patch_size;
int64_t p = params.patch_size;
GGML_ASSERT(C * p * p == x->ne[0]);
x = ggml_reshape_4d(ctx, x, p * p, C, w * h, N); // [N, h*w, C, p*p]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N, C, h*w, p*p]
x = ggml_reshape_4d(ctx, x, p, p, w, h * C * N); // [N*C*h, w, p, p]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, p, w, p]
x = ggml_reshape_4d(ctx, x, W, H, C, N); // [N, C, h*p, w*p]
return x;
}
struct ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* timestep,
struct ggml_tensor* context,
struct ggml_tensor* pe,
struct ggml_tensor* modulate_index = nullptr) {
auto time_text_embed = std::dynamic_pointer_cast<QwenTimestepProjEmbeddings>(blocks["time_text_embed"]); auto time_text_embed = std::dynamic_pointer_cast<QwenTimestepProjEmbeddings>(blocks["time_text_embed"]);
auto txt_norm = std::dynamic_pointer_cast<RMSNorm>(blocks["txt_norm"]); auto txt_norm = std::dynamic_pointer_cast<RMSNorm>(blocks["txt_norm"]);
auto img_in = std::dynamic_pointer_cast<Linear>(blocks["img_in"]); auto img_in = std::dynamic_pointer_cast<Linear>(blocks["img_in"]);
@ -468,7 +404,7 @@ namespace Qwen {
auto t_emb = time_text_embed->forward(ctx, timestep); auto t_emb = time_text_embed->forward(ctx, timestep);
if (params.zero_cond_t) { if (params.zero_cond_t) {
auto t_emb_0 = time_text_embed->forward(ctx, ggml_ext_zeros(ctx->ggml_ctx, timestep->ne[0], timestep->ne[1], timestep->ne[2], timestep->ne[3])); auto t_emb_0 = time_text_embed->forward(ctx, ggml_ext_zeros_like(ctx->ggml_ctx, timestep));
t_emb = ggml_concat(ctx->ggml_ctx, t_emb, t_emb_0, 1); t_emb = ggml_concat(ctx->ggml_ctx, t_emb, t_emb_0, 1);
} }
auto img = img_in->forward(ctx, x); auto img = img_in->forward(ctx, x);
@ -493,13 +429,13 @@ namespace Qwen {
return img; return img;
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* timestep, ggml_tensor* timestep,
struct ggml_tensor* context, ggml_tensor* context,
struct ggml_tensor* pe, ggml_tensor* pe,
std::vector<ggml_tensor*> ref_latents = {}, std::vector<ggml_tensor*> ref_latents = {},
struct ggml_tensor* modulate_index = nullptr) { ggml_tensor* modulate_index = nullptr) {
// Forward pass of DiT. // Forward pass of DiT.
// x: [N, C, H, W] // x: [N, C, H, W]
// timestep: [N,] // timestep: [N,]
@ -512,19 +448,16 @@ namespace Qwen {
int64_t C = x->ne[2]; int64_t C = x->ne[2];
int64_t N = x->ne[3]; int64_t N = x->ne[3];
auto img = process_img(ctx, x); auto img = DiT::pad_and_patchify(ctx, x, params.patch_size, params.patch_size);
int64_t img_tokens = img->ne[1]; int64_t img_tokens = img->ne[1];
if (ref_latents.size() > 0) { if (ref_latents.size() > 0) {
for (ggml_tensor* ref : ref_latents) { for (ggml_tensor* ref : ref_latents) {
ref = process_img(ctx, ref); ref = DiT::pad_and_patchify(ctx, ref, params.patch_size, params.patch_size);
img = ggml_concat(ctx->ggml_ctx, img, ref, 1); img = ggml_concat(ctx->ggml_ctx, img, ref, 1);
} }
} }
int64_t h_len = ((H + (params.patch_size / 2)) / params.patch_size);
int64_t w_len = ((W + (params.patch_size / 2)) / params.patch_size);
auto out = forward_orig(ctx, img, timestep, context, pe, modulate_index); // [N, h_len*w_len, ph*pw*C] auto out = forward_orig(ctx, img, timestep, context, pe, modulate_index); // [N, h_len*w_len, ph*pw*C]
if (out->ne[1] > img_tokens) { if (out->ne[1] > img_tokens) {
@ -533,11 +466,7 @@ namespace Qwen {
out = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, out, 0, 2, 1, 3)); // [N, h*w, C * patch_size * patch_size] out = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, out, 0, 2, 1, 3)); // [N, h*w, C * patch_size * patch_size]
} }
out = unpatchify(ctx->ggml_ctx, out, h_len, w_len); // [N, C, H + pad_h, W + pad_w] out = DiT::unpatchify_and_crop(ctx->ggml_ctx, out, H, W, params.patch_size, params.patch_size); // [N, C, H, W]
// slice
out = ggml_ext_slice(ctx->ggml_ctx, out, 1, 0, H); // [N, C, H, W + pad_w]
out = ggml_ext_slice(ctx->ggml_ctx, out, 0, 0, W); // [N, C, H, W]
return out; return out;
} }
@ -592,24 +521,25 @@ namespace Qwen {
return "qwen_image"; return "qwen_image";
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
qwen_image.get_param_tensors(tensors, prefix); qwen_image.get_param_tensors(tensors, prefix);
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* x, ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps_tensor,
struct ggml_tensor* context, const sd::Tensor<float>& context_tensor,
std::vector<ggml_tensor*> ref_latents = {}, const std::vector<sd::Tensor<float>>& ref_latents_tensor = {},
bool increase_ref_index = false) { bool increase_ref_index = false) {
ggml_cgraph* gf = new_graph_custom(QWEN_IMAGE_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
GGML_ASSERT(x->ne[3] == 1); GGML_ASSERT(x->ne[3] == 1);
struct ggml_cgraph* gf = new_graph_custom(QWEN_IMAGE_GRAPH_SIZE); GGML_ASSERT(!context_tensor.empty());
ggml_tensor* context = make_input(context_tensor);
x = to_backend(x); std::vector<ggml_tensor*> ref_latents;
context = to_backend(context); ref_latents.reserve(ref_latents_tensor.size());
timesteps = to_backend(timesteps); for (const auto& ref_latent_tensor : ref_latents_tensor) {
ref_latents.push_back(make_input(ref_latent_tensor));
for (int i = 0; i < ref_latents.size(); i++) {
ref_latents[i] = to_backend(ref_latents[i]);
} }
pe_vec = Rope::gen_qwen_image_pe(static_cast<int>(x->ne[1]), pe_vec = Rope::gen_qwen_image_pe(static_cast<int>(x->ne[1]),
@ -658,67 +588,72 @@ namespace Qwen {
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_tensor* out = qwen_image.forward(&runner_ctx, ggml_tensor* out = qwen_image.forward(&runner_ctx,
x, x,
timesteps, timesteps,
context, context,
pe, pe,
ref_latents, ref_latents,
modulate_index); modulate_index);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
bool compute(int n_threads, sd::Tensor<float> compute(int n_threads,
struct ggml_tensor* x, const sd::Tensor<float>& x,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps,
struct ggml_tensor* context, const sd::Tensor<float>& context,
std::vector<ggml_tensor*> ref_latents = {}, const std::vector<sd::Tensor<float>>& ref_latents = {},
bool increase_ref_index = false, bool increase_ref_index = false) {
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size] // context: [N, max_position, hidden_size]
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, ref_latents, increase_ref_index); return build_graph(x, timesteps, context, ref_latents, increase_ref_index);
}; };
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx); return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
} }
void test() { void test() {
struct ggml_init_params params; ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); ggml_context* ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr); GGML_ASSERT(ctx != nullptr);
{ {
// auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 16, 1); // auto x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 16, 16, 16, 1);
// ggml_set_f32(x, 0.01f); // ggml_set_f32(x, 0.01f);
auto x = load_tensor_from_file(work_ctx, "./qwen_image_x.bin"); auto x = sd::load_tensor_from_file_as_tensor<float>("./qwen_image_x.bin");
print_ggml_tensor(x); print_sd_tensor(x);
std::vector<float> timesteps_vec(1, 1000.f); std::vector<float> timesteps_vec(1, 1000.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec); auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
// auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 3584, 256, 1); // auto context = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 3584, 256, 1);
// ggml_set_f32(context, 0.01f); // ggml_set_f32(context, 0.01f);
auto context = load_tensor_from_file(work_ctx, "./qwen_image_context.bin"); auto context = sd::load_tensor_from_file_as_tensor<float>("./qwen_image_context.bin");
print_ggml_tensor(context); print_sd_tensor(context);
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, {}, false, &out, work_ctx); auto out_opt = compute(8,
int64_t t1 = ggml_time_ms(); x,
timesteps,
context,
{},
false);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("qwen_image test done in %lldms", t1 - t0); LOG_DEBUG("qwen_image test done in %lldms", t1 - t0);
} }
} }

View File

@ -43,7 +43,7 @@ namespace Rope {
__STATIC_INLINE__ std::vector<std::vector<float>> rope(const std::vector<float>& pos, __STATIC_INLINE__ std::vector<std::vector<float>> rope(const std::vector<float>& pos,
int dim, int dim,
int theta, float theta,
const std::vector<int>& axis_wrap_dims = {}) { const std::vector<int>& axis_wrap_dims = {}) {
assert(dim % 2 == 0); assert(dim % 2 == 0);
int half_dim = dim / 2; int half_dim = dim / 2;
@ -167,7 +167,7 @@ namespace Rope {
__STATIC_INLINE__ std::vector<float> embed_nd(const std::vector<std::vector<float>>& ids, __STATIC_INLINE__ std::vector<float> embed_nd(const std::vector<std::vector<float>>& ids,
int bs, int bs,
int theta, const std::vector<float>& axis_thetas,
const std::vector<int>& axes_dim, const std::vector<int>& axes_dim,
const std::vector<std::vector<int>>& wrap_dims = {}) { const std::vector<std::vector<int>>& wrap_dims = {}) {
std::vector<std::vector<float>> trans_ids = transpose(ids); std::vector<std::vector<float>> trans_ids = transpose(ids);
@ -188,8 +188,12 @@ namespace Rope {
if (!wrap_dims.empty() && i < (int)wrap_dims.size()) { if (!wrap_dims.empty() && i < (int)wrap_dims.size()) {
axis_wrap_dims = wrap_dims[i]; axis_wrap_dims = wrap_dims[i];
} }
float axis_theta = 10000.0f;
if (!axis_thetas.empty()) {
axis_theta = axis_thetas[std::min(i, axis_thetas.size() - 1)];
}
std::vector<std::vector<float>> rope_emb = std::vector<std::vector<float>> rope_emb =
rope(trans_ids[i], axes_dim[i], theta, axis_wrap_dims); // [bs*pos_len, axes_dim[i]/2 * 2 * 2] rope(trans_ids[i], axes_dim[i], axis_theta, axis_wrap_dims); // [bs*pos_len, axes_dim[i]/2 * 2 * 2]
for (int b = 0; b < bs; ++b) { for (int b = 0; b < bs; ++b) {
for (int j = 0; j < pos_len; ++j) { for (int j = 0; j < pos_len; ++j) {
for (int k = 0; k < rope_emb[0].size(); ++k) { for (int k = 0; k < rope_emb[0].size(); ++k) {
@ -203,6 +207,15 @@ namespace Rope {
return flatten(emb); return flatten(emb);
} }
__STATIC_INLINE__ std::vector<float> embed_nd(const std::vector<std::vector<float>>& ids,
int bs,
float theta,
const std::vector<int>& axes_dim,
const std::vector<std::vector<int>>& wrap_dims = {}) {
std::vector<float> axis_thetas(axes_dim.size(), theta);
return embed_nd(ids, bs, axis_thetas, axes_dim, wrap_dims);
}
__STATIC_INLINE__ std::vector<std::vector<float>> gen_refs_ids(int patch_size, __STATIC_INLINE__ std::vector<std::vector<float>> gen_refs_ids(int patch_size,
int bs, int bs,
int axes_dim_num, int axes_dim_num,
@ -332,7 +345,7 @@ namespace Rope {
} }
} }
} }
return embed_nd(ids, bs, theta, axes_dim, wrap_dims); return embed_nd(ids, bs, static_cast<float>(theta), axes_dim, wrap_dims);
} }
__STATIC_INLINE__ std::vector<std::vector<float>> gen_qwen_image_ids(int h, __STATIC_INLINE__ std::vector<std::vector<float>> gen_qwen_image_ids(int h,
@ -421,7 +434,7 @@ namespace Rope {
} }
} }
} }
return embed_nd(ids, bs, theta, axes_dim, wrap_dims); return embed_nd(ids, bs, static_cast<float>(theta), axes_dim, wrap_dims);
} }
__STATIC_INLINE__ std::vector<std::vector<float>> gen_vid_ids(int t, __STATIC_INLINE__ std::vector<std::vector<float>> gen_vid_ids(int t,
@ -475,7 +488,7 @@ namespace Rope {
int theta, int theta,
const std::vector<int>& axes_dim) { const std::vector<int>& axes_dim) {
std::vector<std::vector<float>> ids = gen_vid_ids(t, h, w, pt, ph, pw, bs); std::vector<std::vector<float>> ids = gen_vid_ids(t, h, w, pt, ph, pw, bs);
return embed_nd(ids, bs, theta, axes_dim); return embed_nd(ids, bs, static_cast<float>(theta), axes_dim);
} }
__STATIC_INLINE__ std::vector<std::vector<float>> gen_qwen2vl_ids(int grid_h, __STATIC_INLINE__ std::vector<std::vector<float>> gen_qwen2vl_ids(int grid_h,
@ -511,7 +524,7 @@ namespace Rope {
int theta, int theta,
const std::vector<int>& axes_dim) { const std::vector<int>& axes_dim) {
std::vector<std::vector<float>> ids = gen_qwen2vl_ids(grid_h, grid_w, merge_size, window_index); std::vector<std::vector<float>> ids = gen_qwen2vl_ids(grid_h, grid_w, merge_size, window_index);
return embed_nd(ids, 1, theta, axes_dim); return embed_nd(ids, 1, static_cast<float>(theta), axes_dim);
} }
__STATIC_INLINE__ int bound_mod(int a, int m) { __STATIC_INLINE__ int bound_mod(int a, int m) {
@ -584,13 +597,13 @@ namespace Rope {
} }
} }
return embed_nd(ids, bs, theta, axes_dim, wrap_dims); return embed_nd(ids, bs, static_cast<float>(theta), axes_dim, wrap_dims);
} }
__STATIC_INLINE__ struct ggml_tensor* apply_rope(struct ggml_context* ctx, __STATIC_INLINE__ ggml_tensor* apply_rope(ggml_context* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* pe, ggml_tensor* pe,
bool rope_interleaved = true) { bool rope_interleaved = true) {
// x: [N, L, n_head, d_head] // x: [N, L, n_head, d_head]
// pe: [L, d_head/2, 2, 2], [[cos, -sin], [sin, cos]] // pe: [L, d_head/2, 2, 2], [[cos, -sin], [sin, cos]]
int64_t d_head = x->ne[0]; int64_t d_head = x->ne[0];
@ -628,14 +641,14 @@ namespace Rope {
return x_out; return x_out;
} }
__STATIC_INLINE__ struct ggml_tensor* attention(GGMLRunnerContext* ctx, __STATIC_INLINE__ ggml_tensor* attention(GGMLRunnerContext* ctx,
struct ggml_tensor* q, ggml_tensor* q,
struct ggml_tensor* k, ggml_tensor* k,
struct ggml_tensor* v, ggml_tensor* v,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mask, ggml_tensor* mask,
float kv_scale = 1.0f, float kv_scale = 1.0f,
bool rope_interleaved = true) { bool rope_interleaved = true) {
// q,k,v: [N, L, n_head, d_head] // q,k,v: [N, L, n_head, d_head]
// pe: [L, d_head/2, 2, 2] // pe: [L, d_head/2, 2, 2]
// return: [N, L, n_head*d_head] // return: [N, L, n_head*d_head]

361
src/sample-cache.cpp Normal file
View File

@ -0,0 +1,361 @@
#include "sample-cache.h"
namespace sd_sample {
static float get_cache_reuse_threshold(const sd_cache_params_t& params) {
    // Resolve the reuse threshold, substituting a per-mode default when the
    // caller left the value at INFINITY (the "unset" sentinel). Negative
    // results are clamped to zero.
    float threshold = params.reuse_threshold;
    if (threshold == INFINITY) {
        switch (params.mode) {
            case SD_CACHE_EASYCACHE:
                threshold = 0.2f;
                break;
            case SD_CACHE_UCACHE:
                threshold = 1.0f;
                break;
            default:
                break;
        }
    }
    return std::max(0.0f, threshold);
}
// True when this runtime was initialized in EasyCache mode.
bool SampleCacheRuntime::easycache_enabled() const {
    return mode == SampleCacheMode::EASYCACHE;
}
// True when this runtime was initialized in UCache mode.
bool SampleCacheRuntime::ucache_enabled() const {
    return mode == SampleCacheMode::UCACHE;
}
// True when this runtime was initialized in CacheDIT (DBCache / TaylorSeer) mode.
bool SampleCacheRuntime::cachedit_enabled() const {
    return mode == SampleCacheMode::CACHEDIT;
}
static bool has_valid_cache_percent_range(const sd_cache_params_t& cache_params) {
    // Only EasyCache and UCache consume the [start_percent, end_percent]
    // window; for every other mode the range is irrelevant and reported valid.
    bool uses_percent_range = cache_params.mode == SD_CACHE_EASYCACHE ||
                              cache_params.mode == SD_CACHE_UCACHE;
    if (!uses_percent_range) {
        return true;
    }
    // Require 0 <= start < 1, 0 < end <= 1, and start strictly before end.
    if (cache_params.start_percent < 0.0f || cache_params.start_percent >= 1.0f) {
        return false;
    }
    if (cache_params.end_percent <= 0.0f || cache_params.end_percent > 1.0f) {
        return false;
    }
    return cache_params.start_percent < cache_params.end_percent;
}
// Initialize `runtime` for EasyCache mode. On any failure (unsupported model
// type, or the cache refusing to initialize) `runtime` is left untouched and
// a warning is logged.
static void init_easycache_runtime(SampleCacheRuntime& runtime,
                                   SDVersion version,
                                   const sd_cache_params_t& cache_params,
                                   Denoiser* denoiser) {
    // EasyCache only applies to DiT-style models.
    if (!sd_version_is_dit(version)) {
        LOG_WARN("EasyCache requested but not supported for this model type");
        return;
    }
    EasyCacheConfig config;
    config.enabled         = true;
    config.reuse_threshold = get_cache_reuse_threshold(cache_params);
    config.start_percent   = cache_params.start_percent;
    config.end_percent     = cache_params.end_percent;
    runtime.easycache.init(config, denoiser);
    // init() may still decline; only record the mode on success.
    if (!runtime.easycache.enabled()) {
        LOG_WARN("EasyCache requested but could not be initialized for this run");
        return;
    }
    runtime.mode = SampleCacheMode::EASYCACHE;
    LOG_INFO("EasyCache enabled - threshold: %.3f, start: %.2f, end: %.2f",
             config.reuse_threshold,
             config.start_percent,
             config.end_percent);
}
// Initialize `runtime` for UCache mode. On any failure (unsupported model
// type, or the cache refusing to initialize) `runtime` is left untouched and
// a warning is logged.
static void init_ucache_runtime(SampleCacheRuntime& runtime,
                                SDVersion version,
                                const sd_cache_params_t& cache_params,
                                Denoiser* denoiser,
                                const std::vector<float>& sigmas) {
    // UCache only applies to UNET-style models.
    if (!sd_version_is_unet(version)) {
        LOG_WARN("UCache requested but not supported for this model type (only UNET models)");
        return;
    }
    UCacheConfig config;
    config.enabled         = true;
    config.reuse_threshold = get_cache_reuse_threshold(cache_params);
    config.start_percent   = cache_params.start_percent;
    config.end_percent     = cache_params.end_percent;
    // Clamp the decay rate into [0, 1].
    config.error_decay_rate       = std::max(0.0f, std::min(1.0f, cache_params.error_decay_rate));
    config.use_relative_threshold = cache_params.use_relative_threshold;
    config.reset_error_on_compute = cache_params.reset_error_on_compute;
    runtime.ucache.init(config, denoiser);
    // init() may still decline; only record the mode on success.
    if (!runtime.ucache.enabled()) {
        LOG_WARN("UCache requested but could not be initialized for this run");
        return;
    }
    runtime.ucache.set_sigmas(sigmas);
    runtime.mode = SampleCacheMode::UCACHE;
    LOG_INFO("UCache enabled - threshold: %.3f, start: %.2f, end: %.2f, decay: %.2f, relative: %s, reset: %s",
             config.reuse_threshold,
             config.start_percent,
             config.end_percent,
             config.error_decay_rate,
             config.use_relative_threshold ? "true" : "false",
             config.reset_error_on_compute ? "true" : "false");
}
// Initialize `runtime` for the CacheDIT family: DBCache, TaylorSeer, or the
// combined SD_CACHE_CACHE_DIT mode that enables both. On any failure
// (unsupported model type, or the cache refusing to initialize) `runtime` is
// left untouched and a warning is logged.
static void init_cachedit_runtime(SampleCacheRuntime& runtime,
                                  SDVersion version,
                                  const sd_cache_params_t& cache_params,
                                  const std::vector<float>& sigmas) {
    // CacheDIT only applies to DiT-style models.
    if (!sd_version_is_dit(version)) {
        LOG_WARN("CacheDIT requested but not supported for this model type (only DiT models)");
        return;
    }
    // DBCache is active for the DBCACHE mode and the combined CACHE_DIT mode.
    DBCacheConfig dbcfg;
    dbcfg.enabled                     = (cache_params.mode == SD_CACHE_DBCACHE || cache_params.mode == SD_CACHE_CACHE_DIT);
    dbcfg.Fn_compute_blocks           = cache_params.Fn_compute_blocks;
    dbcfg.Bn_compute_blocks           = cache_params.Bn_compute_blocks;
    dbcfg.residual_diff_threshold     = cache_params.residual_diff_threshold;
    dbcfg.max_warmup_steps            = cache_params.max_warmup_steps;
    dbcfg.max_cached_steps            = cache_params.max_cached_steps;
    dbcfg.max_continuous_cached_steps = cache_params.max_continuous_cached_steps;
    // The steps-computation mask is optional and only parsed when non-empty.
    if (cache_params.scm_mask != nullptr && strlen(cache_params.scm_mask) > 0) {
        dbcfg.steps_computation_mask = parse_scm_mask(cache_params.scm_mask);
    }
    dbcfg.scm_policy_dynamic = cache_params.scm_policy_dynamic;
    // TaylorSeer is active for the TAYLORSEER mode and the combined mode.
    TaylorSeerConfig tcfg;
    tcfg.enabled             = (cache_params.mode == SD_CACHE_TAYLORSEER || cache_params.mode == SD_CACHE_CACHE_DIT);
    tcfg.n_derivatives       = cache_params.taylorseer_n_derivatives;
    tcfg.skip_interval_steps = cache_params.taylorseer_skip_interval;
    runtime.cachedit.init(dbcfg, tcfg);
    // init() may still decline; only record the mode on success.
    if (!runtime.cachedit.enabled()) {
        LOG_WARN("CacheDIT requested but could not be initialized for this run");
        return;
    }
    runtime.cachedit.set_sigmas(sigmas);
    runtime.mode = SampleCacheMode::CACHEDIT;
    LOG_INFO("CacheDIT enabled - mode: %s, Fn: %d, Bn: %d, threshold: %.3f, warmup: %d",
             cache_params.mode == SD_CACHE_CACHE_DIT ? "DBCache+TaylorSeer" : (cache_params.mode == SD_CACHE_DBCACHE ? "DBCache" : "TaylorSeer"),
             dbcfg.Fn_compute_blocks,
             dbcfg.Bn_compute_blocks,
             dbcfg.residual_diff_threshold,
             dbcfg.max_warmup_steps);
}
// Set up the Spectrum prediction cache. Unlike the other backends, Spectrum
// does not claim `runtime.mode`; it is toggled via `runtime.spectrum_enabled`.
static void init_spectrum_runtime(SampleCacheRuntime& runtime,
                                  SDVersion version,
                                  const sd_cache_params_t& cache_params,
                                  const std::vector<float>& sigmas) {
    const bool supported = sd_version_is_unet(version) || sd_version_is_dit(version);
    if (!supported) {
        LOG_WARN("Spectrum requested but not supported for this model type (only UNET and DiT models)");
        return;
    }

    SpectrumConfig spectrum_cfg;
    spectrum_cfg.w            = cache_params.spectrum_w;
    spectrum_cfg.m            = cache_params.spectrum_m;
    spectrum_cfg.lam          = cache_params.spectrum_lam;
    spectrum_cfg.window_size  = cache_params.spectrum_window_size;
    spectrum_cfg.flex_window  = cache_params.spectrum_flex_window;
    spectrum_cfg.warmup_steps = cache_params.spectrum_warmup_steps;
    spectrum_cfg.stop_percent = cache_params.spectrum_stop_percent;

    // N sigmas describe N-1 denoising steps.
    const size_t total_steps = sigmas.empty() ? 0 : sigmas.size() - 1;
    runtime.spectrum.init(spectrum_cfg, total_steps);
    runtime.spectrum_enabled = true;

    LOG_INFO("Spectrum enabled - w: %.2f, m: %d, lam: %.2f, window: %d, flex: %.2f, warmup: %d, stop: %.0f%%",
             spectrum_cfg.w, spectrum_cfg.m, spectrum_cfg.lam,
             spectrum_cfg.window_size, spectrum_cfg.flex_window,
             spectrum_cfg.warmup_steps, spectrum_cfg.stop_percent * 100.0f);
}
// Build the cache runtime for one sampling run. Returns a fully disabled
// runtime when caching is not requested or the percent range is invalid.
SampleCacheRuntime init_sample_cache_runtime(SDVersion version,
                                             const sd_cache_params_t* cache_params,
                                             Denoiser* denoiser,
                                             const std::vector<float>& sigmas) {
    SampleCacheRuntime runtime;
    if (cache_params == nullptr || cache_params->mode == SD_CACHE_DISABLED) {
        return runtime;  // caching not requested
    }
    if (!has_valid_cache_percent_range(*cache_params)) {
        LOG_WARN("Cache disabled due to invalid percent range (start=%.3f, end=%.3f)",
                 cache_params->start_percent,
                 cache_params->end_percent);
        return runtime;
    }

    const auto mode = cache_params->mode;
    if (mode == SD_CACHE_EASYCACHE) {
        init_easycache_runtime(runtime, version, *cache_params, denoiser);
    } else if (mode == SD_CACHE_UCACHE) {
        init_ucache_runtime(runtime, version, *cache_params, denoiser, sigmas);
    } else if (mode == SD_CACHE_DBCACHE || mode == SD_CACHE_TAYLORSEER || mode == SD_CACHE_CACHE_DIT) {
        // All three map onto the shared CacheDIT runtime.
        init_cachedit_runtime(runtime, version, *cache_params, sigmas);
    } else if (mode == SD_CACHE_SPECTRUM) {
        init_spectrum_runtime(runtime, version, *cache_params, sigmas);
    }
    return runtime;
}
// Constructed once per sampling step: derives a 0-based step index from the
// sampler's step number and notifies the active backend that the step begins.
// A non-positive `step` yields step_index == -1, which disables all dispatch.
SampleStepCacheDispatcher::SampleStepCacheDispatcher(SampleCacheRuntime& runtime, int step, float sigma)
    : runtime(runtime), step(step), sigma(sigma), step_index(step > 0 ? (step - 1) : -1) {
    if (step_index < 0) {
        return;
    }
    if (runtime.mode == SampleCacheMode::EASYCACHE) {
        runtime.easycache.begin_step(step_index, sigma);
    } else if (runtime.mode == SampleCacheMode::UCACHE) {
        runtime.ucache.begin_step(step_index, sigma);
    } else if (runtime.mode == SampleCacheMode::CACHEDIT) {
        runtime.cachedit.begin_step(step_index, sigma);
    }
}
// Ask the active backend whether the model evaluation for `condition` can be
// skipped; when it returns true, `*output` holds the cached/predicted result.
bool SampleStepCacheDispatcher::before_condition(const void* condition,
                                                 const sd::Tensor<float>& input,
                                                 sd::Tensor<float>* output) {
    // No dispatch without a valid step, a condition, or an output slot.
    if (step_index < 0 || condition == nullptr || output == nullptr) {
        return false;
    }
    if (runtime.mode == SampleCacheMode::EASYCACHE) {
        return runtime.easycache.before_condition(condition, input, output, sigma, step_index);
    }
    if (runtime.mode == SampleCacheMode::UCACHE) {
        return runtime.ucache.before_condition(condition, input, output, sigma, step_index);
    }
    if (runtime.mode == SampleCacheMode::CACHEDIT) {
        return runtime.cachedit.before_condition(condition, input, output, sigma, step_index);
    }
    return false;
}
// Feed a freshly computed model output back to the active backend so it can
// update its cache state for `condition`.
void SampleStepCacheDispatcher::after_condition(const void* condition,
                                                const sd::Tensor<float>& input,
                                                const sd::Tensor<float>& output) {
    if (step_index < 0 || condition == nullptr) {
        return;
    }
    if (runtime.mode == SampleCacheMode::EASYCACHE) {
        runtime.easycache.after_condition(condition, input, output);
    } else if (runtime.mode == SampleCacheMode::UCACHE) {
        runtime.ucache.after_condition(condition, input, output);
    } else if (runtime.mode == SampleCacheMode::CACHEDIT) {
        runtime.cachedit.after_condition(condition, input, output);
    }
}
// True when the active backend decided to skip the whole current step.
bool SampleStepCacheDispatcher::is_step_skipped() const {
    if (runtime.mode == SampleCacheMode::EASYCACHE) {
        return runtime.easycache.is_step_skipped();
    }
    if (runtime.mode == SampleCacheMode::UCACHE) {
        return runtime.ucache.is_step_skipped();
    }
    if (runtime.mode == SampleCacheMode::CACHEDIT) {
        return runtime.cachedit.is_step_skipped();
    }
    return false;
}
// Log per-backend skip statistics (and estimated speedup) after sampling.
// The three step-skipping caches share identical reporting, so it lives in
// one helper instead of three copy-pasted blocks. The helper also guards the
// speedup division when every step was skipped (the original Spectrum branch
// lacked that guard and could divide by zero).
void log_sample_cache_summary(const SampleCacheRuntime& runtime, size_t total_steps) {
    auto log_skip_summary = [total_steps](const char* name, int steps_skipped) {
        if (total_steps == 0) {
            return;
        }
        if (steps_skipped <= 0) {
            LOG_INFO("%s completed without skipping steps", name);
            return;
        }
        if (steps_skipped < static_cast<int>(total_steps)) {
            double speedup = static_cast<double>(total_steps) /
                             static_cast<double>(total_steps - steps_skipped);
            LOG_INFO("%s skipped %d/%zu steps (%.2fx estimated speedup)",
                     name,
                     steps_skipped,
                     total_steps,
                     speedup);
        } else {
            // skipped == total: no meaningful speedup ratio to report.
            LOG_INFO("%s skipped %d/%zu steps",
                     name,
                     steps_skipped,
                     total_steps);
        }
    };

    if (runtime.easycache_enabled()) {
        log_skip_summary("EasyCache", runtime.easycache.total_steps_skipped);
    }
    if (runtime.ucache_enabled()) {
        log_skip_summary("UCache", runtime.ucache.total_steps_skipped);
    }
    if (runtime.cachedit_enabled()) {
        log_skip_summary("CacheDIT", runtime.cachedit.total_steps_skipped);
    }
    // Spectrum intentionally stays silent when it skipped nothing.
    if (runtime.spectrum_enabled && runtime.spectrum.total_steps_skipped > 0 && total_steps > 0) {
        int skipped = runtime.spectrum.total_steps_skipped;
        if (skipped < static_cast<int>(total_steps)) {
            double speedup = static_cast<double>(total_steps) /
                             static_cast<double>(total_steps - skipped);
            LOG_INFO("Spectrum skipped %d/%zu steps (%.2fx estimated speedup)",
                     skipped,
                     total_steps,
                     speedup);
        } else {
            // Guard against division by zero when every step was predicted.
            LOG_INFO("Spectrum skipped %d/%zu steps",
                     skipped,
                     total_steps);
        }
    }
}
} // namespace sd_sample

61
src/sample-cache.h Normal file
View File

@ -0,0 +1,61 @@
#ifndef __SAMPLE_CACHE_H__
#define __SAMPLE_CACHE_H__
#include <vector>
#include "cache_dit.hpp"
#include "denoiser.hpp"
#include "easycache.hpp"
#include "model.h"
#include "spectrum.hpp"
#include "tensor.hpp"
#include "ucache.hpp"
#include "util.h"
namespace sd_sample {
// Identifies which per-step sampling cache backend is active for a run.
// NOTE: Spectrum is deliberately not part of this enum; it is tracked
// separately via SampleCacheRuntime::spectrum_enabled.
enum class SampleCacheMode {
    NONE,       // no step caching
    EASYCACHE,
    UCACHE,
    CACHEDIT,   // DBCache and/or TaylorSeer
};
// Aggregated state of every supported sampling cache for one generation run.
// Only the backend selected by `mode` is consulted by the step dispatcher;
// Spectrum runs off its own flag instead of `mode`.
struct SampleCacheRuntime {
    SampleCacheMode mode = SampleCacheMode::NONE;  // active mode-based backend
    EasyCacheState easycache;
    UCacheState ucache;
    CacheDitConditionState cachedit;
    SpectrumState spectrum;
    bool spectrum_enabled = false;  // Spectrum toggle, independent of `mode`

    // Convenience queries for which backend (if any) is active.
    bool easycache_enabled() const;
    bool ucache_enabled() const;
    bool cachedit_enabled() const;
};
// Per-step helper: constructed once per sampling step, it notifies the active
// cache backend that the step begins and then routes the per-condition
// before/after hooks to it.
struct SampleStepCacheDispatcher {
    SampleCacheRuntime& runtime;
    int step;        // sampler step number; values <= 0 disable dispatch
    float sigma;     // noise level of this step
    int step_index;  // 0-based index derived from `step` (-1 when disabled)

    SampleStepCacheDispatcher(SampleCacheRuntime& runtime, int step, float sigma);
    // Returns true when the backend supplied a cached `output` and the real
    // model evaluation for `condition` can be skipped.
    bool before_condition(const void* condition, const sd::Tensor<float>& input, sd::Tensor<float>* output);
    // Feed the freshly computed result back into the cache.
    void after_condition(const void* condition, const sd::Tensor<float>& input, const sd::Tensor<float>& output);
    // True if the active backend decided to skip the current step entirely.
    bool is_step_skipped() const;
};
// Build the cache runtime for one sampling run. Returns a fully disabled
// runtime when cache_params is null, disabled, or has an invalid percent range.
SampleCacheRuntime init_sample_cache_runtime(SDVersion version,
                                             const sd_cache_params_t* cache_params,
                                             Denoiser* denoiser,
                                             const std::vector<float>& sigmas);
// Log per-backend skip statistics (and estimated speedup) after sampling.
void log_sample_cache_summary(const SampleCacheRuntime& runtime, size_t total_steps);
} // namespace sd_sample
#endif // __SAMPLE_CACHE_H__

187
src/spectrum.hpp Normal file
View File

@ -0,0 +1,187 @@
#ifndef __SPECTRUM_HPP__
#define __SPECTRUM_HPP__
#include <cmath>
#include <cstring>
#include <vector>
#include "ggml_extend.hpp"
#include "tensor.hpp"
// Tunables for the Spectrum step-prediction cache (see SpectrumState).
struct SpectrumConfig {
    float w = 0.40f;            // blend weight of the Chebyshev fit; (1 - w) goes to linear extrapolation
    int m = 3;                  // Chebyshev polynomial degree (m + 1 basis terms)
    float lam = 1.0f;           // ridge regularization added to the normal-equation diagonal
    int window_size = 2;        // initial compute window: up to floor(window) - 1 predicted steps per computed step
    float flex_window = 0.50f;  // window growth added after each computed step past warmup
    int warmup_steps = 4;       // steps always computed before any prediction
    float stop_percent = 0.9f;  // stop predicting after this fraction of total steps
};
// Spectrum cache: predicts the denoiser output for some sampling steps from a
// short history of computed outputs, instead of running the model.
// The predictor fits a ridge-regularized Chebyshev polynomial (degree config.m)
// to the last K computed outputs over a normalized time axis, evaluates the
// fit at the current step, and blends that with a simple linear extrapolation
// from the two most recent outputs.
struct SpectrumState {
    SpectrumConfig config;
    int cnt = 0;                  // sampling steps consumed so far (computed + predicted)
    int num_cached = 0;           // consecutive predicted steps since the last compute
    float curr_ws = 2.0f;         // current (fractional) compute window; grows by flex_window
    int K = 6;                    // max history entries kept for the polynomial fit
    int stop_step = 0;            // step index after which prediction is disabled
    int total_steps_skipped = 0;  // lifetime count of predicted (skipped) steps

    std::vector<std::vector<float>> H_buf;  // last <= K computed outputs, flattened
    std::vector<float> T_buf;               // normalized time value for each H_buf entry

    // Reset all state for a new run of `total_steps` sampling steps.
    void init(const SpectrumConfig& cfg, size_t total_steps) {
        config = cfg;
        cnt = 0;
        num_cached = 0;
        curr_ws = (float)cfg.window_size;
        // A degree-m fit needs at least m + 1 samples; keep at least 6.
        K = std::max(cfg.m + 1, 6);
        stop_step = (int)(cfg.stop_percent * (float)total_steps);
        total_steps_skipped = 0;
        H_buf.clear();
        T_buf.clear();
    }

    // Map a step count onto the Chebyshev domain [-1, 1].
    // NOTE(review): the normalization hardcodes a 50-step schedule; for runs
    // with a different step count the domain is off-scale — confirm intended.
    float taus(int step_cnt) const {
        return (step_cnt / 50.0f) * 2.0f - 1.0f;
    }

    // Decide whether the current step may be predicted instead of computed.
    bool should_predict() {
        if (cnt < config.warmup_steps)  // always compute during warmup
            return false;
        if (stop_step > 0 && cnt >= stop_step)  // past the stop fraction
            return false;
        if ((int)H_buf.size() < 2)  // need >= 2 history entries to extrapolate
            return false;
        int ws = std::max(1, (int)std::floor(curr_ws));
        // Allow up to ws - 1 consecutive predictions, then force a compute.
        return (num_cached + 1) % ws != 0;
    }

    // Record a freshly computed denoiser output into the history.
    void update(const sd::Tensor<float>& denoised) {
        H_buf.emplace_back(denoised.data(), denoised.data() + denoised.numel());
        T_buf.push_back(taus(cnt));
        // Keep only the K most recent samples.
        while ((int)H_buf.size() > K) {
            H_buf.erase(H_buf.begin());
            T_buf.erase(T_buf.begin());
        }
        if (cnt >= config.warmup_steps)
            curr_ws += config.flex_window;  // gradually allow longer predicted runs
        num_cached = 0;
        cnt++;
    }

    // Fill `denoised` with a predicted output for the current step.
    // Precondition: should_predict() returned true (>= 2 history entries).
    // Assumes all H_buf entries have the same element count as *denoised
    // — TODO confirm callers never change resolution mid-run.
    void predict(sd::Tensor<float>* denoised) {
        GGML_ASSERT(denoised != nullptr);
        int64_t F = (int64_t)H_buf[0].size();
        int K_curr = (int)H_buf.size();
        int M1 = config.m + 1;  // number of Chebyshev basis terms T_0..T_m
        float tau_at = taus(cnt);
        // Design matrix X: row i = Chebyshev basis at T_buf[i], built with
        // the recurrence T_j(t) = 2 t T_{j-1}(t) - T_{j-2}(t).
        std::vector<float> X(K_curr * M1);
        for (int i = 0; i < K_curr; i++) {
            X[i * M1] = 1.0f;
            if (M1 > 1)
                X[i * M1 + 1] = T_buf[i];
            for (int j = 2; j < M1; j++)
                X[i * M1 + j] = 2.0f * T_buf[i] * X[i * M1 + j - 1] - X[i * M1 + j - 2];
        }
        // Basis vector at the prediction point tau_at.
        std::vector<float> x_star(M1);
        x_star[0] = 1.0f;
        if (M1 > 1)
            x_star[1] = tau_at;
        for (int j = 2; j < M1; j++)
            x_star[j] = 2.0f * tau_at * x_star[j - 1] - x_star[j - 2];
        // Ridge normal equations: XtX = X^T X + lam * I.
        std::vector<float> XtX(M1 * M1, 0.0f);
        for (int i = 0; i < M1; i++) {
            for (int j = 0; j < M1; j++) {
                float sum = 0.0f;
                for (int k = 0; k < K_curr; k++)
                    sum += X[k * M1 + i] * X[k * M1 + j];
                XtX[i * M1 + j] = sum + (i == j ? config.lam : 0.0f);
            }
        }
        // Factor XtX = L L^T; on failure add diagonal jitter proportional to
        // the trace and retry once.
        std::vector<float> L(M1 * M1, 0.0f);
        if (!cholesky_decompose(XtX.data(), L.data(), M1)) {
            float trace = 0.0f;
            for (int i = 0; i < M1; i++)
                trace += XtX[i * M1 + i];
            for (int i = 0; i < M1; i++)
                XtX[i * M1 + i] += 1e-4f * trace / M1;
            cholesky_decompose(XtX.data(), L.data(), M1);
        }
        // v = (XtX)^-1 x_star, so weights[k] = x_star^T (XtX)^-1 X_k:
        // evaluating the ridge fit at tau_at reduces to a weighted sum of
        // the stored history samples.
        std::vector<float> v(M1);
        cholesky_solve(L.data(), x_star.data(), v.data(), M1);
        std::vector<float> weights(K_curr, 0.0f);
        for (int k = 0; k < K_curr; k++)
            for (int j = 0; j < M1; j++)
                weights[k] += X[k * M1 + j] * v[j];
        float* out = denoised->data();
        float w_cheb = config.w;
        float w_taylor = 1.0f - w_cheb;
        const float* h_last = H_buf.back().data();
        const float* h_prev = H_buf[H_buf.size() - 2].data();
        // Per element: blend the Chebyshev prediction with a half-step linear
        // extrapolation from the last two computed outputs.
        for (int64_t f = 0; f < F; f++) {
            float pred_cheb = 0.0f;
            for (int k = 0; k < K_curr; k++)
                pred_cheb += weights[k] * H_buf[k][f];
            float pred_taylor = h_last[f] + 0.5f * (h_last[f] - h_prev[f]);
            out[f] = w_taylor * pred_taylor + w_cheb * pred_cheb;
        }
        num_cached++;
        total_steps_skipped++;
        cnt++;
    }

private:
    // Standard Cholesky factorization A = L L^T for a symmetric
    // positive-definite matrix; returns false when A is not PD.
    static bool cholesky_decompose(const float* A, float* L, int n) {
        std::memset(L, 0, n * n * sizeof(float));
        for (int i = 0; i < n; i++) {
            for (int j = 0; j <= i; j++) {
                float sum = 0.0f;
                for (int k = 0; k < j; k++)
                    sum += L[i * n + k] * L[j * n + k];
                if (i == j) {
                    float diag = A[i * n + i] - sum;
                    if (diag <= 0.0f)
                        return false;
                    L[i * n + j] = std::sqrt(diag);
                } else {
                    L[i * n + j] = (A[i * n + j] - sum) / L[j * n + j];
                }
            }
        }
        return true;
    }

    // Solve (L L^T) x = b given the Cholesky factor L:
    // forward-substitute L y = b, then back-substitute L^T x = y.
    static void cholesky_solve(const float* L, const float* b, float* x, int n) {
        std::vector<float> y(n);
        for (int i = 0; i < n; i++) {
            float sum = 0.0f;
            for (int j = 0; j < i; j++)
                sum += L[i * n + j] * y[j];
            y[i] = (b[i] - sum) / L[i * n + i];
        }
        for (int i = n - 1; i >= 0; i--) {
            float sum = 0.0f;
            for (int j = i + 1; j < n; j++)
                sum += L[j * n + i] * x[j];
            x[i] = (y[i] - sum) / L[i * n + i];
        }
    }
};
#endif // __SPECTRUM_HPP__

3565
src/stable-diffusion.cpp Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -37,7 +37,7 @@ public:
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
// x: [n, n_in, h, w] // x: [n, n_in, h, w]
// return: [n, n_out, h, w] // return: [n, n_out, h, w]
@ -107,7 +107,7 @@ public:
blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, z_channels, {3, 3}, {1, 1}, {1, 1})); blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, z_channels, {3, 3}, {1, 1}, {1, 1}));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
// x: [n, in_channels, h, w] // x: [n, in_channels, h, w]
// return: [n, z_channels, h/8, w/8] // return: [n, z_channels, h/8, w/8]
@ -157,7 +157,7 @@ public:
blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, out_channels, {3, 3}, {1, 1}, {1, 1})); blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* z) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* z) override {
// z: [n, z_channels, h, w] // z: [n, z_channels, h, w]
// return: [n, out_channels, h*8, w*8] // return: [n, out_channels, h*8, w*8]
@ -192,7 +192,7 @@ public:
blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels * stride, channels, {1, 1}, {1, 1}, {0, 0}, {1, 1}, false)); blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels * stride, channels, {1, 1}, {1, 1}, {0, 0}, {1, 1}, false));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
auto conv = std::dynamic_pointer_cast<UnaryBlock>(blocks["conv"]); auto conv = std::dynamic_pointer_cast<UnaryBlock>(blocks["conv"]);
auto h = x; auto h = x;
if (stride != 1) { if (stride != 1) {
@ -212,7 +212,7 @@ public:
blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, channels * stride, {1, 1}, {1, 1}, {0, 0}, {1, 1}, false)); blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, channels * stride, {1, 1}, {1, 1}, {0, 0}, {1, 1}, false));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
auto conv = std::dynamic_pointer_cast<UnaryBlock>(blocks["conv"]); auto conv = std::dynamic_pointer_cast<UnaryBlock>(blocks["conv"]);
auto h = conv->forward(ctx, x); auto h = conv->forward(ctx, x);
if (stride != 1) { if (stride != 1) {
@ -236,7 +236,7 @@ public:
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* past) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, ggml_tensor* past) {
// x: [n, channels, h, w] // x: [n, channels, h, w]
auto conv0 = std::dynamic_pointer_cast<Conv2d>(blocks["conv.0"]); auto conv0 = std::dynamic_pointer_cast<Conv2d>(blocks["conv.0"]);
auto conv1 = std::dynamic_pointer_cast<Conv2d>(blocks["conv.2"]); auto conv1 = std::dynamic_pointer_cast<Conv2d>(blocks["conv.2"]);
@ -260,10 +260,10 @@ public:
} }
}; };
struct ggml_tensor* patchify(struct ggml_context* ctx, ggml_tensor* patchify(ggml_context* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t patch_size, int64_t patch_size,
int64_t b = 1) { int64_t b = 1) {
// x: [f, b*c, h*q, w*r] // x: [f, b*c, h*q, w*r]
// return: [f, b*c*r*q, h, w] // return: [f, b*c*r*q, h, w]
if (patch_size == 1) { if (patch_size == 1) {
@ -289,10 +289,10 @@ struct ggml_tensor* patchify(struct ggml_context* ctx,
return x; return x;
} }
struct ggml_tensor* unpatchify(struct ggml_context* ctx, ggml_tensor* unpatchify(ggml_context* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t patch_size, int64_t patch_size,
int64_t b = 1) { int64_t b = 1) {
// x: [f, b*c*r*q, h, w] // x: [f, b*c*r*q, h, w]
// return: [f, b*c, h*q, w*r] // return: [f, b*c, h*q, w*r]
if (patch_size == 1) { if (patch_size == 1) {
@ -339,7 +339,7 @@ public:
blocks[std::to_string(index)] = std::shared_ptr<GGMLBlock>(new Conv2d(hidden, z_channels, {3, 3}, {1, 1}, {1, 1})); blocks[std::to_string(index)] = std::shared_ptr<GGMLBlock>(new Conv2d(hidden, z_channels, {3, 3}, {1, 1}, {1, 1}));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* z) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* z) override {
auto first_conv = std::dynamic_pointer_cast<Conv2d>(blocks["0"]); auto first_conv = std::dynamic_pointer_cast<Conv2d>(blocks["0"]);
if (patch_size > 1) { if (patch_size > 1) {
@ -396,7 +396,7 @@ public:
blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels[num_layers], out_channels * patch_size * patch_size, {3, 3}, {1, 1}, {1, 1})); blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels[num_layers], out_channels * patch_size * patch_size, {3, 3}, {1, 1}, {1, 1}));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* z) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* z) override {
auto first_conv = std::dynamic_pointer_cast<Conv2d>(blocks["1"]); auto first_conv = std::dynamic_pointer_cast<Conv2d>(blocks["1"]);
// Clamp() // Clamp()
@ -442,11 +442,13 @@ protected:
bool decode_only; bool decode_only;
SDVersion version; SDVersion version;
public:
int z_channels = 16;
public: public:
TAEHV(bool decode_only = true, SDVersion version = VERSION_WAN2) TAEHV(bool decode_only = true, SDVersion version = VERSION_WAN2)
: decode_only(decode_only), version(version) { : decode_only(decode_only), version(version) {
int z_channels = 16; int patch = 1;
int patch = 1;
if (version == VERSION_WAN2_2_TI2V) { if (version == VERSION_WAN2_2_TI2V) {
z_channels = 48; z_channels = 48;
patch = 2; patch = 2;
@ -457,7 +459,7 @@ public:
} }
} }
struct ggml_tensor* decode(GGMLRunnerContext* ctx, struct ggml_tensor* z) { ggml_tensor* decode(GGMLRunnerContext* ctx, ggml_tensor* z) {
auto decoder = std::dynamic_pointer_cast<TinyVideoDecoder>(blocks["decoder"]); auto decoder = std::dynamic_pointer_cast<TinyVideoDecoder>(blocks["decoder"]);
if (sd_version_is_wan(version)) { if (sd_version_is_wan(version)) {
// (W, H, C, T) -> (W, H, T, C) // (W, H, C, T) -> (W, H, T, C)
@ -471,7 +473,7 @@ public:
return result; return result;
} }
struct ggml_tensor* encode(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* encode(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto encoder = std::dynamic_pointer_cast<TinyVideoEncoder>(blocks["encoder"]); auto encoder = std::dynamic_pointer_cast<TinyVideoEncoder>(blocks["encoder"]);
// (W, H, T, C) -> (W, H, C, T) // (W, H, T, C) -> (W, H, C, T)
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 1, 3, 2)); x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 1, 3, 2));
@ -494,10 +496,12 @@ protected:
bool decode_only; bool decode_only;
bool taef2 = false; bool taef2 = false;
public:
int z_channels = 4;
public: public:
TAESD(bool decode_only = true, SDVersion version = VERSION_SD1) TAESD(bool decode_only = true, SDVersion version = VERSION_SD1)
: decode_only(decode_only) { : decode_only(decode_only) {
int z_channels = 4;
bool use_midblock_gn = false; bool use_midblock_gn = false;
taef2 = sd_version_is_flux2(version); taef2 = sd_version_is_flux2(version);
@ -515,7 +519,7 @@ public:
} }
} }
struct ggml_tensor* decode(GGMLRunnerContext* ctx, struct ggml_tensor* z) { ggml_tensor* decode(GGMLRunnerContext* ctx, ggml_tensor* z) {
auto decoder = std::dynamic_pointer_cast<TinyDecoder>(blocks["decoder.layers"]); auto decoder = std::dynamic_pointer_cast<TinyDecoder>(blocks["decoder.layers"]);
if (taef2) { if (taef2) {
z = unpatchify(ctx->ggml_ctx, z, 2); z = unpatchify(ctx->ggml_ctx, z, 2);
@ -523,7 +527,7 @@ public:
return decoder->forward(ctx, z); return decoder->forward(ctx, z);
} }
struct ggml_tensor* encode(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* encode(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto encoder = std::dynamic_pointer_cast<TinyEncoder>(blocks["encoder.layers"]); auto encoder = std::dynamic_pointer_cast<TinyEncoder>(blocks["encoder.layers"]);
auto z = encoder->forward(ctx, x); auto z = encoder->forward(ctx, x);
if (taef2) { if (taef2) {
@ -533,20 +537,7 @@ public:
} }
}; };
struct TinyAutoEncoder : public GGMLRunner { struct TinyImageAutoEncoder : public VAE {
TinyAutoEncoder(ggml_backend_t backend, bool offload_params_to_cpu)
: GGMLRunner(backend, offload_params_to_cpu) {}
virtual bool compute(const int n_threads,
struct ggml_tensor* z,
bool decode_graph,
struct ggml_tensor** output,
struct ggml_context* output_ctx = nullptr) = 0;
virtual bool load_from_file(const std::string& file_path, int n_threads) = 0;
virtual void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) = 0;
};
struct TinyImageAutoEncoder : public TinyAutoEncoder {
TAESD taesd; TAESD taesd;
bool decode_only = false; bool decode_only = false;
@ -558,7 +549,8 @@ struct TinyImageAutoEncoder : public TinyAutoEncoder {
SDVersion version = VERSION_SD1) SDVersion version = VERSION_SD1)
: decode_only(decoder_only), : decode_only(decoder_only),
taesd(decoder_only, version), taesd(decoder_only, version),
TinyAutoEncoder(backend, offload_params_to_cpu) { VAE(version, backend, offload_params_to_cpu) {
scale_input = false;
taesd.init(params_ctx, tensor_storage_map, prefix); taesd.init(params_ctx, tensor_storage_map, prefix);
} }
@ -566,60 +558,48 @@ struct TinyImageAutoEncoder : public TinyAutoEncoder {
return "taesd"; return "taesd";
} }
bool load_from_file(const std::string& file_path, int n_threads) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
LOG_INFO("loading taesd from '%s', decode_only = %s", file_path.c_str(), decode_only ? "true" : "false");
alloc_params_buffer();
std::map<std::string, ggml_tensor*> taesd_tensors;
taesd.get_param_tensors(taesd_tensors);
std::set<std::string> ignore_tensors;
if (decode_only) {
ignore_tensors.insert("encoder.");
}
ModelLoader model_loader;
if (!model_loader.init_from_file_and_convert_name(file_path)) {
LOG_ERROR("init taesd model loader from file failed: '%s'", file_path.c_str());
return false;
}
bool success = model_loader.load_tensors(taesd_tensors, ignore_tensors, n_threads);
if (!success) {
LOG_ERROR("load tae tensors from model loader failed");
return false;
}
LOG_INFO("taesd model loaded");
return success;
}
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
taesd.get_param_tensors(tensors, prefix); taesd.get_param_tensors(tensors, prefix);
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) { sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx); SD_UNUSED(rng);
z = to_backend(z); return vae_output;
auto runner_ctx = get_context(); }
struct ggml_tensor* out = decode_graph ? taesd.decode(&runner_ctx, z) : taesd.encode(&runner_ctx, z);
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
return latents;
}
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
return latents;
}
int get_encoder_output_channels(int input_channels) {
return taesd.z_channels;
}
ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) {
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
ggml_tensor* z = make_input(z_tensor);
auto runner_ctx = get_context();
ggml_tensor* out = decode_graph ? taesd.decode(&runner_ctx, z) : taesd.encode(&runner_ctx, z);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
bool compute(const int n_threads, sd::Tensor<float> _compute(const int n_threads,
struct ggml_tensor* z, const sd::Tensor<float>& z_tensor,
bool decode_graph, bool decode_graph) override {
struct ggml_tensor** output, auto get_graph = [&]() -> ggml_cgraph* {
struct ggml_context* output_ctx = nullptr) { return build_graph(z_tensor, decode_graph);
auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(z, decode_graph);
}; };
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx); return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), z_tensor.dim());
} }
}; };
struct TinyVideoAutoEncoder : public TinyAutoEncoder { struct TinyVideoAutoEncoder : public VAE {
TAEHV taehv; TAEHV taehv;
bool decode_only = false; bool decode_only = false;
@ -631,7 +611,8 @@ struct TinyVideoAutoEncoder : public TinyAutoEncoder {
SDVersion version = VERSION_WAN2) SDVersion version = VERSION_WAN2)
: decode_only(decoder_only), : decode_only(decoder_only),
taehv(decoder_only, version), taehv(decoder_only, version),
TinyAutoEncoder(backend, offload_params_to_cpu) { VAE(version, backend, offload_params_to_cpu) {
scale_input = false;
taehv.init(params_ctx, tensor_storage_map, prefix); taehv.init(params_ctx, tensor_storage_map, prefix);
} }
@ -639,57 +620,45 @@ struct TinyVideoAutoEncoder : public TinyAutoEncoder {
return "taehv"; return "taehv";
} }
bool load_from_file(const std::string& file_path, int n_threads) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
LOG_INFO("loading taehv from '%s', decode_only = %s", file_path.c_str(), decode_only ? "true" : "false");
alloc_params_buffer();
std::map<std::string, ggml_tensor*> taehv_tensors;
taehv.get_param_tensors(taehv_tensors);
std::set<std::string> ignore_tensors;
if (decode_only) {
ignore_tensors.insert("encoder.");
}
ModelLoader model_loader;
if (!model_loader.init_from_file(file_path)) {
LOG_ERROR("init taehv model loader from file failed: '%s'", file_path.c_str());
return false;
}
bool success = model_loader.load_tensors(taehv_tensors, ignore_tensors, n_threads);
if (!success) {
LOG_ERROR("load tae tensors from model loader failed");
return false;
}
LOG_INFO("taehv model loaded");
return success;
}
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
taehv.get_param_tensors(tensors, prefix); taehv.get_param_tensors(tensors, prefix);
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) { sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx); SD_UNUSED(rng);
z = to_backend(z); return vae_output;
auto runner_ctx = get_context(); }
struct ggml_tensor* out = decode_graph ? taehv.decode(&runner_ctx, z) : taehv.encode(&runner_ctx, z);
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
return latents;
}
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
return latents;
}
int get_encoder_output_channels(int input_channels) {
return taehv.z_channels;
}
ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) {
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
ggml_tensor* z = make_input(z_tensor);
auto runner_ctx = get_context();
ggml_tensor* out = decode_graph ? taehv.decode(&runner_ctx, z) : taehv.encode(&runner_ctx, z);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
bool compute(const int n_threads, sd::Tensor<float> _compute(const int n_threads,
struct ggml_tensor* z, const sd::Tensor<float>& z_tensor,
bool decode_graph, bool decode_graph) override {
struct ggml_tensor** output, auto get_graph = [&]() -> ggml_cgraph* {
struct ggml_context* output_ctx = nullptr) { return build_graph(z_tensor, decode_graph);
auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(z, decode_graph);
}; };
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx); return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), z_tensor.dim());
} }
}; };
#endif // __TAE_HPP__ #endif // __TAE_HPP__

1249
src/tensor.hpp Normal file

File diff suppressed because it is too large Load Diff

127
src/tensor_ggml.hpp Normal file
View File

@ -0,0 +1,127 @@
#ifndef __SD_TENSOR_GGML_HPP__
#define __SD_TENSOR_GGML_HPP__
#include <array>
#include <cstring>
#include <fstream>
#include <stdexcept>
#include <string>
#include <type_traits>
#include "ggml.h"
#include "tensor.hpp"
namespace sd {
// Maps a C++ element type to the corresponding ggml tensor type tag.
// The primary template is intentionally left undefined: using any element
// type without a specialization below is a compile-time error.
template <typename T>
struct GGMLTypeTraits;

template <>
struct GGMLTypeTraits<float> {
    static constexpr ggml_type type = GGML_TYPE_F32;
};

template <>
struct GGMLTypeTraits<ggml_fp16_t> {
    static constexpr ggml_type type = GGML_TYPE_F16;
};

template <>
struct GGMLTypeTraits<int32_t> {
    static constexpr ggml_type type = GGML_TYPE_I32;
};

template <>
struct GGMLTypeTraits<int64_t> {
    static constexpr ggml_type type = GGML_TYPE_I64;
};
// Collect a ggml tensor's extents (ne[0..n_dims)) into an sd shape vector.
inline std::vector<int64_t> shape_from_ggml(const ggml_tensor* tensor) {
    const int n_dims = ggml_n_dims(tensor);
    std::vector<int64_t> shape(static_cast<size_t>(n_dims));
    for (int i = 0; i < n_dims; ++i) {
        shape[static_cast<size_t>(i)] = tensor->ne[i];
    }
    return shape;
}
// Copy a ggml tensor (host- or device-resident) into a freshly allocated
// sd::Tensor<T>. Aborts if the ggml element type does not match T;
// returns an empty tensor for a null input.
template <typename T>
inline Tensor<T> make_sd_tensor_from_ggml(const ggml_tensor* tensor) {
    if (tensor == nullptr) {
        return {};
    }
    if (tensor->type != GGMLTypeTraits<T>::type) {
        GGML_ABORT("ggml tensor type does not match sd::Tensor type");
    }
    Tensor<T> result(shape_from_ggml(tensor));
    const size_t nbytes = ggml_nbytes(tensor);
    if (tensor->buffer != nullptr) {
        // Backend-allocated tensor: read it back through the backend API.
        ggml_backend_tensor_get(tensor, result.data(), 0, nbytes);
    } else {
        // Plain host tensor: a direct copy suffices.
        std::memcpy(result.data(), tensor->data, nbytes);
    }
    return result;
}
// Create a ggml tensor in `ctx` mirroring `tensor`'s shape and (optionally)
// its data. ggml supports at most GGML_MAX_DIMS dimensions, so a 5-D
// sd::Tensor is accepted by folding dimension 4 into dimension 3.
template <typename T>
inline ggml_tensor* make_ggml_tensor(ggml_context* ctx, const Tensor<T>& tensor, bool copy_data = true) {
    GGML_ASSERT(tensor.dim() > 0 && tensor.dim() <= 5);
    int n_dims = std::min(static_cast<int>(tensor.dim()), GGML_MAX_DIMS);
    // fill(1) instead of brace-init: `= {1, 1, 1, 1}` would silently
    // zero-fill (not one-fill) any extra slots if GGML_MAX_DIMS ever grew
    // past 4, yielding zero-sized dimensions.
    std::array<int64_t, GGML_MAX_DIMS> ne;
    ne.fill(1);
    for (int64_t i = 0; i < n_dims; ++i) {
        ne[static_cast<size_t>(i)] = tensor.shape()[static_cast<size_t>(i)];
    }
    if (tensor.dim() == 5) {
        // Fold the 5th dimension into the 4th to fit ggml's dimension limit.
        ne[3] *= tensor.shape()[4];
    }
    ggml_tensor* result = ggml_new_tensor(ctx, GGMLTypeTraits<T>::type, n_dims, ne.data());
    if (copy_data && tensor.numel() > 0) {
        std::memcpy(result->data, tensor.data(), static_cast<size_t>(ggml_nbytes(result)));
    }
    return result;
}
// Load a single tensor from the simple binary dump format:
//   int32 n_dims, int32 name_length, int32 ggml_type,
//   int32 dims[n_dims], char name[name_length], raw element data.
// Throws std::runtime_error on open/truncation failures and
// std::invalid_argument when the header is invalid or the stored type does
// not match T.
template <typename T>
inline Tensor<T> load_tensor_from_file_as_tensor(const std::string& file_path) {
    std::ifstream file(file_path, std::ios::binary);
    if (!file.is_open()) {
        throw std::runtime_error("failed to open tensor file: " + file_path);
    }
    int32_t n_dims = 0;
    int32_t length = 0;
    int32_t ttype  = 0;
    file.read(reinterpret_cast<char*>(&n_dims), sizeof(n_dims));
    file.read(reinterpret_cast<char*>(&length), sizeof(length));
    file.read(reinterpret_cast<char*>(&ttype), sizeof(ttype));
    if (!file.good()) {
        throw std::runtime_error("incomplete tensor file header: " + file_path);
    }
    // Validate header fields before using them as sizes: a corrupt file must
    // not drive out-of-bounds writes (the previous code indexed a fixed
    // 4-element shape vector with an unchecked n_dims) or huge allocations.
    if (n_dims < 1 || n_dims > 4) {
        throw std::invalid_argument("tensor file has unsupported n_dims: " + file_path);
    }
    if (length < 0) {
        throw std::invalid_argument("tensor file has negative name length: " + file_path);
    }
    if (static_cast<ggml_type>(ttype) != GGMLTypeTraits<T>::type) {
        throw std::invalid_argument("tensor file type does not match requested sd::Tensor type");
    }
    std::vector<int64_t> shape(static_cast<size_t>(n_dims), 1);
    for (int i = 0; i < n_dims; ++i) {
        int32_t dim = 1;
        file.read(reinterpret_cast<char*>(&dim), sizeof(dim));
        shape[static_cast<size_t>(i)] = dim;
    }
    // The tensor name is stored in the file but unused by this loader.
    std::string name(static_cast<size_t>(length), '\0');
    file.read(name.data(), length);
    if (!file.good()) {
        throw std::runtime_error("incomplete tensor file header: " + file_path);
    }
    Tensor<T> tensor(shape);
    file.read(reinterpret_cast<char*>(tensor.data()), static_cast<std::streamsize>(tensor.numel() * sizeof(T)));
    if (!file.good()) {
        throw std::runtime_error("incomplete tensor file data: " + file_path);
    }
    return tensor;
}
} // namespace sd
#endif

File diff suppressed because it is too large Load Diff

View File

@ -6,8 +6,10 @@
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include "condition_cache_utils.hpp"
#include "denoiser.hpp" #include "denoiser.hpp"
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
#include "tensor.hpp"
struct UCacheConfig { struct UCacheConfig {
bool enabled = false; bool enabled = false;
@ -19,6 +21,7 @@ struct UCacheConfig {
bool adaptive_threshold = true; bool adaptive_threshold = true;
float early_step_multiplier = 0.5f; float early_step_multiplier = 0.5f;
float late_step_multiplier = 1.5f; float late_step_multiplier = 1.5f;
float relative_norm_gain = 1.6f;
bool reset_error_on_compute = true; bool reset_error_on_compute = true;
}; };
@ -28,15 +31,15 @@ struct UCacheCacheEntry {
struct UCacheState { struct UCacheState {
UCacheConfig config; UCacheConfig config;
Denoiser* denoiser = nullptr; Denoiser* denoiser = nullptr;
float start_sigma = std::numeric_limits<float>::max(); float start_sigma = std::numeric_limits<float>::max();
float end_sigma = 0.0f; float end_sigma = 0.0f;
bool initialized = false; bool initialized = false;
bool initial_step = true; bool initial_step = true;
bool skip_current_step = false; bool skip_current_step = false;
bool step_active = false; bool step_active = false;
const SDCondition* anchor_condition = nullptr; const void* anchor_condition = nullptr;
std::unordered_map<const SDCondition*, UCacheCacheEntry> cache_diffs; std::unordered_map<const void*, UCacheCacheEntry> cache_diffs;
std::vector<float> prev_input; std::vector<float> prev_input;
std::vector<float> prev_output; std::vector<float> prev_output;
float output_prev_norm = 0.0f; float output_prev_norm = 0.0f;
@ -45,14 +48,16 @@ struct UCacheState {
bool has_output_prev_norm = false; bool has_output_prev_norm = false;
bool has_relative_transformation_rate = false; bool has_relative_transformation_rate = false;
float relative_transformation_rate = 0.0f; float relative_transformation_rate = 0.0f;
float cumulative_change_rate = 0.0f;
float last_input_change = 0.0f; float last_input_change = 0.0f;
bool has_last_input_change = false; bool has_last_input_change = false;
float output_change_ema = 0.0f;
bool has_output_change_ema = false;
int total_steps_skipped = 0; int total_steps_skipped = 0;
int current_step_index = -1; int current_step_index = -1;
int steps_computed_since_active = 0; int steps_computed_since_active = 0;
int expected_total_steps = 0;
int consecutive_skipped_steps = 0;
float accumulated_error = 0.0f; float accumulated_error = 0.0f;
float reference_output_norm = 0.0f;
struct BlockMetrics { struct BlockMetrics {
float sum_transformation_rate = 0.0f; float sum_transformation_rate = 0.0f;
@ -106,14 +111,16 @@ struct UCacheState {
has_output_prev_norm = false; has_output_prev_norm = false;
has_relative_transformation_rate = false; has_relative_transformation_rate = false;
relative_transformation_rate = 0.0f; relative_transformation_rate = 0.0f;
cumulative_change_rate = 0.0f;
last_input_change = 0.0f; last_input_change = 0.0f;
has_last_input_change = false; has_last_input_change = false;
output_change_ema = 0.0f;
has_output_change_ema = false;
total_steps_skipped = 0; total_steps_skipped = 0;
current_step_index = -1; current_step_index = -1;
steps_computed_since_active = 0; steps_computed_since_active = 0;
expected_total_steps = 0;
consecutive_skipped_steps = 0;
accumulated_error = 0.0f; accumulated_error = 0.0f;
reference_output_norm = 0.0f;
block_metrics.reset(); block_metrics.reset();
total_active_steps = 0; total_active_steps = 0;
} }
@ -133,7 +140,8 @@ struct UCacheState {
if (!initialized || sigmas.size() < 2) { if (!initialized || sigmas.size() < 2) {
return; return;
} }
size_t n_steps = sigmas.size() - 1; size_t n_steps = sigmas.size() - 1;
expected_total_steps = static_cast<int>(n_steps);
size_t start_step = static_cast<size_t>(config.start_percent * n_steps); size_t start_step = static_cast<size_t>(config.start_percent * n_steps);
size_t end_step = static_cast<size_t>(config.end_percent * n_steps); size_t end_step = static_cast<size_t>(config.end_percent * n_steps);
@ -207,11 +215,15 @@ struct UCacheState {
} }
int effective_total = estimated_total_steps; int effective_total = estimated_total_steps;
if (effective_total <= 0) {
effective_total = expected_total_steps;
}
if (effective_total <= 0) { if (effective_total <= 0) {
effective_total = std::max(20, steps_computed_since_active * 2); effective_total = std::max(20, steps_computed_since_active * 2);
} }
float progress = (effective_total > 0) ? (static_cast<float>(steps_computed_since_active) / effective_total) : 0.0f; float progress = (effective_total > 0) ? (static_cast<float>(steps_computed_since_active) / effective_total) : 0.0f;
progress = std::max(0.0f, std::min(1.0f, progress));
float multiplier = 1.0f; float multiplier = 1.0f;
if (progress < 0.2f) { if (progress < 0.2f) {
@ -223,43 +235,30 @@ struct UCacheState {
return base_threshold * multiplier; return base_threshold * multiplier;
} }
bool has_cache(const SDCondition* cond) const { bool has_cache(const void* cond) const {
auto it = cache_diffs.find(cond); auto it = cache_diffs.find(cond);
return it != cache_diffs.end() && !it->second.diff.empty(); return it != cache_diffs.end() && !it->second.diff.empty();
} }
void update_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) { void update_cache(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
UCacheCacheEntry& entry = cache_diffs[cond]; UCacheCacheEntry& entry = cache_diffs[cond];
size_t ne = static_cast<size_t>(ggml_nelements(output)); sd::store_condition_cache_diff(&entry.diff, input, output);
entry.diff.resize(ne);
float* out_data = (float*)output->data;
float* in_data = (float*)input->data;
for (size_t i = 0; i < ne; ++i) {
entry.diff[i] = out_data[i] - in_data[i];
}
} }
void apply_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) { void apply_cache(const void* cond, const sd::Tensor<float>& input, sd::Tensor<float>* output) {
auto it = cache_diffs.find(cond); auto it = cache_diffs.find(cond);
if (it == cache_diffs.end() || it->second.diff.empty()) { if (it == cache_diffs.end() || it->second.diff.empty()) {
return; return;
} }
sd::apply_condition_cache_diff(it->second.diff, input, output);
copy_ggml_tensor(output, input);
float* out_data = (float*)output->data;
const std::vector<float>& diff = it->second.diff;
for (size_t i = 0; i < diff.size(); ++i) {
out_data[i] += diff[i];
}
} }
bool before_condition(const SDCondition* cond, bool before_condition(const void* cond,
ggml_tensor* input, const sd::Tensor<float>& input,
ggml_tensor* output, sd::Tensor<float>* output,
float sigma, float sigma,
int step_index) { int step_index) {
if (!enabled() || step_index < 0) { if (!enabled() || step_index < 0 || output == nullptr) {
return false; return false;
} }
if (step_index != current_step_index) { if (step_index != current_step_index) {
@ -292,13 +291,13 @@ struct UCacheState {
return false; return false;
} }
size_t ne = static_cast<size_t>(ggml_nelements(input)); size_t ne = static_cast<size_t>(input.numel());
if (prev_input.size() != ne) { if (prev_input.size() != ne) {
return false; return false;
} }
float* input_data = (float*)input->data; const float* input_data = input.data();
last_input_change = 0.0f; last_input_change = 0.0f;
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
last_input_change += std::fabs(input_data[i] - prev_input[i]); last_input_change += std::fabs(input_data[i] - prev_input[i]);
} }
@ -309,17 +308,31 @@ struct UCacheState {
if (has_output_prev_norm && has_relative_transformation_rate && if (has_output_prev_norm && has_relative_transformation_rate &&
last_input_change > 0.0f && output_prev_norm > 0.0f) { last_input_change > 0.0f && output_prev_norm > 0.0f) {
float approx_output_change_rate = (relative_transformation_rate * last_input_change) / output_prev_norm; float approx_output_change = relative_transformation_rate * last_input_change;
accumulated_error = accumulated_error * config.error_decay_rate + approx_output_change_rate; float approx_output_change_rate;
if (config.use_relative_threshold) {
float base_scale = std::max(output_prev_norm, 1e-6f);
float dyn_scale = has_output_change_ema
? std::max(output_change_ema * std::max(1.0f, config.relative_norm_gain), 1e-6f)
: base_scale;
float scale = std::sqrt(base_scale * dyn_scale);
approx_output_change_rate = approx_output_change / scale;
} else {
approx_output_change_rate = approx_output_change;
}
// Increase estimated error with skip horizon to avoid long extrapolation streaks
approx_output_change_rate *= (1.0f + 0.50f * consecutive_skipped_steps);
accumulated_error = accumulated_error * config.error_decay_rate + approx_output_change_rate;
float effective_threshold = get_adaptive_threshold(); float effective_threshold = get_adaptive_threshold();
if (config.use_relative_threshold && reference_output_norm > 0.0f) { if (!config.use_relative_threshold && output_prev_norm > 0.0f) {
effective_threshold = effective_threshold * reference_output_norm; effective_threshold = effective_threshold * output_prev_norm;
} }
if (accumulated_error < effective_threshold) { if (accumulated_error < effective_threshold) {
skip_current_step = true; skip_current_step = true;
total_steps_skipped++; total_steps_skipped++;
consecutive_skipped_steps++;
apply_cache(cond, input, output); apply_cache(cond, input, output);
return true; return true;
} else if (config.reset_error_on_compute) { } else if (config.reset_error_on_compute) {
@ -330,7 +343,7 @@ struct UCacheState {
return false; return false;
} }
void after_condition(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) { void after_condition(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
if (!step_is_active()) { if (!step_is_active()) {
return; return;
} }
@ -340,17 +353,19 @@ struct UCacheState {
if (cond != anchor_condition) { if (cond != anchor_condition) {
return; return;
} }
steps_computed_since_active++;
consecutive_skipped_steps = 0;
size_t ne = static_cast<size_t>(ggml_nelements(input)); size_t ne = static_cast<size_t>(input.numel());
float* in_data = (float*)input->data; const float* in_data = input.data();
prev_input.resize(ne); prev_input.resize(ne);
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
prev_input[i] = in_data[i]; prev_input[i] = in_data[i];
} }
has_prev_input = true; has_prev_input = true;
float* out_data = (float*)output->data; const float* out_data = output.data();
float output_change = 0.0f; float output_change = 0.0f;
if (has_prev_output && prev_output.size() == ne) { if (has_prev_output && prev_output.size() == ne) {
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
output_change += std::fabs(out_data[i] - prev_output[i]); output_change += std::fabs(out_data[i] - prev_output[i]);
@ -359,6 +374,14 @@ struct UCacheState {
output_change /= static_cast<float>(ne); output_change /= static_cast<float>(ne);
} }
} }
if (std::isfinite(output_change) && output_change > 0.0f) {
if (!has_output_change_ema) {
output_change_ema = output_change;
has_output_change_ema = true;
} else {
output_change_ema = 0.8f * output_change_ema + 0.2f * output_change;
}
}
prev_output.resize(ne); prev_output.resize(ne);
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
@ -373,10 +396,6 @@ struct UCacheState {
output_prev_norm = (ne > 0) ? (mean_abs / static_cast<float>(ne)) : 0.0f; output_prev_norm = (ne > 0) ? (mean_abs / static_cast<float>(ne)) : 0.0f;
has_output_prev_norm = output_prev_norm > 0.0f; has_output_prev_norm = output_prev_norm > 0.0f;
if (reference_output_norm == 0.0f) {
reference_output_norm = output_prev_norm;
}
if (has_last_input_change && last_input_change > 0.0f && output_change > 0.0f) { if (has_last_input_change && last_input_change > 0.0f && output_change > 0.0f) {
float rate = output_change / last_input_change; float rate = output_change / last_input_change;
if (std::isfinite(rate)) { if (std::isfinite(rate)) {

View File

@ -1,8 +1,7 @@
#ifndef __UNET_HPP__ #ifndef __UNET_HPP__
#define __UNET_HPP__ #define __UNET_HPP__
#include "common.hpp" #include "common_block.hpp"
#include "ggml_extend.hpp"
#include "model.h" #include "model.h"
/*==================================================== UnetModel =====================================================*/ /*==================================================== UnetModel =====================================================*/
@ -61,10 +60,10 @@ public:
blocks["time_mixer"] = std::shared_ptr<GGMLBlock>(new AlphaBlender()); blocks["time_mixer"] = std::shared_ptr<GGMLBlock>(new AlphaBlender());
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* context, ggml_tensor* context,
int timesteps) { int timesteps) {
// x: [N, in_channels, h, w] aka [b*t, in_channels, h, w], t == timesteps // x: [N, in_channels, h, w] aka [b*t, in_channels, h, w], t == timesteps
// context: [N, max_position(aka n_context), hidden_size(aka context_dim)] aka [b*t, n_context, context_dim], t == timesteps // context: [N, max_position(aka n_context), hidden_size(aka context_dim)] aka [b*t, n_context, context_dim], t == timesteps
// t_emb: [N, in_channels] aka [b*t, in_channels] // t_emb: [N, in_channels] aka [b*t, in_channels]
@ -389,11 +388,11 @@ public:
blocks["out.2"] = std::shared_ptr<GGMLBlock>(new Conv2d(model_channels, out_channels, {3, 3}, {1, 1}, {1, 1})); blocks["out.2"] = std::shared_ptr<GGMLBlock>(new Conv2d(model_channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
} }
struct ggml_tensor* resblock_forward(std::string name, ggml_tensor* resblock_forward(std::string name,
GGMLRunnerContext* ctx, GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* emb, ggml_tensor* emb,
int num_video_frames) { int num_video_frames) {
if (version == VERSION_SVD) { if (version == VERSION_SVD) {
auto block = std::dynamic_pointer_cast<VideoResBlock>(blocks[name]); auto block = std::dynamic_pointer_cast<VideoResBlock>(blocks[name]);
@ -405,11 +404,11 @@ public:
} }
} }
struct ggml_tensor* attention_layer_forward(std::string name, ggml_tensor* attention_layer_forward(std::string name,
GGMLRunnerContext* ctx, GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* context, ggml_tensor* context,
int timesteps) { int timesteps) {
if (version == VERSION_SVD) { if (version == VERSION_SVD) {
auto block = std::dynamic_pointer_cast<SpatialVideoTransformer>(blocks[name]); auto block = std::dynamic_pointer_cast<SpatialVideoTransformer>(blocks[name]);
@ -421,15 +420,15 @@ public:
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* timesteps, ggml_tensor* timesteps,
struct ggml_tensor* context, ggml_tensor* context,
struct ggml_tensor* c_concat = nullptr, ggml_tensor* c_concat = nullptr,
struct ggml_tensor* y = nullptr, ggml_tensor* y = nullptr,
int num_video_frames = -1, int num_video_frames = -1,
std::vector<struct ggml_tensor*> controls = {}, std::vector<ggml_tensor*> controls = {},
float control_strength = 0.f) { float control_strength = 0.f) {
// x: [N, in_channels, h, w] or [N, in_channels/2, h, w] // x: [N, in_channels, h, w] or [N, in_channels/2, h, w]
// timesteps: [N,] // timesteps: [N,]
// context: [N, max_position, hidden_size] or [1, max_position, hidden_size]. for example, [N, 77, 768] // context: [N, max_position, hidden_size] or [1, max_position, hidden_size]. for example, [N, 77, 768]
@ -481,7 +480,7 @@ public:
} }
// input_blocks // input_blocks
std::vector<struct ggml_tensor*> hs; std::vector<ggml_tensor*> hs;
// input block 0 // input block 0
auto h = input_blocks_0_0->forward(ctx, x); auto h = input_blocks_0_0->forward(ctx, x);
@ -606,82 +605,81 @@ struct UNetModelRunner : public GGMLRunner {
return "unet"; return "unet";
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
unet.get_param_tensors(tensors, prefix); unet.get_param_tensors(tensors, prefix);
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* x, ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps_tensor,
struct ggml_tensor* context, const sd::Tensor<float>& context_tensor = {},
struct ggml_tensor* c_concat = nullptr, const sd::Tensor<float>& c_concat_tensor = {},
struct ggml_tensor* y = nullptr, const sd::Tensor<float>& y_tensor = {},
int num_video_frames = -1, int num_video_frames = -1,
std::vector<struct ggml_tensor*> controls = {}, const std::vector<sd::Tensor<float>>& controls_tensor = {},
float control_strength = 0.f) { float control_strength = 0.f) {
struct ggml_cgraph* gf = new_graph_custom(UNET_GRAPH_SIZE); ggml_cgraph* gf = new_graph_custom(UNET_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* c_concat = make_optional_input(c_concat_tensor);
ggml_tensor* y = make_optional_input(y_tensor);
std::vector<ggml_tensor*> controls;
controls.reserve(controls_tensor.size());
for (const auto& control_tensor : controls_tensor) {
controls.push_back(make_input(control_tensor));
}
if (num_video_frames == -1) { if (num_video_frames == -1) {
num_video_frames = static_cast<int>(x->ne[3]); num_video_frames = static_cast<int>(x->ne[3]);
} }
x = to_backend(x);
context = to_backend(context);
y = to_backend(y);
timesteps = to_backend(timesteps);
c_concat = to_backend(c_concat);
for (int i = 0; i < controls.size(); i++) {
controls[i] = to_backend(controls[i]);
}
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_tensor* out = unet.forward(&runner_ctx, ggml_tensor* out = unet.forward(&runner_ctx,
x, x,
timesteps, timesteps,
context, context,
c_concat, c_concat,
y, y,
num_video_frames, num_video_frames,
controls, controls,
control_strength); control_strength);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
bool compute(int n_threads, sd::Tensor<float> compute(int n_threads,
struct ggml_tensor* x, const sd::Tensor<float>& x,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps,
struct ggml_tensor* context, const sd::Tensor<float>& context = {},
struct ggml_tensor* c_concat, const sd::Tensor<float>& c_concat = {},
struct ggml_tensor* y, const sd::Tensor<float>& y = {},
int num_video_frames = -1, int num_video_frames = -1,
std::vector<struct ggml_tensor*> controls = {}, const std::vector<sd::Tensor<float>>& controls = {},
float control_strength = 0.f, float control_strength = 0.f) {
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size] // context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
// c_concat: [N, in_channels, h, w] or [1, in_channels, h, w] // c_concat: [N, in_channels, h, w] or [1, in_channels, h, w]
// y: [N, adm_in_channels] or [1, adm_in_channels] // y: [N, adm_in_channels] or [1, adm_in_channels]
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, c_concat, y, num_video_frames, controls, control_strength); return build_graph(x, timesteps, context, c_concat, y, num_video_frames, controls, control_strength);
}; };
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx); return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
} }
void test() { void test() {
struct ggml_init_params params; ggml_init_params params;
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); ggml_context* ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr); GGML_ASSERT(ctx != nullptr);
{ {
// CPU, num_video_frames = 1, x{num_video_frames, 8, 8, 8}: Pass // CPU, num_video_frames = 1, x{num_video_frames, 8, 8, 8}: Pass
@ -690,27 +688,37 @@ struct UNetModelRunner : public GGMLRunner {
// CUDA, num_video_frames = 3, x{num_video_frames, 8, 8, 8}: nan // CUDA, num_video_frames = 3, x{num_video_frames, 8, 8, 8}: nan
int num_video_frames = 3; int num_video_frames = 3;
auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 8, num_video_frames); sd::Tensor<float> x({8, 8, 8, num_video_frames});
std::vector<float> timesteps_vec(num_video_frames, 999.f); std::vector<float> timesteps_vec(num_video_frames, 999.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec); auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
ggml_set_f32(x, 0.5f); x.fill_(0.5f);
// print_ggml_tensor(x); // print_ggml_tensor(x);
auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 1024, 1, num_video_frames); sd::Tensor<float> context({1024, 1, num_video_frames});
ggml_set_f32(context, 0.5f); context.fill_(0.5f);
// print_ggml_tensor(context); // print_ggml_tensor(context);
auto y = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 768, num_video_frames); sd::Tensor<float> y({768, num_video_frames});
ggml_set_f32(y, 0.5f); y.fill_(0.5f);
// print_ggml_tensor(y); // print_ggml_tensor(y);
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, nullptr, y, num_video_frames, {}, 0.f, &out, work_ctx); auto out_opt = compute(8,
int64_t t1 = ggml_time_ms(); x,
timesteps,
context,
{},
y,
num_video_frames,
{},
0.f);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("unet test done in %lldms", t1 - t0); LOG_DEBUG("unet test done in %lldms", t1 - t0);
} }
} }

View File

@ -2,6 +2,7 @@
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
#include "model.h" #include "model.h"
#include "stable-diffusion.h" #include "stable-diffusion.h"
#include "util.h"
struct UpscalerGGML { struct UpscalerGGML {
ggml_backend_t backend = nullptr; // general backend ggml_backend_t backend = nullptr; // general backend
@ -64,6 +65,39 @@ struct UpscalerGGML {
return true; return true;
} }
sd::Tensor<float> upscale_tensor(const sd::Tensor<float>& input_tensor) {
sd::Tensor<float> upscaled;
if (tile_size <= 0 || (input_tensor.shape()[0] <= tile_size && input_tensor.shape()[1] <= tile_size)) {
upscaled = esrgan_upscaler->compute(n_threads, input_tensor);
} else {
auto on_processing = [&](const sd::Tensor<float>& input_tile) -> sd::Tensor<float> {
auto output_tile = esrgan_upscaler->compute(n_threads, input_tile);
if (output_tile.empty()) {
LOG_ERROR("esrgan compute failed while processing a tile");
return {};
}
return output_tile;
};
upscaled = process_tiles_2d(input_tensor,
static_cast<int>(input_tensor.shape()[0] * esrgan_upscaler->scale),
static_cast<int>(input_tensor.shape()[1] * esrgan_upscaler->scale),
esrgan_upscaler->scale,
tile_size,
tile_size,
0.25f,
false,
false,
on_processing);
}
esrgan_upscaler->free_compute_buffer();
if (upscaled.empty()) {
LOG_ERROR("esrgan compute failed");
return {};
}
return upscaled;
}
sd_image_t upscale(sd_image_t input_image, uint32_t upscale_factor) { sd_image_t upscale(sd_image_t input_image, uint32_t upscale_factor) {
// upscale_factor, unused for RealESRGAN_x4plus_anime_6B.pth // upscale_factor, unused for RealESRGAN_x4plus_anime_6B.pth
sd_image_t upscaled_image = {0, 0, 0, nullptr}; sd_image_t upscaled_image = {0, 0, 0, nullptr};
@ -72,39 +106,17 @@ struct UpscalerGGML {
LOG_INFO("upscaling from (%i x %i) to (%i x %i)", LOG_INFO("upscaling from (%i x %i) to (%i x %i)",
input_image.width, input_image.height, output_width, output_height); input_image.width, input_image.height, output_width, output_height);
struct ggml_init_params params; sd::Tensor<float> input_tensor = sd_image_to_tensor(input_image);
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G sd::Tensor<float> upscaled;
params.mem_buffer = nullptr; int64_t t0 = ggml_time_ms();
params.no_alloc = false; upscaled = upscale_tensor(input_tensor);
if (upscaled.empty()) {
// draft context
struct ggml_context* upscale_ctx = ggml_init(params);
if (!upscale_ctx) {
LOG_ERROR("ggml_init() failed");
return upscaled_image; return upscaled_image;
} }
// LOG_DEBUG("upscale work buffer size: %.2f MB", params.mem_size / 1024.f / 1024.f); sd_image_t upscaled_data = tensor_to_sd_image(upscaled);
ggml_tensor* input_image_tensor = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, input_image.width, input_image.height, 3, 1); int64_t t3 = ggml_time_ms();
sd_image_to_ggml_tensor(input_image, input_image_tensor);
ggml_tensor* upscaled = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, output_width, output_height, 3, 1);
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
return esrgan_upscaler->compute(n_threads, in, &out);
};
int64_t t0 = ggml_time_ms();
sd_tiling(input_image_tensor, upscaled, esrgan_upscaler->scale, esrgan_upscaler->tile_size, 0.25f, on_tiling);
esrgan_upscaler->free_compute_buffer();
ggml_ext_tensor_clamp_inplace(upscaled, 0.f, 1.f);
uint8_t* upscaled_data = ggml_tensor_to_sd_image(upscaled);
ggml_free(upscale_ctx);
int64_t t3 = ggml_time_ms();
LOG_INFO("input_image_tensor upscaled, taking %.2fs", (t3 - t0) / 1000.0f); LOG_INFO("input_image_tensor upscaled, taking %.2fs", (t3 - t0) / 1000.0f);
upscaled_image = { upscaled_image = upscaled_data;
(uint32_t)output_width,
(uint32_t)output_height,
3,
upscaled_data,
};
return upscaled_image; return upscaled_image;
} }
}; };

View File

@ -479,158 +479,96 @@ const char* sd_get_system_info() {
return buffer; return buffer;
} }
sd_image_f32_t sd_image_t_to_sd_image_f32_t(sd_image_t image) { sd_image_t tensor_to_sd_image(const sd::Tensor<float>& tensor, int frame_index) {
sd_image_f32_t converted_image; const auto& shape = tensor.shape();
converted_image.width = image.width; GGML_ASSERT(shape.size() == 4 || shape.size() == 5);
converted_image.height = image.height; int width = static_cast<int>(shape[0]);
converted_image.channel = image.channel; int height = static_cast<int>(shape[1]);
int channel = static_cast<int>(shape[shape.size() == 5 ? 3 : 2]);
uint8_t* data = (uint8_t*)malloc(static_cast<size_t>(width * height * channel));
GGML_ASSERT(data != nullptr);
// Allocate memory for float data for (int iw = 0; iw < width; ++iw) {
converted_image.data = (float*)malloc(image.width * image.height * image.channel * sizeof(float)); for (int ih = 0; ih < height; ++ih) {
for (int ic = 0; ic < channel; ++ic) {
for (uint32_t i = 0; i < image.width * image.height * image.channel; i++) { float value = shape.size() == 5 ? tensor.index(iw, ih, frame_index, ic, 0)
// Convert uint8_t to float : tensor.index(iw, ih, ic, frame_index);
converted_image.data[i] = (float)image.data[i]; value = std::clamp(value, 0.0f, 1.0f);
} data[(ih * width + iw) * channel + ic] = static_cast<uint8_t>(std::round(value * 255.0f));
return converted_image;
}
// Function to perform double linear interpolation
float interpolate(float v1, float v2, float v3, float v4, float x_ratio, float y_ratio) {
return v1 * (1 - x_ratio) * (1 - y_ratio) + v2 * x_ratio * (1 - y_ratio) + v3 * (1 - x_ratio) * y_ratio + v4 * x_ratio * y_ratio;
}
sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int target_height) {
sd_image_f32_t resized_image;
resized_image.width = target_width;
resized_image.height = target_height;
resized_image.channel = image.channel;
// Allocate memory for resized float data
resized_image.data = (float*)malloc(target_width * target_height * image.channel * sizeof(float));
for (int y = 0; y < target_height; y++) {
for (int x = 0; x < target_width; x++) {
float original_x = (float)x * image.width / target_width;
float original_y = (float)y * image.height / target_height;
uint32_t x1 = (uint32_t)original_x;
uint32_t y1 = (uint32_t)original_y;
uint32_t x2 = std::min(x1 + 1, image.width - 1);
uint32_t y2 = std::min(y1 + 1, image.height - 1);
for (uint32_t k = 0; k < image.channel; k++) {
float v1 = *(image.data + y1 * image.width * image.channel + x1 * image.channel + k);
float v2 = *(image.data + y1 * image.width * image.channel + x2 * image.channel + k);
float v3 = *(image.data + y2 * image.width * image.channel + x1 * image.channel + k);
float v4 = *(image.data + y2 * image.width * image.channel + x2 * image.channel + k);
float x_ratio = original_x - x1;
float y_ratio = original_y - y1;
float value = interpolate(v1, v2, v3, v4, x_ratio, y_ratio);
*(resized_image.data + y * target_width * image.channel + x * image.channel + k) = value;
} }
} }
} }
return {
return resized_image; static_cast<uint32_t>(width),
static_cast<uint32_t>(height),
static_cast<uint32_t>(channel),
data,
};
} }
void normalize_sd_image_f32_t(sd_image_f32_t image, float means[3], float stds[3]) { sd::Tensor<float> sd_image_to_tensor(sd_image_t image,
for (uint32_t y = 0; y < image.height; y++) { int target_width,
for (uint32_t x = 0; x < image.width; x++) { int target_height,
for (uint32_t k = 0; k < image.channel; k++) { bool scale) {
int index = (y * image.width + x) * image.channel + k; sd::Tensor<float> tensor = sd::zeros<float>({static_cast<int64_t>(image.width),
image.data[index] = (image.data[index] - means[k]) / stds[k]; static_cast<int64_t>(image.height),
static_cast<int64_t>(image.channel),
1});
for (uint32_t iw = 0; iw < image.width; ++iw) {
for (uint32_t ih = 0; ih < image.height; ++ih) {
for (uint32_t ic = 0; ic < image.channel; ++ic) {
tensor.index(iw, ih, ic, 0) = sd_image_get_f32(image, iw, ih, ic, scale);
} }
} }
} }
if (target_width >= 0 && target_height >= 0 &&
(tensor.shape()[0] != target_width || tensor.shape()[1] != target_height)) {
tensor = sd::ops::interpolate(tensor,
{target_width,
target_height,
tensor.shape()[2],
tensor.shape()[3]});
}
return tensor;
} }
// Constants for means and std // Constants for means and std
float means[3] = {0.48145466f, 0.4578275f, 0.40821073f}; float means[3] = {0.48145466f, 0.4578275f, 0.40821073f};
float stds[3] = {0.26862954f, 0.26130258f, 0.27577711f}; float stds[3] = {0.26862954f, 0.26130258f, 0.27577711f};
// Function to clip and preprocess sd_image_f32_t sd::Tensor<float> clip_preprocess(const sd::Tensor<float>& image, int target_width, int target_height) {
sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int target_height) { GGML_ASSERT(image.dim() == 4);
float width_scale = (float)target_width / image.width; GGML_ASSERT(image.shape()[2] == 3);
float height_scale = (float)target_height / image.height; GGML_ASSERT(image.shape()[3] == 1);
GGML_ASSERT(target_width > 0 && target_height > 0);
float scale = std::fmax(width_scale, height_scale); float width_scale = static_cast<float>(target_width) / static_cast<float>(image.shape()[0]);
float height_scale = static_cast<float>(target_height) / static_cast<float>(image.shape()[1]);
float scale = std::fmax(width_scale, height_scale);
// Interpolation int64_t resized_width = static_cast<int64_t>(scale * static_cast<float>(image.shape()[0]));
int resized_width = (int)(scale * image.width); int64_t resized_height = static_cast<int64_t>(scale * static_cast<float>(image.shape()[1]));
int resized_height = (int)(scale * image.height);
float* resized_data = (float*)malloc(resized_width * resized_height * image.channel * sizeof(float));
for (int y = 0; y < resized_height; y++) { sd::Tensor<float> resized = sd::ops::interpolate(
for (int x = 0; x < resized_width; x++) { image,
float original_x = (float)x * image.width / resized_width; {resized_width, resized_height, image.shape()[2], image.shape()[3]});
float original_y = (float)y * image.height / resized_height;
uint32_t x1 = (uint32_t)original_x; int64_t h_offset = std::max<int64_t>((resized_height - target_height) / 2, 0);
uint32_t y1 = (uint32_t)original_y; int64_t w_offset = std::max<int64_t>((resized_width - target_width) / 2, 0);
uint32_t x2 = std::min(x1 + 1, image.width - 1);
uint32_t y2 = std::min(y1 + 1, image.height - 1);
for (uint32_t k = 0; k < image.channel; k++) { sd::Tensor<float> cropped({target_width, target_height, image.shape()[2], image.shape()[3]});
float v1 = *(image.data + y1 * image.width * image.channel + x1 * image.channel + k); for (int64_t y = 0; y < target_height; ++y) {
float v2 = *(image.data + y1 * image.width * image.channel + x2 * image.channel + k); for (int64_t x = 0; x < target_width; ++x) {
float v3 = *(image.data + y2 * image.width * image.channel + x1 * image.channel + k); for (int64_t c = 0; c < image.shape()[2]; ++c) {
float v4 = *(image.data + y2 * image.width * image.channel + x2 * image.channel + k); cropped.index(x, y, c, 0) = resized.index(x + w_offset, y + h_offset, c, 0);
float x_ratio = original_x - x1;
float y_ratio = original_y - y1;
float value = interpolate(v1, v2, v3, v4, x_ratio, y_ratio);
*(resized_data + y * resized_width * image.channel + x * image.channel + k) = value;
} }
} }
} }
// Clip and preprocess sd::Tensor<float> normalized = sd::ops::clamp(cropped, 0.0f, 1.0f);
int h_offset = std::max((int)(resized_height - target_height) / 2, 0); sd::Tensor<float> mean({1, 1, 3, 1}, {means[0], means[1], means[2]});
int w_offset = std::max((int)(resized_width - target_width) / 2, 0); sd::Tensor<float> std({1, 1, 3, 1}, {stds[0], stds[1], stds[2]});
return (normalized - mean) / std;
sd_image_f32_t result;
result.width = target_width;
result.height = target_height;
result.channel = image.channel;
result.data = (float*)malloc(target_height * target_width * image.channel * sizeof(float));
for (uint32_t k = 0; k < image.channel; k++) {
for (uint32_t i = 0; i < result.height; i++) {
for (uint32_t j = 0; j < result.width; j++) {
int src_y = std::min(static_cast<int>(i + h_offset), resized_height - 1);
int src_x = std::min(static_cast<int>(j + w_offset), resized_width - 1);
*(result.data + i * result.width * image.channel + j * image.channel + k) =
fmin(fmax(*(resized_data + src_y * resized_width * image.channel + src_x * image.channel + k), 0.0f), 255.0f) / 255.0f;
}
}
}
// Free allocated memory
free(resized_data);
// Normalize
for (uint32_t k = 0; k < image.channel; k++) {
for (uint32_t i = 0; i < result.height; i++) {
for (uint32_t j = 0; j < result.width; j++) {
// *(result.data + i * size * image.channel + j * image.channel + k) = 0.5f;
int offset = i * result.width * image.channel + j * image.channel + k;
float value = *(result.data + offset);
value = (value - means[k]) / stds[k];
// value = 0.5f;
*(result.data + offset) = value;
}
}
}
return result;
} }
// Ref: https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/cad87bf4e3e0b0a759afa94e933527c3123d59bc/modules/prompt_parser.py#L345 // Ref: https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/cad87bf4e3e0b0a759afa94e933527c3123d59bc/modules/prompt_parser.py#L345

View File

@ -7,6 +7,7 @@
#include <vector> #include <vector>
#include "stable-diffusion.h" #include "stable-diffusion.h"
#include "tensor.hpp"
#define SAFE_STR(s) ((s) ? (s) : "") #define SAFE_STR(s) ((s) ? (s) : "")
#define BOOL_STR(b) ((b) ? "true" : "false") #define BOOL_STR(b) ((b) ? "true" : "false")
@ -29,20 +30,14 @@ std::string utf32_to_utf8(const std::u32string& utf32_str);
std::u32string unicode_value_to_utf32(int unicode_value); std::u32string unicode_value_to_utf32(int unicode_value);
// std::string sd_basename(const std::string& path); // std::string sd_basename(const std::string& path);
typedef struct { sd_image_t tensor_to_sd_image(const sd::Tensor<float>& tensor, int frame_index = 0);
uint32_t width;
uint32_t height;
uint32_t channel;
float* data;
} sd_image_f32_t;
void normalize_sd_image_f32_t(sd_image_f32_t image, float means[3], float stds[3]); sd::Tensor<float> sd_image_to_tensor(sd_image_t image,
int target_width = -1,
int target_height = -1,
bool scale = true);
sd_image_f32_t sd_image_t_to_sd_image_f32_t(sd_image_t image); sd::Tensor<float> clip_preprocess(const sd::Tensor<float>& image, int target_width, int target_height);
sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int target_height);
sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int target_height);
class MmapWrapper { class MmapWrapper {
public: public:

253
src/vae.hpp Normal file
View File

@ -0,0 +1,253 @@
#ifndef __VAE_HPP__
#define __VAE_HPP__
#include "common_block.hpp"
#include "tensor_ggml.hpp"
// Abstract base for all VAE runners: owns the encode/decode driver logic
// (input scaling, optional 2D tiling, timing/logging) and delegates the
// actual graph computation to the subclass via _compute().
struct VAE : public GGMLRunner {
protected:
    SDVersion version;
    // When true, pixel data in [0,1] is remapped to [-1,1] before encoding,
    // and decoder output is mapped back to [0,1] (and clamped).
    bool scale_input = true;

    // Runs the encoder (decode_graph == false) or decoder (decode_graph == true)
    // graph on z. Returns an empty tensor on failure.
    virtual sd::Tensor<float> _compute(const int n_threads,
                                       const sd::Tensor<float>& z,
                                       bool decode_graph) = 0;

    // In-place remap [0,1] -> [-1,1].
    static inline void scale_tensor_to_minus1_1(sd::Tensor<float>* tensor) {
        GGML_ASSERT(tensor != nullptr);
        for (int64_t i = 0; i < tensor->numel(); ++i) {
            (*tensor)[i] = (*tensor)[i] * 2.0f - 1.0f;
        }
    }

    // In-place remap [-1,1] -> [0,1], clamped to [0,1].
    static inline void scale_tensor_to_0_1(sd::Tensor<float>* tensor) {
        GGML_ASSERT(tensor != nullptr);
        for (int64_t i = 0; i < tensor->numel(); ++i) {
            float value = ((*tensor)[i] + 1.0f) * 0.5f;
            (*tensor)[i] = std::max(0.0f, std::min(1.0f, value));
        }
    }

    // Splits input into overlapping 2D tiles, runs _compute() on each tile,
    // and stitches the results via process_tiles_2d(). error_message is logged
    // when a single tile's computation fails.
    sd::Tensor<float> tiled_compute(const sd::Tensor<float>& input,
                                    int n_threads,
                                    int output_width,
                                    int output_height,
                                    int scale,
                                    int p_tile_size_x,
                                    int p_tile_size_y,
                                    float tile_overlap_factor,
                                    bool circular_x,
                                    bool circular_y,
                                    bool decode_graph,
                                    const char* error_message,
                                    bool silent = false) {
        auto on_processing = [&](const sd::Tensor<float>& input_tile) {
            auto output_tile = _compute(n_threads, input_tile, decode_graph);
            if (output_tile.empty()) {
                LOG_ERROR("%s", error_message);
                return sd::Tensor<float>();
            }
            return output_tile;
        };
        return ::process_tiles_2d(input,
                                  output_width,
                                  output_height,
                                  scale,
                                  p_tile_size_x,
                                  p_tile_size_y,
                                  tile_overlap_factor,
                                  circular_x,
                                  circular_y,
                                  on_processing,
                                  silent);
    }

public:
    // NOTE: base class is listed first so the initializer order matches the
    // actual construction order (avoids -Wreorder).
    VAE(SDVersion version, ggml_backend_t backend, bool offload_params_to_cpu)
        : GGMLRunner(backend, offload_params_to_cpu), version(version) {}

    // Pixel-to-latent spatial downscale factor for this model version.
    int get_scale_factor() {
        int scale_factor = 8;
        if (version == VERSION_WAN2_2_TI2V) {
            scale_factor = 16;
        } else if (sd_version_is_flux2(version)) {
            scale_factor = 16;
        } else if (version == VERSION_CHROMA_RADIANCE) {
            scale_factor = 1;
        }
        return scale_factor;
    }

    virtual int get_encoder_output_channels(int input_channels) = 0;

    // Resolves the effective tile sizes (in latent units) and overlap from the
    // user-supplied tiling params. rel_size factor <= 1 is a fraction of the
    // latent dimension; factor > 1 is interpreted as a tile count across it.
    void get_tile_sizes(int& tile_size_x,
                        int& tile_size_y,
                        float& tile_overlap,
                        const sd_tiling_params_t& params,
                        int64_t latent_x,
                        int64_t latent_y,
                        float encoding_factor = 1.0f) {
        tile_overlap = std::max(std::min(params.target_overlap, 0.5f), 0.0f);
        auto get_tile_size = [&](int requested_size, float factor, int64_t latent_size) {
            const int default_tile_size  = 32;
            const int min_tile_dimension = 4;
            int tile_size                = default_tile_size;
            // factor <= 1 means simple fraction of the latent dimension
            // factor > 1 means number of tiles across that dimension
            if (factor > 0.f) {
                if (factor > 1.0)
                    factor = 1 / (factor - factor * tile_overlap + tile_overlap);
                tile_size = static_cast<int>(std::round(latent_size * factor));
            } else if (requested_size >= min_tile_dimension) {
                tile_size = requested_size;
            }
            tile_size = static_cast<int>(tile_size * encoding_factor);
            return std::max(std::min(tile_size, static_cast<int>(latent_size)), min_tile_dimension);
        };
        tile_size_x = get_tile_size(params.tile_size_x, params.rel_size_x, latent_x);
        tile_size_y = get_tile_size(params.tile_size_y, params.rel_size_y, latent_y);
    }

    // Encodes pixels x into latents; empty tensor on failure.
    sd::Tensor<float> encode(int n_threads,
                             const sd::Tensor<float>& x,
                             sd_tiling_params_t tiling_params,
                             bool circular_x = false,
                             bool circular_y = false) {
        int64_t t0 = ggml_time_ms();
        sd::Tensor<float> input = x;
        sd::Tensor<float> output;
        if (scale_input) {
            scale_tensor_to_minus1_1(&input);
        }
        if (tiling_params.enabled) {
            const int scale_factor = get_scale_factor();
            int64_t W              = input.shape()[0] / scale_factor;
            int64_t H              = input.shape()[1] / scale_factor;
            float tile_overlap;
            int tile_size_x, tile_size_y;
            // 1.30539f: empirical encoding_factor enlarging encode tiles.
            get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, tiling_params, W, H, 1.30539f);
            LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y);
            output = tiled_compute(input,
                                   n_threads,
                                   static_cast<int>(W),
                                   static_cast<int>(H),
                                   scale_factor,
                                   tile_size_x,
                                   tile_size_y,
                                   tile_overlap,
                                   circular_x,
                                   circular_y,
                                   false,
                                   "vae encode compute failed while processing a tile");
        } else {
            output = _compute(n_threads, input, false);
            // NOTE(review): only the non-tiled path frees the compute buffer
            // here (decode() frees it on both paths) — confirm intentional.
            free_compute_buffer();
        }
        if (output.empty()) {
            LOG_ERROR("vae encode compute failed");
            return {};
        }
        int64_t t1 = ggml_time_ms();
        LOG_DEBUG("computing vae encode graph completed, taking %.2fs", (t1 - t0) * 1.0f / 1000);
        return output;  // plain return: lets NRVO apply (std::move would inhibit it)
    }

    // Decodes latents x into pixels; empty tensor on failure.
    sd::Tensor<float> decode(int n_threads,
                             const sd::Tensor<float>& x,
                             sd_tiling_params_t tiling_params,
                             bool decode_video = false,
                             bool circular_x   = false,
                             bool circular_y   = false,
                             bool silent       = false) {
        int64_t t0 = ggml_time_ms();
        sd::Tensor<float> input = x;
        sd::Tensor<float> output;
        if (tiling_params.enabled) {
            const int scale_factor = get_scale_factor();
            int64_t W              = input.shape()[0] * scale_factor;
            int64_t H              = input.shape()[1] * scale_factor;
            float tile_overlap;
            int tile_size_x, tile_size_y;
            get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, tiling_params, input.shape()[0], input.shape()[1]);
            if (!silent) {
                LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y);
            }
            output = tiled_compute(
                input,
                n_threads,
                static_cast<int>(W),
                static_cast<int>(H),
                scale_factor,
                tile_size_x,
                tile_size_y,
                tile_overlap,
                circular_x,
                circular_y,
                true,
                "vae decode compute failed while processing a tile",
                silent);
        } else {
            output = _compute(n_threads, input, true);
        }
        free_compute_buffer();
        if (output.empty()) {
            LOG_ERROR("vae decode compute failed");
            return {};
        }
        if (scale_input) {
            scale_tensor_to_0_1(&output);
        }
        int64_t t1 = ggml_time_ms();
        LOG_DEBUG("computing vae decode graph completed, taking %.2fs", (t1 - t0) * 1.0f / 1000);
        return output;  // plain return: lets NRVO apply (std::move would inhibit it)
    }

    // Converts raw encoder output into sampled latents (e.g. mean/logvar -> sample).
    virtual sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) = 0;
    // Maps latents between the diffusion model's space and the VAE's space.
    virtual sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) = 0;
    virtual sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) = 0;
    virtual void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) = 0;
    virtual void set_conv2d_scale(float scale) { SD_UNUSED(scale); }
};
struct FakeVAE : public VAE {
FakeVAE(SDVersion version, ggml_backend_t backend, bool offload_params_to_cpu)
: VAE(version, backend, offload_params_to_cpu) {}
int get_encoder_output_channels(int input_channels) {
return input_channels;
}
sd::Tensor<float> _compute(const int n_threads,
const sd::Tensor<float>& z,
bool decode_graph) override {
SD_UNUSED(n_threads);
SD_UNUSED(decode_graph);
return z;
}
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
SD_UNUSED(rng);
return vae_output;
}
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
return latents;
}
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
return latents;
}
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) override {}
std::string get_desc() override {
return "fake_vae";
}
};
#endif // __VAE_HPP__

View File

@ -1,4 +1,4 @@
static unsigned char merges_utf8_c_str[] = { static const unsigned char clip_merges_utf8_c_str[] = {
0x23, 0x23,
0x76, 0x76,
0x65, 0x65,
@ -524620,7 +524620,7 @@ static unsigned char merges_utf8_c_str[] = {
0x0a, 0x0a,
}; };
static unsigned char t5_tokenizer_json_str[] = { static const unsigned char t5_tokenizer_json_str[] = {
0x7b, 0x7b,
0x0a, 0x0a,
0x20, 0x20,

View File

@ -1,4 +1,4 @@
unsigned char mistral_merges_utf8_c_str[] = { static const unsigned char mistral_merges_utf8_c_str[] = {
0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0x20, 0x74, 0x0a, 0x65, 0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0x20, 0x74, 0x0a, 0x65,
0x20, 0x72, 0x0a, 0x69, 0x20, 0x6e, 0x0a, 0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0x20, 0x72, 0x0a, 0x69, 0x20, 0x6e, 0x0a, 0xc4, 0xa0, 0x20, 0xc4, 0xa0,
0xc4, 0xa0, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0xc4, 0xa0, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0xc4, 0xa0, 0x20, 0xc4, 0xa0,
@ -260614,7 +260614,7 @@ unsigned char mistral_merges_utf8_c_str[] = {
0xc3, 0xa5, 0xc4, 0xb2, 0xc4, 0xb0, 0x20, 0xc3, 0xa6, 0xc2, 0xb1, 0xc4, 0xc3, 0xa5, 0xc4, 0xb2, 0xc4, 0xb0, 0x20, 0xc3, 0xa6, 0xc2, 0xb1, 0xc4,
0xab, 0xc3, 0xa4, 0xc2, 0xb9, 0xc2, 0xa6, 0x0a, 0xab, 0xc3, 0xa4, 0xc2, 0xb9, 0xc2, 0xa6, 0x0a,
}; };
unsigned char mistral_vocab_json_utf8_c_str[] = { static const unsigned char mistral_vocab_json_utf8_c_str[] = {
0x7b, 0x22, 0x3c, 0x75, 0x6e, 0x6b, 0x3e, 0x22, 0x3a, 0x20, 0x30, 0x2c, 0x7b, 0x22, 0x3c, 0x75, 0x6e, 0x6b, 0x3e, 0x22, 0x3a, 0x20, 0x30, 0x2c,
0x20, 0x22, 0x3c, 0x73, 0x3e, 0x22, 0x3a, 0x20, 0x31, 0x2c, 0x20, 0x22, 0x20, 0x22, 0x3c, 0x73, 0x3e, 0x22, 0x3a, 0x20, 0x31, 0x2c, 0x20, 0x22,
0x3c, 0x2f, 0x73, 0x3e, 0x22, 0x3a, 0x20, 0x32, 0x2c, 0x20, 0x22, 0x5b, 0x3c, 0x2f, 0x73, 0x3e, 0x22, 0x3a, 0x20, 0x32, 0x2c, 0x20, 0x22, 0x5b,

View File

@ -1,4 +1,4 @@
unsigned char qwen2_merges_utf8_c_str[] = { static const unsigned char qwen2_merges_utf8_c_str[] = {
0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0xc4, 0xa0, 0x20, 0xc4, 0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0xc4, 0xa0, 0x20, 0xc4,
0xa0, 0xc4, 0xa0, 0x0a, 0x69, 0x20, 0x6e, 0x0a, 0xc4, 0xa0, 0x20, 0x74, 0xa0, 0xc4, 0xa0, 0x0a, 0x69, 0x20, 0x6e, 0x0a, 0xc4, 0xa0, 0x20, 0x74,
0x0a, 0xc4, 0xa0, 0xc4, 0xa0, 0xc4, 0xa0, 0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0xc4, 0xa0, 0xc4, 0xa0, 0xc4, 0xa0, 0x20, 0xc4, 0xa0,

View File

@ -1,4 +1,4 @@
unsigned char umt5_tokenizer_json_str[] = { static const unsigned char umt5_tokenizer_json_str[] = {
0x7b, 0x22, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x20, 0x7b, 0x22, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x20,
0x22, 0x31, 0x2e, 0x30, 0x22, 0x2c, 0x20, 0x22, 0x74, 0x72, 0x75, 0x6e, 0x22, 0x31, 0x2e, 0x30, 0x22, 0x2c, 0x20, 0x22, 0x74, 0x72, 0x75, 0x6e,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x20, 0x6e, 0x75, 0x6c, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x20, 0x6e, 0x75, 0x6c,

35
src/vocab/vocab.cpp Normal file
View File

@ -0,0 +1,35 @@
#include "vocab.h"
#include "clip_t5.hpp"
#include "mistral.hpp"
#include "qwen.hpp"
#include "umt5.hpp"
std::string load_clip_merges() {
std::string merges_utf8_str(reinterpret_cast<const char*>(clip_merges_utf8_c_str), sizeof(clip_merges_utf8_c_str));
return merges_utf8_str;
}
std::string load_qwen2_merges() {
std::string merges_utf8_str(reinterpret_cast<const char*>(qwen2_merges_utf8_c_str), sizeof(qwen2_merges_utf8_c_str));
return merges_utf8_str;
}
// Returns the embedded Mistral BPE merges blob as a UTF-8 string.
std::string load_mistral_merges() {
    return std::string(reinterpret_cast<const char*>(mistral_merges_utf8_c_str),
                       sizeof(mistral_merges_utf8_c_str));
}
// Returns the embedded Mistral vocabulary JSON as a string.
std::string load_mistral_vocab_json() {
    return std::string(reinterpret_cast<const char*>(mistral_vocab_json_utf8_c_str),
                       sizeof(mistral_vocab_json_utf8_c_str));
}
// Returns the embedded T5 tokenizer JSON as a string.
std::string load_t5_tokenizer_json() {
    return std::string(reinterpret_cast<const char*>(t5_tokenizer_json_str),
                       sizeof(t5_tokenizer_json_str));
}
// Returns the embedded UMT5 tokenizer JSON as a string.
std::string load_umt5_tokenizer_json() {
    return std::string(reinterpret_cast<const char*>(umt5_tokenizer_json_str),
                       sizeof(umt5_tokenizer_json_str));
}

13
src/vocab/vocab.h Normal file
View File

@ -0,0 +1,13 @@
#ifndef __VOCAB_H__
#define __VOCAB_H__
#include <string>
// Accessors for tokenizer data embedded in the binary as byte arrays.
// Each returns a copy of the corresponding blob as a std::string.
std::string load_clip_merges();          // CLIP BPE merges (UTF-8)
std::string load_qwen2_merges();         // Qwen2 BPE merges (UTF-8)
std::string load_mistral_merges();       // Mistral BPE merges (UTF-8)
std::string load_mistral_vocab_json();   // Mistral vocabulary (JSON)
std::string load_t5_tokenizer_json();    // T5 tokenizer config (JSON)
std::string load_umt5_tokenizer_json();  // UMT5 tokenizer config (JSON)
#endif // __VOCAB_H__

View File

@ -5,9 +5,8 @@
#include <memory> #include <memory>
#include <utility> #include <utility>
#include "common.hpp" #include "common_block.hpp"
#include "flux.hpp" #include "flux.hpp"
#include "ggml_extend.hpp"
#include "rope.hpp" #include "rope.hpp"
#include "vae.hpp" #include "vae.hpp"
@ -26,7 +25,7 @@ namespace WAN {
std::tuple<int, int, int> dilation; std::tuple<int, int, int> dilation;
bool bias; bool bias;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
params["weight"] = ggml_new_tensor_4d(ctx, params["weight"] = ggml_new_tensor_4d(ctx,
GGML_TYPE_F16, GGML_TYPE_F16,
std::get<2>(kernel_size), std::get<2>(kernel_size),
@ -54,11 +53,11 @@ namespace WAN {
dilation(std::move(dilation)), dilation(std::move(dilation)),
bias(bias) {} bias(bias) {}
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* cache_x = nullptr) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, ggml_tensor* cache_x = nullptr) {
// x: [N*IC, ID, IH, IW] // x: [N*IC, ID, IH, IW]
// result: x: [N*OC, ID, IH, IW] // result: x: [N*OC, ID, IH, IW]
struct ggml_tensor* w = params["weight"]; ggml_tensor* w = params["weight"];
struct ggml_tensor* b = nullptr; ggml_tensor* b = nullptr;
if (bias) { if (bias) {
b = params["bias"]; b = params["bias"];
} }
@ -87,7 +86,7 @@ namespace WAN {
protected: protected:
int64_t dim; int64_t dim;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
ggml_type wtype = GGML_TYPE_F32; ggml_type wtype = GGML_TYPE_F32;
auto iter = tensor_storage_map.find(prefix + "gamma"); auto iter = tensor_storage_map.find(prefix + "gamma");
if (iter != tensor_storage_map.end()) { if (iter != tensor_storage_map.end()) {
@ -101,16 +100,16 @@ namespace WAN {
RMS_norm(int64_t dim) RMS_norm(int64_t dim)
: dim(dim) {} : dim(dim) {}
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
// x: [N*IC, ID, IH, IW], IC == dim // x: [N*IC, ID, IH, IW], IC == dim
// assert N == 1 // assert N == 1
struct ggml_tensor* w = params["gamma"]; ggml_tensor* w = params["gamma"];
w = ggml_reshape_1d(ctx->ggml_ctx, w, ggml_nelements(w)); w = ggml_reshape_1d(ctx->ggml_ctx, w, ggml_nelements(w));
auto h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, x, 3, 0, 1, 2)); // [ID, IH, IW, N*IC] auto h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, x, 3, 0, 1, 2)); // [ID, IH, IW, N*IC]
h = ggml_rms_norm(ctx->ggml_ctx, h, 1e-12f); h = ggml_rms_norm(ctx->ggml_ctx, h, 1e-12f);
h = ggml_mul(ctx->ggml_ctx, h, w); h = ggml_mul(ctx->ggml_ctx, h, w);
h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, h, 1, 2, 3, 0)); h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, h, 1, 2, 3, 0));
return h; return h;
} }
@ -149,12 +148,12 @@ namespace WAN {
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t b, int64_t b,
std::vector<struct ggml_tensor*>& feat_cache, std::vector<ggml_tensor*>& feat_cache,
int& feat_idx, int& feat_idx,
int chunk_idx) { int chunk_idx) {
// x: [b*c, t, h, w] // x: [b*c, t, h, w]
GGML_ASSERT(b == 1); GGML_ASSERT(b == 1);
int64_t c = x->ne[3] / b; int64_t c = x->ne[3] / b;
@ -255,9 +254,9 @@ namespace WAN {
GGML_ASSERT(in_channels * factor % out_channels == 0); GGML_ASSERT(in_channels * factor % out_channels == 0);
group_size = in_channels * factor / out_channels; group_size = in_channels * factor / out_channels;
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t B = 1) { int64_t B = 1) {
// x: [B*IC, T, H, W] // x: [B*IC, T, H, W]
// return: [B*OC, T/factor_t, H/factor_s, W/factor_s] // return: [B*OC, T/factor_t, H/factor_s, W/factor_s]
GGML_ASSERT(B == 1); GGML_ASSERT(B == 1);
@ -302,10 +301,10 @@ namespace WAN {
GGML_ASSERT(out_channels * factor % in_channels == 0); GGML_ASSERT(out_channels * factor % in_channels == 0);
repeats = out_channels * factor / in_channels; repeats = out_channels * factor / in_channels;
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
bool first_chunk = false, bool first_chunk = false,
int64_t B = 1) { int64_t B = 1) {
// x: [B*IC, T, H, W] // x: [B*IC, T, H, W]
// return: [B*OC, T/factor_t, H/factor_s, W/factor_s] // return: [B*OC, T/factor_t, H/factor_s, W/factor_s]
GGML_ASSERT(B == 1); GGML_ASSERT(B == 1);
@ -357,14 +356,14 @@ namespace WAN {
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t b, int64_t b,
std::vector<struct ggml_tensor*>& feat_cache, std::vector<ggml_tensor*>& feat_cache,
int& feat_idx) { int& feat_idx) {
// x: [b*c, t, h, w] // x: [b*c, t, h, w]
GGML_ASSERT(b == 1); GGML_ASSERT(b == 1);
struct ggml_tensor* h = x; ggml_tensor* h = x;
if (in_dim != out_dim) { if (in_dim != out_dim) {
auto shortcut = std::dynamic_pointer_cast<CausalConv3d>(blocks["shortcut"]); auto shortcut = std::dynamic_pointer_cast<CausalConv3d>(blocks["shortcut"]);
@ -431,15 +430,15 @@ namespace WAN {
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t b, int64_t b,
std::vector<struct ggml_tensor*>& feat_cache, std::vector<ggml_tensor*>& feat_cache,
int& feat_idx, int& feat_idx,
int chunk_idx) { int chunk_idx) {
// x: [b*c, t, h, w] // x: [b*c, t, h, w]
GGML_ASSERT(b == 1); GGML_ASSERT(b == 1);
struct ggml_tensor* x_copy = x; ggml_tensor* x_copy = x;
auto avg_shortcut = std::dynamic_pointer_cast<AvgDown3D>(blocks["avg_shortcut"]); auto avg_shortcut = std::dynamic_pointer_cast<AvgDown3D>(blocks["avg_shortcut"]);
@ -493,15 +492,15 @@ namespace WAN {
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t b, int64_t b,
std::vector<struct ggml_tensor*>& feat_cache, std::vector<ggml_tensor*>& feat_cache,
int& feat_idx, int& feat_idx,
int chunk_idx) { int chunk_idx) {
// x: [b*c, t, h, w] // x: [b*c, t, h, w]
GGML_ASSERT(b == 1); GGML_ASSERT(b == 1);
struct ggml_tensor* x_copy = x; ggml_tensor* x_copy = x;
int i = 0; int i = 0;
for (; i < mult; i++) { for (; i < mult; i++) {
@ -538,9 +537,9 @@ namespace WAN {
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Conv2d(dim, dim, {1, 1})); blocks["proj"] = std::shared_ptr<GGMLBlock>(new Conv2d(dim, dim, {1, 1}));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t b) { int64_t b) {
// x: [b*c, t, h, w] // x: [b*c, t, h, w]
GGML_ASSERT(b == 1); GGML_ASSERT(b == 1);
auto norm = std::dynamic_pointer_cast<RMS_norm>(blocks["norm"]); auto norm = std::dynamic_pointer_cast<RMS_norm>(blocks["norm"]);
@ -572,8 +571,8 @@ namespace WAN {
auto v = qkv_vec[2]; auto v = qkv_vec[2];
v = ggml_reshape_3d(ctx->ggml_ctx, v, h * w, c, n); // [t, c, h * w] v = ggml_reshape_3d(ctx->ggml_ctx, v, h * w, c, n); // [t, c, h * w]
v = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, v, 1, 0, 2, 3)); // [t, h * w, c] v = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, v, 1, 0, 2, 3)); // [t, h * w, c]
x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, 1, nullptr, true, ctx->flash_attn_enabled); // [t, h * w, c] x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, 1, nullptr, false, ctx->flash_attn_enabled); // [t, h * w, c]
x = ggml_ext_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 1, 0, 2, 3)); // [t, c, h * w] x = ggml_ext_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 1, 0, 2, 3)); // [t, c, h * w]
x = ggml_reshape_4d(ctx->ggml_ctx, x, w, h, c, n); // [t, c, h, w] x = ggml_reshape_4d(ctx->ggml_ctx, x, w, h, c, n); // [t, c, h, w]
@ -660,12 +659,12 @@ namespace WAN {
blocks["head.2"] = std::shared_ptr<GGMLBlock>(new CausalConv3d(out_dim, z_dim, {3, 3, 3}, {1, 1, 1}, {1, 1, 1})); blocks["head.2"] = std::shared_ptr<GGMLBlock>(new CausalConv3d(out_dim, z_dim, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t b, int64_t b,
std::vector<struct ggml_tensor*>& feat_cache, std::vector<ggml_tensor*>& feat_cache,
int& feat_idx, int& feat_idx,
int chunk_idx) { int chunk_idx) {
// x: [b*c, t, h, w] // x: [b*c, t, h, w]
GGML_ASSERT(b == 1); GGML_ASSERT(b == 1);
auto conv1 = std::dynamic_pointer_cast<CausalConv3d>(blocks["conv1"]); auto conv1 = std::dynamic_pointer_cast<CausalConv3d>(blocks["conv1"]);
@ -831,12 +830,12 @@ namespace WAN {
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t b, int64_t b,
std::vector<struct ggml_tensor*>& feat_cache, std::vector<ggml_tensor*>& feat_cache,
int& feat_idx, int& feat_idx,
int chunk_idx) { int chunk_idx) {
// x: [b*c, t, h, w] // x: [b*c, t, h, w]
GGML_ASSERT(b == 1); GGML_ASSERT(b == 1);
auto conv1 = std::dynamic_pointer_cast<CausalConv3d>(blocks["conv1"]); auto conv1 = std::dynamic_pointer_cast<CausalConv3d>(blocks["conv1"]);
@ -935,16 +934,16 @@ namespace WAN {
int _conv_num = 33; int _conv_num = 33;
int _conv_idx = 0; int _conv_idx = 0;
std::vector<struct ggml_tensor*> _feat_map; std::vector<ggml_tensor*> _feat_map;
int _enc_conv_num = 28; int _enc_conv_num = 28;
int _enc_conv_idx = 0; int _enc_conv_idx = 0;
std::vector<struct ggml_tensor*> _enc_feat_map; std::vector<ggml_tensor*> _enc_feat_map;
void clear_cache() { void clear_cache() {
_conv_idx = 0; _conv_idx = 0;
_feat_map = std::vector<struct ggml_tensor*>(_conv_num, nullptr); _feat_map = std::vector<ggml_tensor*>(_conv_num, nullptr);
_enc_conv_idx = 0; _enc_conv_idx = 0;
_enc_feat_map = std::vector<struct ggml_tensor*>(_enc_conv_num, nullptr); _enc_feat_map = std::vector<ggml_tensor*>(_enc_conv_num, nullptr);
} }
public: public:
@ -967,10 +966,10 @@ namespace WAN {
blocks["conv2"] = std::shared_ptr<GGMLBlock>(new CausalConv3d(z_dim, z_dim, {1, 1, 1})); blocks["conv2"] = std::shared_ptr<GGMLBlock>(new CausalConv3d(z_dim, z_dim, {1, 1, 1}));
} }
struct ggml_tensor* patchify(struct ggml_context* ctx, ggml_tensor* patchify(ggml_context* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t patch_size, int64_t patch_size,
int64_t b = 1) { int64_t b = 1) {
// x: [b*c, f, h*q, w*r] // x: [b*c, f, h*q, w*r]
// return: [b*c*r*q, f, h, w] // return: [b*c*r*q, f, h, w]
if (patch_size == 1) { if (patch_size == 1) {
@ -994,10 +993,10 @@ namespace WAN {
return x; return x;
} }
struct ggml_tensor* unpatchify(struct ggml_context* ctx, ggml_tensor* unpatchify(ggml_context* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t patch_size, int64_t patch_size,
int64_t b = 1) { int64_t b = 1) {
// x: [b*c*r*q, f, h, w] // x: [b*c*r*q, f, h, w]
// return: [b*c, f, h*q, w*r] // return: [b*c, f, h*q, w*r]
if (patch_size == 1) { if (patch_size == 1) {
@ -1020,9 +1019,9 @@ namespace WAN {
return x; return x;
} }
struct ggml_tensor* encode(GGMLRunnerContext* ctx, ggml_tensor* encode(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t b = 1) { int64_t b = 1) {
// x: [b*c, t, h, w] // x: [b*c, t, h, w]
GGML_ASSERT(b == 1); GGML_ASSERT(b == 1);
GGML_ASSERT(decode_only == false); GGML_ASSERT(decode_only == false);
@ -1038,7 +1037,7 @@ namespace WAN {
int64_t t = x->ne[2]; int64_t t = x->ne[2];
int64_t iter_ = 1 + (t - 1) / 4; int64_t iter_ = 1 + (t - 1) / 4;
struct ggml_tensor* out; ggml_tensor* out;
for (int i = 0; i < iter_; i++) { for (int i = 0; i < iter_; i++) {
_enc_conv_idx = 0; _enc_conv_idx = 0;
if (i == 0) { if (i == 0) {
@ -1056,9 +1055,9 @@ namespace WAN {
return mu; return mu;
} }
struct ggml_tensor* decode(GGMLRunnerContext* ctx, ggml_tensor* decode(GGMLRunnerContext* ctx,
struct ggml_tensor* z, ggml_tensor* z,
int64_t b = 1) { int64_t b = 1) {
// z: [b*c, t, h, w] // z: [b*c, t, h, w]
GGML_ASSERT(b == 1); GGML_ASSERT(b == 1);
@ -1069,7 +1068,7 @@ namespace WAN {
int64_t iter_ = z->ne[2]; int64_t iter_ = z->ne[2];
auto x = conv2->forward(ctx, z); auto x = conv2->forward(ctx, z);
struct ggml_tensor* out; ggml_tensor* out;
for (int i = 0; i < iter_; i++) { for (int i = 0; i < iter_; i++) {
_conv_idx = 0; _conv_idx = 0;
if (i == 0) { if (i == 0) {
@ -1088,10 +1087,10 @@ namespace WAN {
return out; return out;
} }
struct ggml_tensor* decode_partial(GGMLRunnerContext* ctx, ggml_tensor* decode_partial(GGMLRunnerContext* ctx,
struct ggml_tensor* z, ggml_tensor* z,
int i, int i,
int64_t b = 1) { int64_t b = 1) {
// z: [b*c, t, h, w] // z: [b*c, t, h, w]
GGML_ASSERT(b == 1); GGML_ASSERT(b == 1);
@ -1110,7 +1109,8 @@ namespace WAN {
}; };
struct WanVAERunner : public VAE { struct WanVAERunner : public VAE {
bool decode_only = true; float scale_factor = 1.0f;
bool decode_only = true;
WanVAE ae; WanVAE ae;
WanVAERunner(ggml_backend_t backend, WanVAERunner(ggml_backend_t backend,
@ -1119,7 +1119,7 @@ namespace WAN {
const std::string prefix = "", const std::string prefix = "",
bool decode_only = false, bool decode_only = false,
SDVersion version = VERSION_WAN2) SDVersion version = VERSION_WAN2)
: decode_only(decode_only), ae(decode_only, version == VERSION_WAN2_2_TI2V), VAE(backend, offload_params_to_cpu) { : decode_only(decode_only), ae(decode_only, version == VERSION_WAN2_2_TI2V), VAE(version, backend, offload_params_to_cpu) {
ae.init(params_ctx, tensor_storage_map, prefix); ae.init(params_ctx, tensor_storage_map, prefix);
} }
@ -1127,26 +1127,82 @@ namespace WAN {
return "wan_vae"; return "wan_vae";
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) override { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) override {
ae.get_param_tensors(tensors, prefix); ae.get_param_tensors(tensors, prefix);
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) { sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
struct ggml_cgraph* gf = new_graph_custom(10240 * z->ne[2]); SD_UNUSED(rng);
return vae_output;
}
z = to_backend(z); std::pair<sd::Tensor<float>, sd::Tensor<float>> get_latents_mean_std(const sd::Tensor<float>& latents) {
int channel_dim = latents.dim() == 5 ? 3 : 2;
std::vector<int64_t> stats_shape(static_cast<size_t>(latents.dim()), 1);
if (latents.shape()[channel_dim] == 16) { // Wan2.1 VAE
stats_shape[static_cast<size_t>(channel_dim)] = 16;
auto mean_tensor = sd::Tensor<float>::from_vector({-0.7571f, -0.7089f, -0.9113f, 0.1075f, -0.1745f, 0.9653f, -0.1517f, 1.5508f,
0.4134f, -0.0715f, 0.5517f, -0.3632f, -0.1922f, -0.9497f, 0.2503f, -0.2921f});
mean_tensor.reshape_(stats_shape);
auto std_tensor = sd::Tensor<float>::from_vector({2.8184f, 1.4541f, 2.3275f, 2.6558f, 1.2196f, 1.7708f, 2.6052f, 2.0743f,
3.2687f, 2.1526f, 2.8652f, 1.5579f, 1.6382f, 1.1253f, 2.8251f, 1.9160f});
std_tensor.reshape_(stats_shape);
return {std::move(mean_tensor), std::move(std_tensor)};
}
if (latents.shape()[channel_dim] == 48) { // Wan2.2 VAE
stats_shape[static_cast<size_t>(channel_dim)] = 48;
auto mean_tensor = sd::Tensor<float>::from_vector({-0.2289f, -0.0052f, -0.1323f, -0.2339f, -0.2799f, 0.0174f, 0.1838f, 0.1557f,
-0.1382f, 0.0542f, 0.2813f, 0.0891f, 0.1570f, -0.0098f, 0.0375f, -0.1825f,
-0.2246f, -0.1207f, -0.0698f, 0.5109f, 0.2665f, -0.2108f, -0.2158f, 0.2502f,
-0.2055f, -0.0322f, 0.1109f, 0.1567f, -0.0729f, 0.0899f, -0.2799f, -0.1230f,
-0.0313f, -0.1649f, 0.0117f, 0.0723f, -0.2839f, -0.2083f, -0.0520f, 0.3748f,
0.0152f, 0.1957f, 0.1433f, -0.2944f, 0.3573f, -0.0548f, -0.1681f, -0.0667f});
mean_tensor.reshape_(stats_shape);
auto std_tensor = sd::Tensor<float>::from_vector({0.4765f, 1.0364f, 0.4514f, 1.1677f, 0.5313f, 0.4990f, 0.4818f, 0.5013f,
0.8158f, 1.0344f, 0.5894f, 1.0901f, 0.6885f, 0.6165f, 0.8454f, 0.4978f,
0.5759f, 0.3523f, 0.7135f, 0.6804f, 0.5833f, 1.4146f, 0.8986f, 0.5659f,
0.7069f, 0.5338f, 0.4889f, 0.4917f, 0.4069f, 0.4999f, 0.6866f, 0.4093f,
0.5709f, 0.6065f, 0.6415f, 0.4944f, 0.5726f, 1.2042f, 0.5458f, 1.6887f,
0.3971f, 1.0600f, 0.3943f, 0.5537f, 0.5444f, 0.4089f, 0.7468f, 0.7744f});
std_tensor.reshape_(stats_shape);
return {std::move(mean_tensor), std::move(std_tensor)};
}
GGML_ABORT("unexpected latent channel dimension %lld for version %d",
(long long)latents.shape()[channel_dim],
version);
}
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents);
return (latents * std_tensor) / scale_factor + mean_tensor;
}
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents);
return ((latents - mean_tensor) * scale_factor) / std_tensor;
}
int get_encoder_output_channels(int input_channels) {
return static_cast<int>(ae.z_dim);
}
ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) {
ggml_cgraph* gf = new_graph_custom(10240 * z_tensor.shape()[2]);
ggml_tensor* z = make_input(z_tensor);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_tensor* out = decode_graph ? ae.decode(&runner_ctx, z) : ae.encode(&runner_ctx, z); ggml_tensor* out = decode_graph ? ae.decode(&runner_ctx, z) : ae.encode(&runner_ctx, z);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
struct ggml_cgraph* build_graph_partial(struct ggml_tensor* z, bool decode_graph, int i) { ggml_cgraph* build_graph_partial(const sd::Tensor<float>& z_tensor, bool decode_graph, int i) {
struct ggml_cgraph* gf = new_graph_custom(20480); ggml_cgraph* gf = new_graph_custom(20480);
ae.clear_cache(); ae.clear_cache();
@ -1155,11 +1211,11 @@ namespace WAN {
ae._feat_map[feat_idx] = feat_cache; ae._feat_map[feat_idx] = feat_cache;
} }
z = to_backend(z); ggml_tensor* z = make_input(z_tensor);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_tensor* out = decode_graph ? ae.decode_partial(&runner_ctx, z, i) : ae.encode(&runner_ctx, z); ggml_tensor* out = decode_graph ? ae.decode_partial(&runner_ctx, z, i) : ae.encode(&runner_ctx, z);
for (size_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) { for (size_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
ggml_tensor* feat_cache = ae._feat_map[feat_idx]; ggml_tensor* feat_cache = ae._feat_map[feat_idx];
@ -1174,86 +1230,85 @@ namespace WAN {
return gf; return gf;
} }
bool compute(const int n_threads, sd::Tensor<float> _compute(const int n_threads,
struct ggml_tensor* z, const sd::Tensor<float>& z,
bool decode_graph, bool decode_graph) override {
struct ggml_tensor** output,
struct ggml_context* output_ctx = nullptr) override {
if (true) { if (true) {
auto get_graph = [&]() -> struct ggml_cgraph* { sd::Tensor<float> input;
return build_graph(z, decode_graph); if (z.dim() == 4) {
}; input = z.unsqueeze(2);
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
} else { // chunk 1 result is weird
ae.clear_cache();
int64_t t = z->ne[2];
int i = 0;
auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph_partial(z, decode_graph, i);
};
struct ggml_tensor* out = nullptr;
bool res = GGMLRunner::compute(get_graph, n_threads, true, &out, output_ctx);
ae.clear_cache();
if (t == 1) {
*output = out;
return res;
} }
auto get_graph = [&]() -> ggml_cgraph* {
*output = ggml_new_tensor_4d(output_ctx, GGML_TYPE_F32, out->ne[0], out->ne[1], (t - 1) * 4 + 1, out->ne[3]); if (input.empty()) {
return build_graph(z, decode_graph);
auto copy_to_output = [&]() { } else {
for (int64_t i3 = 0; i3 < out->ne[3]; i3++) { return build_graph(input, decode_graph);
for (int64_t i2 = 0; i2 < out->ne[2]; i2++) {
for (int64_t i1 = 0; i1 < out->ne[1]; i1++) {
for (int64_t i0 = 0; i0 < out->ne[0]; i0++) {
float value = ggml_ext_tensor_get_f32(out, i0, i1, i2, i3);
int64_t offset = (i == 0) ? 0 : (1 + (i - 1) * 4);
ggml_ext_tensor_set_f32(*output, value, i0, i1, offset + i2, i3);
}
}
}
} }
}; };
auto result = restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, true),
input.empty() ? z.dim() : input.dim());
if (!result.empty() && z.dim() == 4) {
result.squeeze_(2);
}
return result;
} else { // chunk 1 result is weird
ae.clear_cache();
int64_t t = z.shape()[2];
int i = 0;
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph_partial(z, decode_graph, i);
};
auto out_opt = GGMLRunner::compute<float>(get_graph, n_threads, true);
if (!out_opt.has_value()) {
return {};
}
sd::Tensor<float> out = std::move(*out_opt);
ae.clear_cache();
if (t == 1) {
return out;
}
copy_to_output(); sd::Tensor<float> output = std::move(out);
out = ggml_new_tensor_4d(output_ctx, GGML_TYPE_F32, out->ne[0], out->ne[1], 4, out->ne[3]);
for (i = 1; i < t; i++) { for (i = 1; i < t; i++) {
res = res || GGMLRunner::compute(get_graph, n_threads, true, &out); auto chunk_opt = GGMLRunner::compute<float>(get_graph, n_threads, true);
if (!chunk_opt.has_value()) {
return {};
}
out = std::move(*chunk_opt);
ae.clear_cache(); ae.clear_cache();
copy_to_output(); output = sd::ops::concat(output, out, 2);
} }
free_cache_ctx_and_buffer(); free_cache_ctx_and_buffer();
return res; return output;
} }
} }
void test() { void test() {
struct ggml_init_params params; ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); ggml_context* ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr); GGML_ASSERT(ctx != nullptr);
if (true) { if (true) {
// cpu f32, pass // cpu f32, pass
// cpu f16, pass // cpu f16, pass
// cuda f16, pass // cuda f16, pass
// cuda f32, pass // cuda f32, pass
auto z = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 104, 60, 2, 16); auto z = sd::load_tensor_from_file_as_tensor<float>("wan_vae_z.bin");
ggml_set_f32(z, 0.5f); print_sd_tensor(z);
z = load_tensor_from_file(work_ctx, "wan_vae_z.bin"); sd::Tensor<float> out;
print_ggml_tensor(z);
struct ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
compute(8, z, true, &out, work_ctx); auto out_opt = _compute(8, z, true);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("decode test done in %ldms", t1 - t0); LOG_DEBUG("decode test done in %ldms", t1 - t0);
} }
}; };
@ -1315,10 +1370,10 @@ namespace WAN {
} }
} }
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx, virtual ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mask = nullptr) { ggml_tensor* mask = nullptr) {
// x: [N, n_token, dim] // x: [N, n_token, dim]
// pe: [n_token, d_head/2, 2, 2] // pe: [n_token, d_head/2, 2, 2]
// return [N, n_token, dim] // return [N, n_token, dim]
@ -1356,10 +1411,10 @@ namespace WAN {
bool qk_norm = true, bool qk_norm = true,
float eps = 1e-6) float eps = 1e-6)
: WanSelfAttention(dim, num_heads, qk_norm, eps) {} : WanSelfAttention(dim, num_heads, qk_norm, eps) {}
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx, virtual ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* context, ggml_tensor* context,
int64_t context_img_len) = 0; int64_t context_img_len) = 0;
}; };
class WanT2VCrossAttention : public WanCrossAttention { class WanT2VCrossAttention : public WanCrossAttention {
@ -1369,10 +1424,10 @@ namespace WAN {
bool qk_norm = true, bool qk_norm = true,
float eps = 1e-6) float eps = 1e-6)
: WanCrossAttention(dim, num_heads, qk_norm, eps) {} : WanCrossAttention(dim, num_heads, qk_norm, eps) {}
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* context, ggml_tensor* context,
int64_t context_img_len) override { int64_t context_img_len) override {
// x: [N, n_token, dim] // x: [N, n_token, dim]
// context: [N, n_context, dim] // context: [N, n_context, dim]
// context_img_len: unused // context_img_len: unused
@ -1417,10 +1472,10 @@ namespace WAN {
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* context, ggml_tensor* context,
int64_t context_img_len) override { int64_t context_img_len) override {
// x: [N, n_token, dim] // x: [N, n_token, dim]
// context: [N, context_img_len + context_txt_len, dim] // context: [N, context_img_len + context_txt_len, dim]
// return [N, n_token, dim] // return [N, n_token, dim]
@ -1465,7 +1520,7 @@ namespace WAN {
} }
}; };
static struct ggml_tensor* modulate_add(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* e) { static ggml_tensor* modulate_add(ggml_context* ctx, ggml_tensor* x, ggml_tensor* e) {
// x: [N, n_token, dim] // x: [N, n_token, dim]
// e: [N, 1, dim] or [N, T, 1, dim] // e: [N, 1, dim] or [N, T, 1, dim]
if (ggml_n_dims(e) == 3) { if (ggml_n_dims(e) == 3) {
@ -1479,7 +1534,7 @@ namespace WAN {
return x; return x;
} }
static struct ggml_tensor* modulate_mul(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* e) { static ggml_tensor* modulate_mul(ggml_context* ctx, ggml_tensor* x, ggml_tensor* e) {
// x: [N, n_token, dim] // x: [N, n_token, dim]
// e: [N, 1, dim] or [N, T, 1, dim] // e: [N, 1, dim] or [N, T, 1, dim]
if (ggml_n_dims(e) == 3) { if (ggml_n_dims(e) == 3) {
@ -1497,7 +1552,7 @@ namespace WAN {
protected: protected:
int64_t dim; int64_t dim;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32); enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1); params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1);
} }
@ -1531,12 +1586,12 @@ namespace WAN {
blocks["ffn.2"] = std::shared_ptr<GGMLBlock>(new Linear(ffn_dim, dim)); blocks["ffn.2"] = std::shared_ptr<GGMLBlock>(new Linear(ffn_dim, dim));
} }
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx, virtual ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* e, ggml_tensor* e,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* context, ggml_tensor* context,
int64_t context_img_len = 257) { int64_t context_img_len = 257) {
// x: [N, n_token, dim] // x: [N, n_token, dim]
// e: [N, 6, dim] or [N, T, 6, dim] // e: [N, 6, dim] or [N, T, 6, dim]
// context: [N, context_img_len + context_txt_len, dim] // context: [N, context_img_len + context_txt_len, dim]
@ -1585,7 +1640,7 @@ namespace WAN {
class VaceWanAttentionBlock : public WanAttentionBlock { class VaceWanAttentionBlock : public WanAttentionBlock {
protected: protected:
int block_id; int block_id;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32); enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1); params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1);
} }
@ -1607,11 +1662,11 @@ namespace WAN {
} }
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx, std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
struct ggml_tensor* c, ggml_tensor* c,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* e, ggml_tensor* e,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* context, ggml_tensor* context,
int64_t context_img_len = 257) { int64_t context_img_len = 257) {
// x: [N, n_token, dim] // x: [N, n_token, dim]
// e: [N, 6, dim] or [N, T, 6, dim] // e: [N, 6, dim] or [N, T, 6, dim]
@ -1637,7 +1692,7 @@ namespace WAN {
protected: protected:
int64_t dim; int64_t dim;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32); enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 2, 1); params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 2, 1);
} }
@ -1654,9 +1709,9 @@ namespace WAN {
blocks["head"] = std::shared_ptr<GGMLBlock>(new Linear(dim, out_dim)); blocks["head"] = std::shared_ptr<GGMLBlock>(new Linear(dim, out_dim));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* e) { ggml_tensor* e) {
// x: [N, n_token, dim] // x: [N, n_token, dim]
// e: [N, dim] or [N, T, dim] // e: [N, dim] or [N, T, dim]
// return [N, n_token, out_dim] // return [N, n_token, out_dim]
@ -1684,7 +1739,7 @@ namespace WAN {
int64_t in_dim; int64_t in_dim;
int64_t flf_pos_embed_token_number; int64_t flf_pos_embed_token_number;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
if (flf_pos_embed_token_number > 0) { if (flf_pos_embed_token_number > 0) {
params["emb_pos"] = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, in_dim, flf_pos_embed_token_number, 1); params["emb_pos"] = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, in_dim, flf_pos_embed_token_number, 1);
} }
@ -1702,8 +1757,8 @@ namespace WAN {
blocks["proj.4"] = std::shared_ptr<GGMLBlock>(new LayerNorm(out_dim)); blocks["proj.4"] = std::shared_ptr<GGMLBlock>(new LayerNorm(out_dim));
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* image_embeds) { ggml_tensor* image_embeds) {
if (flf_pos_embed_token_number > 0) { if (flf_pos_embed_token_number > 0) {
auto emb_pos = params["emb_pos"]; auto emb_pos = params["emb_pos"];
@ -1822,8 +1877,8 @@ namespace WAN {
} }
} }
struct ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx, ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx,
struct ggml_tensor* x) { ggml_tensor* x) {
int64_t W = x->ne[0]; int64_t W = x->ne[0];
int64_t H = x->ne[1]; int64_t H = x->ne[1];
int64_t T = x->ne[2]; int64_t T = x->ne[2];
@ -1835,11 +1890,11 @@ namespace WAN {
return x; return x;
} }
struct ggml_tensor* unpatchify(struct ggml_context* ctx, ggml_tensor* unpatchify(ggml_context* ctx,
struct ggml_tensor* x, ggml_tensor* x,
int64_t t_len, int64_t t_len,
int64_t h_len, int64_t h_len,
int64_t w_len) { int64_t w_len) {
// x: [N, t_len*h_len*w_len, pt*ph*pw*C] // x: [N, t_len*h_len*w_len, pt*ph*pw*C]
// return: [N*C, t_len*pt, h_len*ph, w_len*pw] // return: [N*C, t_len*pt, h_len*ph, w_len*pw]
int64_t N = x->ne[3]; int64_t N = x->ne[3];
@ -1862,15 +1917,15 @@ namespace WAN {
return x; return x;
} }
struct ggml_tensor* forward_orig(GGMLRunnerContext* ctx, ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* timestep, ggml_tensor* timestep,
struct ggml_tensor* context, ggml_tensor* context,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* clip_fea = nullptr, ggml_tensor* clip_fea = nullptr,
struct ggml_tensor* vace_context = nullptr, ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f, float vace_strength = 1.f,
int64_t N = 1) { int64_t N = 1) {
// x: [N*C, T, H, W], C => in_dim // x: [N*C, T, H, W], C => in_dim
// vace_context: [N*vace_in_dim, T, H, W] // vace_context: [N*vace_in_dim, T, H, W]
// timestep: [N,] or [T] // timestep: [N,] or [T]
@ -1956,16 +2011,16 @@ namespace WAN {
return x; return x;
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* timestep, ggml_tensor* timestep,
struct ggml_tensor* context, ggml_tensor* context,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* clip_fea = nullptr, ggml_tensor* clip_fea = nullptr,
struct ggml_tensor* time_dim_concat = nullptr, ggml_tensor* time_dim_concat = nullptr,
struct ggml_tensor* vace_context = nullptr, ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f, float vace_strength = 1.f,
int64_t N = 1) { int64_t N = 1) {
// Forward pass of DiT. // Forward pass of DiT.
// x: [N*C, T, H, W] // x: [N*C, T, H, W]
// timestep: [N,] // timestep: [N,]
@ -2130,27 +2185,27 @@ namespace WAN {
return desc; return desc;
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
wan.get_param_tensors(tensors, prefix); wan.get_param_tensors(tensors, prefix);
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* x, ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps_tensor,
struct ggml_tensor* context, const sd::Tensor<float>& context_tensor = {},
struct ggml_tensor* clip_fea = nullptr, const sd::Tensor<float>& clip_fea_tensor = {},
struct ggml_tensor* c_concat = nullptr, const sd::Tensor<float>& c_concat_tensor = {},
struct ggml_tensor* time_dim_concat = nullptr, const sd::Tensor<float>& time_dim_concat_tensor = {},
struct ggml_tensor* vace_context = nullptr, const sd::Tensor<float>& vace_context_tensor = {},
float vace_strength = 1.f) { float vace_strength = 1.f) {
struct ggml_cgraph* gf = new_graph_custom(WAN_GRAPH_SIZE); ggml_cgraph* gf = new_graph_custom(WAN_GRAPH_SIZE);
x = to_backend(x); ggml_tensor* x = make_input(x_tensor);
timesteps = to_backend(timesteps); ggml_tensor* timesteps = make_input(timesteps_tensor);
context = to_backend(context); ggml_tensor* context = make_optional_input(context_tensor);
clip_fea = to_backend(clip_fea); ggml_tensor* clip_fea = make_optional_input(clip_fea_tensor);
c_concat = to_backend(c_concat); ggml_tensor* c_concat = make_optional_input(c_concat_tensor);
time_dim_concat = to_backend(time_dim_concat); ggml_tensor* time_dim_concat = make_optional_input(time_dim_concat_tensor);
vace_context = to_backend(vace_context); ggml_tensor* vace_context = make_optional_input(vace_context_tensor);
pe_vec = Rope::gen_wan_pe(static_cast<int>(x->ne[2]), pe_vec = Rope::gen_wan_pe(static_cast<int>(x->ne[2]),
static_cast<int>(x->ne[1]), static_cast<int>(x->ne[1]),
@ -2175,75 +2230,75 @@ namespace WAN {
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_tensor* out = wan.forward(&runner_ctx, ggml_tensor* out = wan.forward(&runner_ctx,
x, x,
timesteps, timesteps,
context, context,
pe, pe,
clip_fea, clip_fea,
time_dim_concat, time_dim_concat,
vace_context, vace_context,
vace_strength); vace_strength);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
bool compute(int n_threads, sd::Tensor<float> compute(int n_threads,
struct ggml_tensor* x, const sd::Tensor<float>& x,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps,
struct ggml_tensor* context, const sd::Tensor<float>& context = {},
struct ggml_tensor* clip_fea = nullptr, const sd::Tensor<float>& clip_fea = {},
struct ggml_tensor* c_concat = nullptr, const sd::Tensor<float>& c_concat = {},
struct ggml_tensor* time_dim_concat = nullptr, const sd::Tensor<float>& time_dim_concat = {},
struct ggml_tensor* vace_context = nullptr, const sd::Tensor<float>& vace_context = {},
float vace_strength = 1.f, float vace_strength = 1.f) {
struct ggml_tensor** output = nullptr, auto get_graph = [&]() -> ggml_cgraph* {
struct ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(x, timesteps, context, clip_fea, c_concat, time_dim_concat, vace_context, vace_strength); return build_graph(x, timesteps, context, clip_fea, c_concat, time_dim_concat, vace_context, vace_strength);
}; };
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx); return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
} }
void test() { void test() {
struct ggml_init_params params; ggml_init_params params;
params.mem_size = static_cast<size_t>(200 * 1024 * 1024); // 200 MB params.mem_size = static_cast<size_t>(200 * 1024 * 1024); // 200 MB
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); ggml_context* ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr); GGML_ASSERT(ctx != nullptr);
{ {
// cpu f16: pass // cpu f16: pass
// cuda f16: pass // cuda f16: pass
// cpu q8_0: pass // cpu q8_0: pass
// auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 104, 60, 1, 16); // auto x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 104, 60, 1, 16);
// ggml_set_f32(x, 0.01f); // ggml_set_f32(x, 0.01f);
auto x = load_tensor_from_file(work_ctx, "wan_dit_x.bin"); auto x = sd::load_tensor_from_file_as_tensor<float>("wan_dit_x.bin");
print_ggml_tensor(x); print_sd_tensor(x);
std::vector<float> timesteps_vec(3, 1000.f); std::vector<float> timesteps_vec(3, 1000.f);
timesteps_vec[0] = 0.f; timesteps_vec[0] = 0.f;
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec); auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
// auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 4096, 512, 1); // auto context = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 4096, 512, 1);
// ggml_set_f32(context, 0.01f); // ggml_set_f32(context, 0.01f);
auto context = load_tensor_from_file(work_ctx, "wan_dit_context.bin"); auto context = sd::load_tensor_from_file_as_tensor<float>("wan_dit_context.bin");
print_ggml_tensor(context); print_sd_tensor(context);
// auto clip_fea = load_tensor_from_file(work_ctx, "wan_dit_clip_fea.bin"); // auto clip_fea = load_tensor_from_file(ctx, "wan_dit_clip_fea.bin");
// print_ggml_tensor(clip_fea); // print_ggml_tensor(clip_fea);
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, nullptr, nullptr, nullptr, nullptr, 1.f, &out, work_ctx); auto out_opt = compute(8, x, timesteps, context, {}, {}, {}, {}, 1.f);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("wan test done in %lldms", t1 - t0); LOG_DEBUG("wan test done in %lldms", t1 - t0);
} }
} }

View File

@ -42,10 +42,10 @@ namespace ZImage {
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mask = nullptr) { ggml_tensor* mask = nullptr) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
int64_t n_token = x->ne[1]; int64_t n_token = x->ne[1];
int64_t N = x->ne[2]; int64_t N = x->ne[2];
@ -124,23 +124,23 @@ namespace ZImage {
blocks["w3"] = std::make_shared<Linear>(dim, hidden_dim, false); blocks["w3"] = std::make_shared<Linear>(dim, hidden_dim, false);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto w1 = std::dynamic_pointer_cast<Linear>(blocks["w1"]); auto w1 = std::dynamic_pointer_cast<Linear>(blocks["w1"]);
auto w2 = std::dynamic_pointer_cast<Linear>(blocks["w2"]); auto w2 = std::dynamic_pointer_cast<Linear>(blocks["w2"]);
auto w3 = std::dynamic_pointer_cast<Linear>(blocks["w3"]); auto w3 = std::dynamic_pointer_cast<Linear>(blocks["w3"]);
auto x1 = w1->forward(ctx, x); auto x1 = w1->forward(ctx, x);
auto x3 = w3->forward(ctx, x); auto x3 = w3->forward(ctx, x);
x = ggml_mul(ctx->ggml_ctx, ggml_silu(ctx->ggml_ctx, x1), x3); x = ggml_swiglu_split(ctx->ggml_ctx, x1, x3);
x = w2->forward(ctx, x); x = w2->forward(ctx, x);
return x; return x;
} }
}; };
__STATIC_INLINE__ struct ggml_tensor* modulate(struct ggml_context* ctx, __STATIC_INLINE__ ggml_tensor* modulate(ggml_context* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* scale) { ggml_tensor* scale) {
// x: [N, L, C] // x: [N, L, C]
// scale: [N, C] // scale: [N, C]
scale = ggml_reshape_3d(ctx, scale, scale->ne[0], 1, scale->ne[1]); // [N, 1, C] scale = ggml_reshape_3d(ctx, scale, scale->ne[0], 1, scale->ne[1]); // [N, 1, C]
@ -175,11 +175,11 @@ namespace ZImage {
} }
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* pe, ggml_tensor* pe,
struct ggml_tensor* mask = nullptr, ggml_tensor* mask = nullptr,
struct ggml_tensor* adaln_input = nullptr) { ggml_tensor* adaln_input = nullptr) {
auto attention = std::dynamic_pointer_cast<JointAttention>(blocks["attention"]); auto attention = std::dynamic_pointer_cast<JointAttention>(blocks["attention"]);
auto feed_forward = std::dynamic_pointer_cast<FeedForward>(blocks["feed_forward"]); auto feed_forward = std::dynamic_pointer_cast<FeedForward>(blocks["feed_forward"]);
auto attention_norm1 = std::dynamic_pointer_cast<RMSNorm>(blocks["attention_norm1"]); auto attention_norm1 = std::dynamic_pointer_cast<RMSNorm>(blocks["attention_norm1"]);
@ -241,9 +241,9 @@ namespace ZImage {
blocks["adaLN_modulation.1"] = std::make_shared<Linear>(MIN(hidden_size, ADALN_EMBED_DIM), hidden_size); blocks["adaLN_modulation.1"] = std::make_shared<Linear>(MIN(hidden_size, ADALN_EMBED_DIM), hidden_size);
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* c) { ggml_tensor* c) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
// c: [N, hidden_size] // c: [N, hidden_size]
// return: [N, n_token, patch_size * patch_size * out_channels] // return: [N, n_token, patch_size * patch_size * out_channels]
@ -284,7 +284,7 @@ namespace ZImage {
protected: protected:
ZImageParams z_image_params; ZImageParams z_image_params;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override { void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
params["cap_pad_token"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, z_image_params.hidden_size); params["cap_pad_token"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, z_image_params.hidden_size);
params["x_pad_token"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, z_image_params.hidden_size); params["x_pad_token"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, z_image_params.hidden_size);
} }
@ -346,74 +346,11 @@ namespace ZImage {
blocks["final_layer"] = std::make_shared<FinalLayer>(z_image_params.hidden_size, z_image_params.patch_size, z_image_params.out_channels); blocks["final_layer"] = std::make_shared<FinalLayer>(z_image_params.hidden_size, z_image_params.patch_size, z_image_params.out_channels);
} }
struct ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx, ggml_tensor* forward_core(GGMLRunnerContext* ctx,
struct ggml_tensor* x) { ggml_tensor* x,
int64_t W = x->ne[0]; ggml_tensor* timestep,
int64_t H = x->ne[1]; ggml_tensor* context,
ggml_tensor* pe) {
int pad_h = (z_image_params.patch_size - H % z_image_params.patch_size) % z_image_params.patch_size;
int pad_w = (z_image_params.patch_size - W % z_image_params.patch_size) % z_image_params.patch_size;
x = ggml_ext_pad(ctx->ggml_ctx, x, pad_w, pad_h, 0, 0, ctx->circular_x_enabled, ctx->circular_y_enabled);
return x;
}
struct ggml_tensor* patchify(struct ggml_context* ctx,
struct ggml_tensor* x) {
// x: [N, C, H, W]
// return: [N, h*w, patch_size*patch_size*C]
int64_t N = x->ne[3];
int64_t C = x->ne[2];
int64_t H = x->ne[1];
int64_t W = x->ne[0];
int64_t p = z_image_params.patch_size;
int64_t h = H / z_image_params.patch_size;
int64_t w = W / z_image_params.patch_size;
GGML_ASSERT(h * p == H && w * p == W);
x = ggml_reshape_4d(ctx, x, p, w, p, h * C * N); // [N*C*h, p, w, p]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, w, p, p]
x = ggml_reshape_4d(ctx, x, p * p, w * h, C, N); // [N, C, h*w, p*p]
x = ggml_cont(ctx, ggml_ext_torch_permute(ctx, x, 2, 0, 1, 3)); // [N, h*w, C, p*p]
x = ggml_reshape_3d(ctx, x, C * p * p, w * h, N); // [N, h*w, p*p*C]
return x;
}
struct ggml_tensor* process_img(GGMLRunnerContext* ctx,
struct ggml_tensor* x) {
x = pad_to_patch_size(ctx, x);
x = patchify(ctx->ggml_ctx, x);
return x;
}
struct ggml_tensor* unpatchify(struct ggml_context* ctx,
struct ggml_tensor* x,
int64_t h,
int64_t w) {
// x: [N, h*w, patch_size*patch_size*C]
// return: [N, C, H, W]
int64_t N = x->ne[2];
int64_t C = x->ne[0] / z_image_params.patch_size / z_image_params.patch_size;
int64_t H = h * z_image_params.patch_size;
int64_t W = w * z_image_params.patch_size;
int64_t p = z_image_params.patch_size;
GGML_ASSERT(C * p * p == x->ne[0]);
x = ggml_reshape_4d(ctx, x, C, p * p, w * h, N); // [N, h*w, p*p, C]
x = ggml_cont(ctx, ggml_ext_torch_permute(ctx, x, 1, 2, 0, 3)); // [N, C, h*w, p*p]
x = ggml_reshape_4d(ctx, x, p, p, w, h * C * N); // [N*C*h, w, p, p]
x = ggml_cont(ctx, ggml_permute(ctx, x, 0, 2, 1, 3)); // [N*C*h, p, w, p]
x = ggml_reshape_4d(ctx, x, W, H, C, N); // [N, C, h*p, w*p]
return x;
}
struct ggml_tensor* forward_core(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* timestep,
struct ggml_tensor* context,
struct ggml_tensor* pe) {
auto x_embedder = std::dynamic_pointer_cast<Linear>(blocks["x_embedder"]); auto x_embedder = std::dynamic_pointer_cast<Linear>(blocks["x_embedder"]);
auto t_embedder = std::dynamic_pointer_cast<TimestepEmbedder>(blocks["t_embedder"]); auto t_embedder = std::dynamic_pointer_cast<TimestepEmbedder>(blocks["t_embedder"]);
auto cap_embedder_0 = std::dynamic_pointer_cast<RMSNorm>(blocks["cap_embedder.0"]); auto cap_embedder_0 = std::dynamic_pointer_cast<RMSNorm>(blocks["cap_embedder.0"]);
@ -477,12 +414,12 @@ namespace ZImage {
return img; return img;
} }
struct ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x, ggml_tensor* x,
struct ggml_tensor* timestep, ggml_tensor* timestep,
struct ggml_tensor* context, ggml_tensor* context,
struct ggml_tensor* pe, ggml_tensor* pe,
std::vector<ggml_tensor*> ref_latents = {}) { std::vector<ggml_tensor*> ref_latents = {}) {
// Forward pass of DiT. // Forward pass of DiT.
// x: [N, C, H, W] // x: [N, C, H, W]
// timestep: [N,] // timestep: [N,]
@ -495,27 +432,22 @@ namespace ZImage {
int64_t C = x->ne[2]; int64_t C = x->ne[2];
int64_t N = x->ne[3]; int64_t N = x->ne[3];
auto img = process_img(ctx, x); int patch_size = z_image_params.patch_size;
auto img = DiT::pad_and_patchify(ctx, x, patch_size, patch_size, false);
uint64_t n_img_token = img->ne[1]; uint64_t n_img_token = img->ne[1];
if (ref_latents.size() > 0) { if (ref_latents.size() > 0) {
for (ggml_tensor* ref : ref_latents) { for (ggml_tensor* ref : ref_latents) {
ref = process_img(ctx, ref); ref = DiT::pad_and_patchify(ctx, ref, patch_size, patch_size, false);
img = ggml_concat(ctx->ggml_ctx, img, ref, 1); img = ggml_concat(ctx->ggml_ctx, img, ref, 1);
} }
} }
int64_t h_len = ((H + (z_image_params.patch_size / 2)) / z_image_params.patch_size);
int64_t w_len = ((W + (z_image_params.patch_size / 2)) / z_image_params.patch_size);
auto out = forward_core(ctx, img, timestep, context, pe); auto out = forward_core(ctx, img, timestep, context, pe);
out = ggml_ext_slice(ctx->ggml_ctx, out, 1, 0, n_img_token); // [N, n_img_token, ph*pw*C] out = ggml_ext_slice(ctx->ggml_ctx, out, 1, 0, n_img_token); // [N, n_img_token, ph*pw*C]
out = unpatchify(ctx->ggml_ctx, out, h_len, w_len); // [N, C, H + pad_h, W + pad_w] out = DiT::unpatchify_and_crop(ctx->ggml_ctx, out, H, W, patch_size, patch_size, false); // [N, C, H, W]
// slice
out = ggml_ext_slice(ctx->ggml_ctx, out, 1, 0, H); // [N, C, H, W + pad_w]
out = ggml_ext_slice(ctx->ggml_ctx, out, 0, 0, W); // [N, C, H, W]
out = ggml_ext_scale(ctx->ggml_ctx, out, -1.f); out = ggml_ext_scale(ctx->ggml_ctx, out, -1.f);
@ -545,24 +477,25 @@ namespace ZImage {
return "z_image"; return "z_image";
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
z_image.get_param_tensors(tensors, prefix); z_image.get_param_tensors(tensors, prefix);
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* x, ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps_tensor,
struct ggml_tensor* context, const sd::Tensor<float>& context_tensor,
std::vector<ggml_tensor*> ref_latents = {}, const std::vector<sd::Tensor<float>>& ref_latents_tensor = {},
bool increase_ref_index = false) { bool increase_ref_index = false) {
ggml_cgraph* gf = new_graph_custom(Z_IMAGE_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
GGML_ASSERT(x->ne[3] == 1); GGML_ASSERT(x->ne[3] == 1);
struct ggml_cgraph* gf = new_graph_custom(Z_IMAGE_GRAPH_SIZE); GGML_ASSERT(!context_tensor.empty());
ggml_tensor* context = make_input(context_tensor);
x = to_backend(x); std::vector<ggml_tensor*> ref_latents;
context = to_backend(context); ref_latents.reserve(ref_latents_tensor.size());
timesteps = to_backend(timesteps); for (const auto& ref_latent_tensor : ref_latents_tensor) {
ref_latents.push_back(make_input(ref_latent_tensor));
for (int i = 0; i < ref_latents.size(); i++) {
ref_latents[i] = to_backend(ref_latents[i]);
} }
pe_vec = Rope::gen_z_image_pe(static_cast<int>(x->ne[1]), pe_vec = Rope::gen_z_image_pe(static_cast<int>(x->ne[1]),
@ -586,66 +519,71 @@ namespace ZImage {
set_backend_tensor_data(pe, pe_vec.data()); set_backend_tensor_data(pe, pe_vec.data());
auto runner_ctx = get_context(); auto runner_ctx = get_context();
struct ggml_tensor* out = z_image.forward(&runner_ctx, ggml_tensor* out = z_image.forward(&runner_ctx,
x, x,
timesteps, timesteps,
context, context,
pe, pe,
ref_latents); ref_latents);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
bool compute(int n_threads, sd::Tensor<float> compute(int n_threads,
struct ggml_tensor* x, const sd::Tensor<float>& x,
struct ggml_tensor* timesteps, const sd::Tensor<float>& timesteps,
struct ggml_tensor* context, const sd::Tensor<float>& context,
std::vector<ggml_tensor*> ref_latents = {}, const std::vector<sd::Tensor<float>>& ref_latents = {},
bool increase_ref_index = false, bool increase_ref_index = false) {
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size] // context: [N, max_position, hidden_size]
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, ref_latents, increase_ref_index); return build_graph(x, timesteps, context, ref_latents, increase_ref_index);
}; };
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx); return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
} }
void test() { void test() {
struct ggml_init_params params; ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); ggml_context* ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr); GGML_ASSERT(ctx != nullptr);
{ {
// auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 16, 1); // auto x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 16, 16, 16, 1);
// ggml_set_f32(x, 0.01f); // ggml_set_f32(x, 0.01f);
auto x = load_tensor_from_file(work_ctx, "./z_image_x.bin"); auto x = sd::load_tensor_from_file_as_tensor<float>("./z_image_x.bin");
print_ggml_tensor(x); print_sd_tensor(x);
std::vector<float> timesteps_vec(1, 0.f); std::vector<float> timesteps_vec(1, 0.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec); auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
// auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 2560, 256, 1); // auto context = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 2560, 256, 1);
// ggml_set_f32(context, 0.01f); // ggml_set_f32(context, 0.01f);
auto context = load_tensor_from_file(work_ctx, "./z_image_context.bin"); auto context = sd::load_tensor_from_file_as_tensor<float>("./z_image_context.bin");
print_ggml_tensor(context); print_sd_tensor(context);
struct ggml_tensor* out = nullptr; sd::Tensor<float> out;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, {}, false, &out, work_ctx); auto out_opt = compute(8,
int64_t t1 = ggml_time_ms(); x,
timesteps,
context,
{},
false);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out); GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("z_image test done in %lldms", t1 - t0); LOG_DEBUG("z_image test done in %lldms", t1 - t0);
} }
} }

File diff suppressed because it is too large Load Diff