mirror of
https://github.com/leejet/stable-diffusion.cpp.git
synced 2026-03-24 10:18:51 +00:00
Compare commits
23 Commits
master-514
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
545fac4f3f | ||
|
|
5265a5efa1 | ||
|
|
84cbd88df1 | ||
|
|
997bb11fb6 | ||
|
|
862a6586cb | ||
|
|
61d8331ef3 | ||
|
|
acc3bf1fdc | ||
|
|
83eabd7c01 | ||
|
|
630ee03f23 | ||
|
|
f6968bc589 | ||
|
|
adfef62900 | ||
|
|
6fa7ca9317 | ||
|
|
d6dd6d7b55 | ||
|
|
dea4980f4e | ||
|
|
c8fb3d2458 | ||
|
|
3d33caaef8 | ||
|
|
9b424db0f4 | ||
|
|
d95062737e | ||
|
|
7c880f80c7 | ||
|
|
aaa8a51bd8 | ||
|
|
ba35dd734e | ||
|
|
d41f5fff69 | ||
|
|
810ef0cf76 |
91
.github/workflows/build.yml
vendored
91
.github/workflows/build.yml
vendored
@ -21,11 +21,13 @@ on:
|
|||||||
"**/*.c",
|
"**/*.c",
|
||||||
"**/*.cpp",
|
"**/*.cpp",
|
||||||
"**/*.cu",
|
"**/*.cu",
|
||||||
|
"examples/server/frontend/**",
|
||||||
]
|
]
|
||||||
pull_request:
|
pull_request:
|
||||||
types: [opened, synchronize, reopened]
|
types: [opened, synchronize, reopened]
|
||||||
paths:
|
paths:
|
||||||
[
|
[
|
||||||
|
".github/workflows/**",
|
||||||
"**/CMakeLists.txt",
|
"**/CMakeLists.txt",
|
||||||
"**/Makefile",
|
"**/Makefile",
|
||||||
"**/*.h",
|
"**/*.h",
|
||||||
@ -33,6 +35,7 @@ on:
|
|||||||
"**/*.c",
|
"**/*.c",
|
||||||
"**/*.cpp",
|
"**/*.cpp",
|
||||||
"**/*.cu",
|
"**/*.cu",
|
||||||
|
"examples/server/frontend/**",
|
||||||
]
|
]
|
||||||
|
|
||||||
env:
|
env:
|
||||||
@ -53,6 +56,16 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
|
- name: Setup Node
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: 20
|
||||||
|
|
||||||
|
- name: Setup pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
|
||||||
- name: Dependencies
|
- name: Dependencies
|
||||||
id: depends
|
id: depends
|
||||||
run: |
|
run: |
|
||||||
@ -70,7 +83,7 @@ jobs:
|
|||||||
- name: Get commit hash
|
- name: Get commit hash
|
||||||
id: commit
|
id: commit
|
||||||
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
||||||
uses: pr-mpt/actions-commit-hash@v2
|
uses: prompt/actions-commit-hash@v2
|
||||||
|
|
||||||
- name: Fetch system info
|
- name: Fetch system info
|
||||||
id: system-info
|
id: system-info
|
||||||
@ -106,6 +119,16 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
|
- name: Setup Node
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: 20
|
||||||
|
|
||||||
|
- name: Setup pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
|
||||||
- name: Dependencies
|
- name: Dependencies
|
||||||
id: depends
|
id: depends
|
||||||
run: |
|
run: |
|
||||||
@ -123,7 +146,7 @@ jobs:
|
|||||||
- name: Get commit hash
|
- name: Get commit hash
|
||||||
id: commit
|
id: commit
|
||||||
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
||||||
uses: pr-mpt/actions-commit-hash@v2
|
uses: prompt/actions-commit-hash@v2
|
||||||
|
|
||||||
- name: Fetch system info
|
- name: Fetch system info
|
||||||
id: system-info
|
id: system-info
|
||||||
@ -162,7 +185,7 @@ jobs:
|
|||||||
|
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
variant: [musa, sycl, vulkan]
|
variant: [musa, sycl, vulkan, cuda]
|
||||||
|
|
||||||
env:
|
env:
|
||||||
REGISTRY: ghcr.io
|
REGISTRY: ghcr.io
|
||||||
@ -174,10 +197,20 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
|
- name: Setup Node
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: 20
|
||||||
|
|
||||||
|
- name: Setup pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
|
||||||
- name: Get commit hash
|
- name: Get commit hash
|
||||||
id: commit
|
id: commit
|
||||||
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
||||||
uses: pr-mpt/actions-commit-hash@v2
|
uses: prompt/actions-commit-hash@v2
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
@ -223,6 +256,16 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
|
- name: Setup Node
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: 20
|
||||||
|
|
||||||
|
- name: Setup pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
|
||||||
- name: Dependencies
|
- name: Dependencies
|
||||||
id: depends
|
id: depends
|
||||||
run: |
|
run: |
|
||||||
@ -240,7 +283,7 @@ jobs:
|
|||||||
- name: Get commit hash
|
- name: Get commit hash
|
||||||
id: commit
|
id: commit
|
||||||
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
||||||
uses: pr-mpt/actions-commit-hash@v2
|
uses: prompt/actions-commit-hash@v2
|
||||||
|
|
||||||
- name: Fetch system info
|
- name: Fetch system info
|
||||||
id: system-info
|
id: system-info
|
||||||
@ -294,6 +337,16 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
|
- name: Setup Node
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: 20
|
||||||
|
|
||||||
|
- name: Setup pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
|
||||||
- name: Install cuda-toolkit
|
- name: Install cuda-toolkit
|
||||||
id: cuda-toolkit
|
id: cuda-toolkit
|
||||||
if: ${{ matrix.build == 'cuda12' }}
|
if: ${{ matrix.build == 'cuda12' }}
|
||||||
@ -340,7 +393,7 @@ jobs:
|
|||||||
- name: Get commit hash
|
- name: Get commit hash
|
||||||
id: commit
|
id: commit
|
||||||
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
||||||
uses: pr-mpt/actions-commit-hash@v2
|
uses: prompt/actions-commit-hash@v2
|
||||||
|
|
||||||
- name: Pack artifacts
|
- name: Pack artifacts
|
||||||
id: pack_artifacts
|
id: pack_artifacts
|
||||||
@ -399,6 +452,16 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
|
- name: Setup Node
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: 20
|
||||||
|
|
||||||
|
- name: Setup pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
|
||||||
- name: Cache ROCm Installation
|
- name: Cache ROCm Installation
|
||||||
id: cache-rocm
|
id: cache-rocm
|
||||||
uses: actions/cache@v4
|
uses: actions/cache@v4
|
||||||
@ -463,7 +526,7 @@ jobs:
|
|||||||
- name: Get commit hash
|
- name: Get commit hash
|
||||||
id: commit
|
id: commit
|
||||||
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
||||||
uses: pr-mpt/actions-commit-hash@v2
|
uses: prompt/actions-commit-hash@v2
|
||||||
|
|
||||||
- name: Pack artifacts
|
- name: Pack artifacts
|
||||||
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
||||||
@ -502,6 +565,16 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
submodules: recursive
|
submodules: recursive
|
||||||
|
|
||||||
|
- name: Setup Node
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: 20
|
||||||
|
|
||||||
|
- name: Setup pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
|
||||||
- name: Free disk space
|
- name: Free disk space
|
||||||
run: |
|
run: |
|
||||||
# Remove preinstalled SDKs and caches not needed for this job
|
# Remove preinstalled SDKs and caches not needed for this job
|
||||||
@ -581,7 +654,7 @@ jobs:
|
|||||||
- name: Get commit hash
|
- name: Get commit hash
|
||||||
id: commit
|
id: commit
|
||||||
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
|
||||||
uses: pr-mpt/actions-commit-hash@v2
|
uses: prompt/actions-commit-hash@v2
|
||||||
|
|
||||||
- name: Prepare artifacts
|
- name: Prepare artifacts
|
||||||
id: prepare_artifacts
|
id: prepare_artifacts
|
||||||
@ -660,7 +733,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Get commit hash
|
- name: Get commit hash
|
||||||
id: commit
|
id: commit
|
||||||
uses: pr-mpt/actions-commit-hash@v2
|
uses: prompt/actions-commit-hash@v2
|
||||||
|
|
||||||
- name: Create release
|
- name: Create release
|
||||||
id: create_release
|
id: create_release
|
||||||
|
|||||||
3
.gitmodules
vendored
3
.gitmodules
vendored
@ -1,3 +1,6 @@
|
|||||||
[submodule "ggml"]
|
[submodule "ggml"]
|
||||||
path = ggml
|
path = ggml
|
||||||
url = https://github.com/ggml-org/ggml.git
|
url = https://github.com/ggml-org/ggml.git
|
||||||
|
[submodule "examples/server/frontend"]
|
||||||
|
path = examples/server/frontend
|
||||||
|
url = https://github.com/leejet/stable-ui.git
|
||||||
|
|||||||
@ -36,7 +36,6 @@ option(SD_VULKAN "sd: vulkan backend" OFF)
|
|||||||
option(SD_OPENCL "sd: opencl backend" OFF)
|
option(SD_OPENCL "sd: opencl backend" OFF)
|
||||||
option(SD_SYCL "sd: sycl backend" OFF)
|
option(SD_SYCL "sd: sycl backend" OFF)
|
||||||
option(SD_MUSA "sd: musa backend" OFF)
|
option(SD_MUSA "sd: musa backend" OFF)
|
||||||
option(SD_FAST_SOFTMAX "sd: x1.5 faster softmax, indeterministic (sometimes, same seed don't generate same image), cuda only" OFF)
|
|
||||||
option(SD_BUILD_SHARED_LIBS "sd: build shared libs" OFF)
|
option(SD_BUILD_SHARED_LIBS "sd: build shared libs" OFF)
|
||||||
option(SD_BUILD_SHARED_GGML_LIB "sd: build ggml as a separate shared lib" OFF)
|
option(SD_BUILD_SHARED_GGML_LIB "sd: build ggml as a separate shared lib" OFF)
|
||||||
option(SD_USE_SYSTEM_GGML "sd: use system-installed GGML library" OFF)
|
option(SD_USE_SYSTEM_GGML "sd: use system-installed GGML library" OFF)
|
||||||
@ -70,18 +69,12 @@ if (SD_HIPBLAS)
|
|||||||
message("-- Use HIPBLAS as backend stable-diffusion")
|
message("-- Use HIPBLAS as backend stable-diffusion")
|
||||||
set(GGML_HIP ON)
|
set(GGML_HIP ON)
|
||||||
add_definitions(-DSD_USE_CUDA)
|
add_definitions(-DSD_USE_CUDA)
|
||||||
if(SD_FAST_SOFTMAX)
|
|
||||||
set(GGML_CUDA_FAST_SOFTMAX ON)
|
|
||||||
endif()
|
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if(SD_MUSA)
|
if(SD_MUSA)
|
||||||
message("-- Use MUSA as backend stable-diffusion")
|
message("-- Use MUSA as backend stable-diffusion")
|
||||||
set(GGML_MUSA ON)
|
set(GGML_MUSA ON)
|
||||||
add_definitions(-DSD_USE_CUDA)
|
add_definitions(-DSD_USE_CUDA)
|
||||||
if(SD_FAST_SOFTMAX)
|
|
||||||
set(GGML_CUDA_FAST_SOFTMAX ON)
|
|
||||||
endif()
|
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
set(SD_LIB stable-diffusion)
|
set(SD_LIB stable-diffusion)
|
||||||
|
|||||||
25
Dockerfile.cuda
Normal file
25
Dockerfile.cuda
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
ARG CUDA_VERSION=12.6.3
|
||||||
|
ARG UBUNTU_VERSION=24.04
|
||||||
|
|
||||||
|
FROM nvidia/cuda:${CUDA_VERSION}-cudnn-devel-ubuntu${UBUNTU_VERSION} AS build
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends build-essential git ccache cmake
|
||||||
|
|
||||||
|
WORKDIR /sd.cpp
|
||||||
|
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
ARG CUDACXX=/usr/local/cuda/bin/nvcc
|
||||||
|
RUN cmake . -B ./build -DSD_CUDA=ON
|
||||||
|
RUN cmake --build ./build --config Release -j$(nproc)
|
||||||
|
|
||||||
|
FROM nvidia/cuda:${CUDA_VERSION}-cudnn-runtime-ubuntu${UBUNTU_VERSION} AS runtime
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install --yes --no-install-recommends libgomp1 && \
|
||||||
|
apt-get clean
|
||||||
|
|
||||||
|
COPY --from=build /sd.cpp/build/bin/sd-cli /sd-cli
|
||||||
|
COPY --from=build /sd.cpp/build/bin/sd-server /sd-server
|
||||||
|
|
||||||
|
ENTRYPOINT [ "/sd-cli" ]
|
||||||
@ -5,6 +5,7 @@
|
|||||||
- Download Anima
|
- Download Anima
|
||||||
- safetensors: https://huggingface.co/circlestone-labs/Anima/tree/main/split_files/diffusion_models
|
- safetensors: https://huggingface.co/circlestone-labs/Anima/tree/main/split_files/diffusion_models
|
||||||
- gguf: https://huggingface.co/Bedovyy/Anima-GGUF/tree/main
|
- gguf: https://huggingface.co/Bedovyy/Anima-GGUF/tree/main
|
||||||
|
- gguf Anima2: https://huggingface.co/JusteLeo/Anima2-GGUF/tree/main
|
||||||
- Download vae
|
- Download vae
|
||||||
- safetensors: https://huggingface.co/circlestone-labs/Anima/tree/main/split_files/vae
|
- safetensors: https://huggingface.co/circlestone-labs/Anima/tree/main/split_files/vae
|
||||||
- Download Qwen3-0.6B-Base
|
- Download Qwen3-0.6B-Base
|
||||||
@ -17,4 +18,4 @@
|
|||||||
.\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\anima-preview.safetensors --vae ..\..\ComfyUI\models\vae\qwen_image_vae.safetensors --llm ..\..\ComfyUI\models\text_encoders\qwen_3_06b_base.safetensors -p "a lovely cat holding a sign says 'anima.cpp'" --cfg-scale 6.0 --sampling-method euler -v --offload-to-cpu --diffusion-fa
|
.\bin\Release\sd-cli.exe --diffusion-model ..\..\ComfyUI\models\diffusion_models\anima-preview.safetensors --vae ..\..\ComfyUI\models\vae\qwen_image_vae.safetensors --llm ..\..\ComfyUI\models\text_encoders\qwen_3_06b_base.safetensors -p "a lovely cat holding a sign says 'anima.cpp'" --cfg-scale 6.0 --sampling-method euler -v --offload-to-cpu --diffusion-fa
|
||||||
```
|
```
|
||||||
|
|
||||||
<img alt="anima image example" src="../assets/anima/example.png" />
|
<img alt="anima image example" src="../assets/anima/example.png" />
|
||||||
|
|||||||
@ -11,6 +11,7 @@ Caching methods accelerate diffusion inference by reusing intermediate computati
|
|||||||
| `dbcache` | DiT models | Block-level L1 residual threshold |
|
| `dbcache` | DiT models | Block-level L1 residual threshold |
|
||||||
| `taylorseer` | DiT models | Taylor series approximation |
|
| `taylorseer` | DiT models | Taylor series approximation |
|
||||||
| `cache-dit` | DiT models | Combined DBCache + TaylorSeer |
|
| `cache-dit` | DiT models | Combined DBCache + TaylorSeer |
|
||||||
|
| `spectrum` | UNET models | Chebyshev + Taylor output forecasting |
|
||||||
|
|
||||||
### UCache (UNET Models)
|
### UCache (UNET Models)
|
||||||
|
|
||||||
@ -79,7 +80,7 @@ Uses Taylor series approximation to predict block outputs:
|
|||||||
Combines DBCache and TaylorSeer:
|
Combines DBCache and TaylorSeer:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
--cache-mode cache-dit --cache-preset fast
|
--cache-mode cache-dit
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Parameters
|
#### Parameters
|
||||||
@ -91,14 +92,6 @@ Combines DBCache and TaylorSeer:
|
|||||||
| `threshold` | L1 residual difference threshold | 0.08 |
|
| `threshold` | L1 residual difference threshold | 0.08 |
|
||||||
| `warmup` | Steps before caching starts | 8 |
|
| `warmup` | Steps before caching starts | 8 |
|
||||||
|
|
||||||
#### Presets
|
|
||||||
|
|
||||||
Available presets: `slow`, `medium`, `fast`, `ultra` (or `s`, `m`, `f`, `u`).
|
|
||||||
|
|
||||||
```bash
|
|
||||||
--cache-mode cache-dit --cache-preset fast
|
|
||||||
```
|
|
||||||
|
|
||||||
#### SCM Options
|
#### SCM Options
|
||||||
|
|
||||||
Steps Computation Mask controls which steps can be cached:
|
Steps Computation Mask controls which steps can be cached:
|
||||||
@ -118,6 +111,28 @@ Mask values: `1` = compute, `0` = can cache.
|
|||||||
--scm-policy dynamic
|
--scm-policy dynamic
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Spectrum (UNET Models)
|
||||||
|
|
||||||
|
Spectrum uses Chebyshev polynomial fitting blended with Taylor extrapolation to predict denoised outputs, skipping entire UNet forward passes. Based on the paper [Spectrum: Adaptive Spectral Feature Forecasting for Efficient Diffusion Sampling](https://github.com/tingyu215/Spectrum).
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sd-cli -m model.safetensors -p "a cat" --cache-mode spectrum
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Parameters
|
||||||
|
|
||||||
|
| Parameter | Description | Default |
|
||||||
|
|-----------|-------------|---------|
|
||||||
|
| `w` | Chebyshev vs Taylor blend weight (0=Taylor, 1=Chebyshev) | 0.40 |
|
||||||
|
| `m` | Chebyshev polynomial degree | 3 |
|
||||||
|
| `lam` | Ridge regression regularization | 1.0 |
|
||||||
|
| `window` | Initial window size (compute every N steps) | 2 |
|
||||||
|
| `flex` | Window growth per computed step after warmup | 0.50 |
|
||||||
|
| `warmup` | Steps to always compute before caching starts | 4 |
|
||||||
|
| `stop` | Stop caching at this fraction of total steps | 0.9 |
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
### Performance Tips
|
### Performance Tips
|
||||||
|
|
||||||
- Start with default thresholds and adjust based on output quality
|
- Start with default thresholds and adjust based on output quality
|
||||||
|
|||||||
@ -4,11 +4,12 @@
|
|||||||
usage: ./bin/sd-cli [options]
|
usage: ./bin/sd-cli [options]
|
||||||
|
|
||||||
CLI Options:
|
CLI Options:
|
||||||
-o, --output <string> path to write result image to. you can use printf-style %d format specifiers for image sequences (default: ./output.png) (eg. output_%03d.png)
|
-o, --output <string> path to write result image to. you can use printf-style %d format specifiers for image sequences (default:
|
||||||
--output-begin-idx <int> starting index for output image sequence, must be non-negative (default 0 if specified %d in output path, 1 otherwise)
|
./output.png) (eg. output_%03d.png)
|
||||||
--preview-path <string> path to write preview image to (default: ./preview.png)
|
--preview-path <string> path to write preview image to (default: ./preview.png)
|
||||||
--preview-interval <int> interval in denoising steps between consecutive updates of the image preview file (default is 1, meaning updating at
|
--preview-interval <int> interval in denoising steps between consecutive updates of the image preview file (default is 1, meaning updating at
|
||||||
every step)
|
every step)
|
||||||
|
--output-begin-idx <int> starting index for output image sequence, must be non-negative (default 0 if specified %d in output path, 1 otherwise)
|
||||||
--canny apply canny preprocessor (edge detection)
|
--canny apply canny preprocessor (edge detection)
|
||||||
--convert-name convert tensor name (for convert mode)
|
--convert-name convert tensor name (for convert mode)
|
||||||
-v, --verbose print extra info
|
-v, --verbose print extra info
|
||||||
@ -59,6 +60,7 @@ Context Options:
|
|||||||
--circularx enable circular RoPE wrapping on x-axis (width) only
|
--circularx enable circular RoPE wrapping on x-axis (width) only
|
||||||
--circulary enable circular RoPE wrapping on y-axis (height) only
|
--circulary enable circular RoPE wrapping on y-axis (height) only
|
||||||
--chroma-disable-dit-mask disable dit mask for chroma
|
--chroma-disable-dit-mask disable dit mask for chroma
|
||||||
|
--qwen-image-zero-cond-t enable zero_cond_t for qwen image
|
||||||
--chroma-enable-t5-mask enable t5 mask for chroma
|
--chroma-enable-t5-mask enable t5 mask for chroma
|
||||||
--type weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K). If not specified, the default is the
|
--type weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K). If not specified, the default is the
|
||||||
type of the weight file
|
type of the weight file
|
||||||
@ -107,7 +109,7 @@ Generation Options:
|
|||||||
medium
|
medium
|
||||||
--skip-layer-start <float> SLG enabling point (default: 0.01)
|
--skip-layer-start <float> SLG enabling point (default: 0.01)
|
||||||
--skip-layer-end <float> SLG disabling point (default: 0.2)
|
--skip-layer-end <float> SLG disabling point (default: 0.2)
|
||||||
--eta <float> eta in DDIM, only for DDIM/TCD/res_multistep/res_2s (default: 0)
|
--eta <float> eta in DDIM, only for DDIM and TCD (default: 0)
|
||||||
--flow-shift <float> shift value for Flow models like SD3.x or WAN (default: auto)
|
--flow-shift <float> shift value for Flow models like SD3.x or WAN (default: auto)
|
||||||
--high-noise-cfg-scale <float> (high noise) unconditional guidance scale: (default: 7.0)
|
--high-noise-cfg-scale <float> (high noise) unconditional guidance scale: (default: 7.0)
|
||||||
--high-noise-img-cfg-scale <float> (high noise) image guidance scale for inpaint or instruct-pix2pix models (default: same as --cfg-scale)
|
--high-noise-img-cfg-scale <float> (high noise) image guidance scale for inpaint or instruct-pix2pix models (default: same as --cfg-scale)
|
||||||
@ -115,7 +117,7 @@ Generation Options:
|
|||||||
--high-noise-slg-scale <float> (high noise) skip layer guidance (SLG) scale, only for DiT models: (default: 0)
|
--high-noise-slg-scale <float> (high noise) skip layer guidance (SLG) scale, only for DiT models: (default: 0)
|
||||||
--high-noise-skip-layer-start <float> (high noise) SLG enabling point (default: 0.01)
|
--high-noise-skip-layer-start <float> (high noise) SLG enabling point (default: 0.01)
|
||||||
--high-noise-skip-layer-end <float> (high noise) SLG disabling point (default: 0.2)
|
--high-noise-skip-layer-end <float> (high noise) SLG disabling point (default: 0.2)
|
||||||
--high-noise-eta <float> (high noise) eta in DDIM, only for DDIM/TCD/res_multistep/res_2s (default: 0)
|
--high-noise-eta <float> (high noise) eta in DDIM, only for DDIM and TCD (default: 0)
|
||||||
--strength <float> strength for noising/unnoising (default: 0.75)
|
--strength <float> strength for noising/unnoising (default: 0.75)
|
||||||
--pm-style-strength <float>
|
--pm-style-strength <float>
|
||||||
--control-strength <float> strength to apply Control Net (default: 0.9). 1.0 corresponds to full destruction of information in init image
|
--control-strength <float> strength to apply Control Net (default: 0.9). 1.0 corresponds to full destruction of information in init image
|
||||||
@ -124,21 +126,24 @@ Generation Options:
|
|||||||
--increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1).
|
--increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1).
|
||||||
--disable-auto-resize-ref-image disable auto resize of ref images
|
--disable-auto-resize-ref-image disable auto resize of ref images
|
||||||
-s, --seed RNG seed (default: 42, use random seed for < 0)
|
-s, --seed RNG seed (default: 42, use random seed for < 0)
|
||||||
--sampling-method sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd,
|
--sampling-method sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing,
|
||||||
res_multistep, res_2s] (default: euler for Flux/SD3/Wan, euler_a otherwise)
|
tcd, res_multistep, res_2s] (default: euler for Flux/SD3/Wan, euler_a
|
||||||
--high-noise-sampling-method (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing,
|
otherwise)
|
||||||
tcd, res_multistep, res_2s] default: euler for Flux/SD3/Wan, euler_a otherwise
|
--high-noise-sampling-method (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm,
|
||||||
|
ddim_trailing, tcd, res_multistep, res_2s] default: euler for Flux/SD3/Wan,
|
||||||
|
euler_a otherwise
|
||||||
--scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple,
|
--scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple,
|
||||||
kl_optimal, lcm, bong_tangent], default: discrete
|
kl_optimal, lcm, bong_tangent], default: discrete
|
||||||
--sigmas custom sigma values for the sampler, comma-separated (e.g., "14.61,7.8,3.5,0.0").
|
--sigmas custom sigma values for the sampler, comma-separated (e.g., "14.61,7.8,3.5,0.0").
|
||||||
--skip-layers layers to skip for SLG steps (default: [7,8,9])
|
--skip-layers layers to skip for SLG steps (default: [7,8,9])
|
||||||
--high-noise-skip-layers (high noise) layers to skip for SLG steps (default: [7,8,9])
|
--high-noise-skip-layers (high noise) layers to skip for SLG steps (default: [7,8,9])
|
||||||
-r, --ref-image reference image for Flux Kontext models (can be used multiple times)
|
-r, --ref-image reference image for Flux Kontext models (can be used multiple times)
|
||||||
--cache-mode caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level)
|
--cache-mode caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level),
|
||||||
|
'spectrum' (UNET/DiT Chebyshev+Taylor forecasting)
|
||||||
--cache-option named cache params (key=value format, comma-separated). easycache/ucache:
|
--cache-option named cache params (key=value format, comma-separated). easycache/ucache:
|
||||||
threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. Examples:
|
threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=;
|
||||||
"threshold=0.25" or "threshold=1.5,reset=0"
|
spectrum: w=,m=,lam=,window=,flex=,warmup=,stop=. Examples:
|
||||||
--cache-preset cache-dit preset: 'slow'/'s', 'medium'/'m', 'fast'/'f', 'ultra'/'u'
|
"threshold=0.25" or "threshold=1.5,reset=0" or "w=0.4,window=2"
|
||||||
--scm-mask SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache
|
--scm-mask SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache
|
||||||
--scm-policy SCM policy: 'dynamic' (default) or 'static'
|
--scm-policy SCM policy: 'dynamic' (default) or 'static'
|
||||||
```
|
```
|
||||||
|
|||||||
@ -1047,7 +1047,6 @@ struct SDGenerationParams {
|
|||||||
|
|
||||||
std::string cache_mode;
|
std::string cache_mode;
|
||||||
std::string cache_option;
|
std::string cache_option;
|
||||||
std::string cache_preset;
|
|
||||||
std::string scm_mask;
|
std::string scm_mask;
|
||||||
bool scm_policy_dynamic = true;
|
bool scm_policy_dynamic = true;
|
||||||
sd_cache_params_t cache_params{};
|
sd_cache_params_t cache_params{};
|
||||||
@ -1422,8 +1421,8 @@ struct SDGenerationParams {
|
|||||||
}
|
}
|
||||||
cache_mode = argv_to_utf8(index, argv);
|
cache_mode = argv_to_utf8(index, argv);
|
||||||
if (cache_mode != "easycache" && cache_mode != "ucache" &&
|
if (cache_mode != "easycache" && cache_mode != "ucache" &&
|
||||||
cache_mode != "dbcache" && cache_mode != "taylorseer" && cache_mode != "cache-dit") {
|
cache_mode != "dbcache" && cache_mode != "taylorseer" && cache_mode != "cache-dit" && cache_mode != "spectrum") {
|
||||||
fprintf(stderr, "error: invalid cache mode '%s', must be 'easycache', 'ucache', 'dbcache', 'taylorseer', or 'cache-dit'\n", cache_mode.c_str());
|
fprintf(stderr, "error: invalid cache mode '%s', must be 'easycache', 'ucache', 'dbcache', 'taylorseer', 'cache-dit', or 'spectrum'\n", cache_mode.c_str());
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
return 1;
|
return 1;
|
||||||
@ -1461,21 +1460,6 @@ struct SDGenerationParams {
|
|||||||
return 1;
|
return 1;
|
||||||
};
|
};
|
||||||
|
|
||||||
auto on_cache_preset_arg = [&](int argc, const char** argv, int index) {
|
|
||||||
if (++index >= argc) {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
cache_preset = argv_to_utf8(index, argv);
|
|
||||||
if (cache_preset != "slow" && cache_preset != "s" && cache_preset != "S" &&
|
|
||||||
cache_preset != "medium" && cache_preset != "m" && cache_preset != "M" &&
|
|
||||||
cache_preset != "fast" && cache_preset != "f" && cache_preset != "F" &&
|
|
||||||
cache_preset != "ultra" && cache_preset != "u" && cache_preset != "U") {
|
|
||||||
fprintf(stderr, "error: invalid cache preset '%s', must be 'slow'/'s', 'medium'/'m', 'fast'/'f', or 'ultra'/'u'\n", cache_preset.c_str());
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
return 1;
|
|
||||||
};
|
|
||||||
|
|
||||||
options.manual_options = {
|
options.manual_options = {
|
||||||
{"-s",
|
{"-s",
|
||||||
"--seed",
|
"--seed",
|
||||||
@ -1513,16 +1497,12 @@ struct SDGenerationParams {
|
|||||||
on_ref_image_arg},
|
on_ref_image_arg},
|
||||||
{"",
|
{"",
|
||||||
"--cache-mode",
|
"--cache-mode",
|
||||||
"caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level)",
|
"caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level), 'spectrum' (UNET/DiT Chebyshev+Taylor forecasting)",
|
||||||
on_cache_mode_arg},
|
on_cache_mode_arg},
|
||||||
{"",
|
{"",
|
||||||
"--cache-option",
|
"--cache-option",
|
||||||
"named cache params (key=value format, comma-separated). easycache/ucache: threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. Examples: \"threshold=0.25\" or \"threshold=1.5,reset=0\"",
|
"named cache params (key=value format, comma-separated). easycache/ucache: threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=; spectrum: w=,m=,lam=,window=,flex=,warmup=,stop=. Examples: \"threshold=0.25\" or \"threshold=1.5,reset=0\"",
|
||||||
on_cache_option_arg},
|
on_cache_option_arg},
|
||||||
{"",
|
|
||||||
"--cache-preset",
|
|
||||||
"cache-dit preset: 'slow'/'s', 'medium'/'m', 'fast'/'f', 'ultra'/'u'",
|
|
||||||
on_cache_preset_arg},
|
|
||||||
{"",
|
{"",
|
||||||
"--scm-mask",
|
"--scm-mask",
|
||||||
"SCM steps mask for cache-dit: comma-separated 0/1 (e.g., \"1,1,1,0,0,1,0,0,1,0\") - 1=compute, 0=can cache",
|
"SCM steps mask for cache-dit: comma-separated 0/1 (e.g., \"1,1,1,0,0,1,0,0,1,0\") - 1=compute, 0=can cache",
|
||||||
@ -1575,7 +1555,6 @@ struct SDGenerationParams {
|
|||||||
load_if_exists("negative_prompt", negative_prompt);
|
load_if_exists("negative_prompt", negative_prompt);
|
||||||
load_if_exists("cache_mode", cache_mode);
|
load_if_exists("cache_mode", cache_mode);
|
||||||
load_if_exists("cache_option", cache_option);
|
load_if_exists("cache_option", cache_option);
|
||||||
load_if_exists("cache_preset", cache_preset);
|
|
||||||
load_if_exists("scm_mask", scm_mask);
|
load_if_exists("scm_mask", scm_mask);
|
||||||
|
|
||||||
load_if_exists("clip_skip", clip_skip);
|
load_if_exists("clip_skip", clip_skip);
|
||||||
@ -1779,7 +1758,23 @@ struct SDGenerationParams {
|
|||||||
} else if (key == "Bn" || key == "bn") {
|
} else if (key == "Bn" || key == "bn") {
|
||||||
cache_params.Bn_compute_blocks = std::stoi(val);
|
cache_params.Bn_compute_blocks = std::stoi(val);
|
||||||
} else if (key == "warmup") {
|
} else if (key == "warmup") {
|
||||||
cache_params.max_warmup_steps = std::stoi(val);
|
if (cache_mode == "spectrum") {
|
||||||
|
cache_params.spectrum_warmup_steps = std::stoi(val);
|
||||||
|
} else {
|
||||||
|
cache_params.max_warmup_steps = std::stoi(val);
|
||||||
|
}
|
||||||
|
} else if (key == "w") {
|
||||||
|
cache_params.spectrum_w = std::stof(val);
|
||||||
|
} else if (key == "m") {
|
||||||
|
cache_params.spectrum_m = std::stoi(val);
|
||||||
|
} else if (key == "lam") {
|
||||||
|
cache_params.spectrum_lam = std::stof(val);
|
||||||
|
} else if (key == "window") {
|
||||||
|
cache_params.spectrum_window_size = std::stoi(val);
|
||||||
|
} else if (key == "flex") {
|
||||||
|
cache_params.spectrum_flex_window = std::stof(val);
|
||||||
|
} else if (key == "stop") {
|
||||||
|
cache_params.spectrum_stop_percent = std::stof(val);
|
||||||
} else {
|
} else {
|
||||||
LOG_ERROR("error: unknown cache parameter '%s'", key.c_str());
|
LOG_ERROR("error: unknown cache parameter '%s'", key.c_str());
|
||||||
return false;
|
return false;
|
||||||
@ -1794,39 +1789,17 @@ struct SDGenerationParams {
|
|||||||
|
|
||||||
if (!cache_mode.empty()) {
|
if (!cache_mode.empty()) {
|
||||||
if (cache_mode == "easycache") {
|
if (cache_mode == "easycache") {
|
||||||
cache_params.mode = SD_CACHE_EASYCACHE;
|
cache_params.mode = SD_CACHE_EASYCACHE;
|
||||||
cache_params.reuse_threshold = 0.2f;
|
|
||||||
cache_params.start_percent = 0.15f;
|
|
||||||
cache_params.end_percent = 0.95f;
|
|
||||||
cache_params.error_decay_rate = 1.0f;
|
|
||||||
cache_params.use_relative_threshold = true;
|
|
||||||
cache_params.reset_error_on_compute = true;
|
|
||||||
} else if (cache_mode == "ucache") {
|
} else if (cache_mode == "ucache") {
|
||||||
cache_params.mode = SD_CACHE_UCACHE;
|
cache_params.mode = SD_CACHE_UCACHE;
|
||||||
cache_params.reuse_threshold = 1.0f;
|
|
||||||
cache_params.start_percent = 0.15f;
|
|
||||||
cache_params.end_percent = 0.95f;
|
|
||||||
cache_params.error_decay_rate = 1.0f;
|
|
||||||
cache_params.use_relative_threshold = true;
|
|
||||||
cache_params.reset_error_on_compute = true;
|
|
||||||
} else if (cache_mode == "dbcache") {
|
} else if (cache_mode == "dbcache") {
|
||||||
cache_params.mode = SD_CACHE_DBCACHE;
|
cache_params.mode = SD_CACHE_DBCACHE;
|
||||||
cache_params.Fn_compute_blocks = 8;
|
|
||||||
cache_params.Bn_compute_blocks = 0;
|
|
||||||
cache_params.residual_diff_threshold = 0.08f;
|
|
||||||
cache_params.max_warmup_steps = 8;
|
|
||||||
} else if (cache_mode == "taylorseer") {
|
} else if (cache_mode == "taylorseer") {
|
||||||
cache_params.mode = SD_CACHE_TAYLORSEER;
|
cache_params.mode = SD_CACHE_TAYLORSEER;
|
||||||
cache_params.Fn_compute_blocks = 8;
|
|
||||||
cache_params.Bn_compute_blocks = 0;
|
|
||||||
cache_params.residual_diff_threshold = 0.08f;
|
|
||||||
cache_params.max_warmup_steps = 8;
|
|
||||||
} else if (cache_mode == "cache-dit") {
|
} else if (cache_mode == "cache-dit") {
|
||||||
cache_params.mode = SD_CACHE_CACHE_DIT;
|
cache_params.mode = SD_CACHE_CACHE_DIT;
|
||||||
cache_params.Fn_compute_blocks = 8;
|
} else if (cache_mode == "spectrum") {
|
||||||
cache_params.Bn_compute_blocks = 0;
|
cache_params.mode = SD_CACHE_SPECTRUM;
|
||||||
cache_params.residual_diff_threshold = 0.08f;
|
|
||||||
cache_params.max_warmup_steps = 8;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!cache_option.empty()) {
|
if (!cache_option.empty()) {
|
||||||
|
|||||||
@ -1,6 +1,73 @@
|
|||||||
set(TARGET sd-server)
|
set(TARGET sd-server)
|
||||||
|
|
||||||
|
option(SD_SERVER_BUILD_FRONTEND "Build server frontend with pnpm" ON)
|
||||||
|
|
||||||
|
set(FRONTEND_DIR "${CMAKE_CURRENT_SOURCE_DIR}/frontend")
|
||||||
|
set(GENERATED_HTML_HEADER "${FRONTEND_DIR}/dist/gen_index_html.h")
|
||||||
|
|
||||||
|
set(HAVE_FRONTEND_BUILD OFF)
|
||||||
|
|
||||||
|
if(SD_SERVER_BUILD_FRONTEND AND EXISTS "${FRONTEND_DIR}")
|
||||||
|
if(WIN32)
|
||||||
|
find_program(PNPM_EXECUTABLE NAMES pnpm.cmd pnpm)
|
||||||
|
else()
|
||||||
|
find_program(PNPM_EXECUTABLE NAMES pnpm)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if(PNPM_EXECUTABLE)
|
||||||
|
message(STATUS "Frontend dir found: ${FRONTEND_DIR}")
|
||||||
|
message(STATUS "pnpm found: ${PNPM_EXECUTABLE}")
|
||||||
|
|
||||||
|
set(HAVE_FRONTEND_BUILD ON)
|
||||||
|
|
||||||
|
add_custom_target(${TARGET}_frontend_install
|
||||||
|
COMMAND "${PNPM_EXECUTABLE}" -C "${FRONTEND_DIR}" install
|
||||||
|
WORKING_DIRECTORY "${FRONTEND_DIR}"
|
||||||
|
COMMENT "Installing frontend dependencies"
|
||||||
|
VERBATIM
|
||||||
|
)
|
||||||
|
|
||||||
|
add_custom_target(${TARGET}_frontend_build
|
||||||
|
COMMAND "${PNPM_EXECUTABLE}" -C "${FRONTEND_DIR}" run build
|
||||||
|
WORKING_DIRECTORY "${FRONTEND_DIR}"
|
||||||
|
COMMENT "Building frontend"
|
||||||
|
VERBATIM
|
||||||
|
)
|
||||||
|
|
||||||
|
add_custom_target(${TARGET}_frontend_header
|
||||||
|
COMMAND "${PNPM_EXECUTABLE}" -C "${FRONTEND_DIR}" run build:header
|
||||||
|
WORKING_DIRECTORY "${FRONTEND_DIR}"
|
||||||
|
COMMENT "Generating gen_index_html.h"
|
||||||
|
VERBATIM
|
||||||
|
)
|
||||||
|
|
||||||
|
add_dependencies(${TARGET}_frontend_build ${TARGET}_frontend_install)
|
||||||
|
add_dependencies(${TARGET}_frontend_header ${TARGET}_frontend_build)
|
||||||
|
|
||||||
|
add_custom_target(${TARGET}_frontend
|
||||||
|
DEPENDS ${TARGET}_frontend_header
|
||||||
|
)
|
||||||
|
|
||||||
|
set_source_files_properties("${GENERATED_HTML_HEADER}" PROPERTIES GENERATED TRUE)
|
||||||
|
else()
|
||||||
|
message(WARNING "pnpm not found, frontend build disabled")
|
||||||
|
endif()
|
||||||
|
else()
|
||||||
|
message(STATUS "Frontend disabled or directory not found: ${FRONTEND_DIR}")
|
||||||
|
endif()
|
||||||
|
|
||||||
add_executable(${TARGET} main.cpp)
|
add_executable(${TARGET} main.cpp)
|
||||||
|
|
||||||
|
if(HAVE_FRONTEND_BUILD)
|
||||||
|
add_dependencies(${TARGET} ${TARGET}_frontend)
|
||||||
|
target_sources(${TARGET} PRIVATE "${GENERATED_HTML_HEADER}")
|
||||||
|
target_include_directories(${TARGET} PRIVATE "${FRONTEND_DIR}/dist")
|
||||||
|
target_compile_definitions(${TARGET} PRIVATE HAVE_INDEX_HTML)
|
||||||
|
message(STATUS "HAVE_INDEX_HTML enabled")
|
||||||
|
else()
|
||||||
|
message(STATUS "HAVE_INDEX_HTML disabled")
|
||||||
|
endif()
|
||||||
|
|
||||||
install(TARGETS ${TARGET} RUNTIME)
|
install(TARGETS ${TARGET} RUNTIME)
|
||||||
target_link_libraries(${TARGET} PRIVATE stable-diffusion ${CMAKE_THREAD_LIBS_INIT})
|
target_link_libraries(${TARGET} PRIVATE stable-diffusion ${CMAKE_THREAD_LIBS_INIT})
|
||||||
target_compile_features(${TARGET} PUBLIC c_std_11 cxx_std_17)
|
target_compile_features(${TARGET} PUBLIC c_std_11 cxx_std_17)
|
||||||
@ -1,15 +1,104 @@
|
|||||||
|
# Frontend
|
||||||
|
|
||||||
|
## Build with Frontend
|
||||||
|
|
||||||
|
The server can optionally build the web frontend and embed it into the binary as `gen_index_html.h`.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
Install the following tools:
|
||||||
|
|
||||||
|
* **Node.js** ≥ 22.18
|
||||||
|
https://nodejs.org/
|
||||||
|
|
||||||
|
* **pnpm** ≥ 10
|
||||||
|
Install via npm:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
npm install -g pnpm
|
||||||
|
```
|
||||||
|
|
||||||
|
Verify installation:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
node -v
|
||||||
|
pnpm -v
|
||||||
|
```
|
||||||
|
|
||||||
|
### Install frontend dependencies
|
||||||
|
|
||||||
|
Go to the frontend directory and install dependencies:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd examples/server/frontend
|
||||||
|
pnpm install
|
||||||
|
```
|
||||||
|
|
||||||
|
### Build the server with CMake
|
||||||
|
|
||||||
|
Enable the frontend build option when configuring CMake:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cmake -B build -DSD_SERVER_BUILD_FRONTEND=ON
|
||||||
|
cmake --build build --config Release
|
||||||
|
```
|
||||||
|
|
||||||
|
If `pnpm` is available, the build system will automatically run:
|
||||||
|
|
||||||
|
```
|
||||||
|
pnpm run build
|
||||||
|
pnpm run build:header
|
||||||
|
```
|
||||||
|
|
||||||
|
and embed the generated frontend into the server binary.
|
||||||
|
|
||||||
|
## Frontend Repository
|
||||||
|
|
||||||
|
The web frontend is maintained in a **separate repository**, https://github.com/leejet/stable-ui.
|
||||||
|
|
||||||
|
If you want to modify the UI or frontend logic, please submit pull requests to the **frontend repository**.
|
||||||
|
|
||||||
|
This repository (`stable-diffusion.cpp`) only vendors the frontend periodically. Changes from the frontend repo are synchronized:
|
||||||
|
|
||||||
|
* approximately **every 1–2 weeks**, or
|
||||||
|
* when there are **major frontend updates**
|
||||||
|
|
||||||
|
Because of this, frontend changes will **not appear here immediately** after being merged upstream.
|
||||||
|
|
||||||
|
## Using an external frontend
|
||||||
|
|
||||||
|
By default, the server uses the **embedded frontend** generated during the build (`gen_index_html.h`).
|
||||||
|
|
||||||
|
You can also serve a custom frontend file instead of the embedded one by using:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
--serve-html-path <path-to-index.html>
|
||||||
|
```
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sd-server --serve-html-path ./index.html
|
||||||
|
```
|
||||||
|
|
||||||
|
In this case, the server will load and serve the specified `index.html` file instead of the embedded frontend. This is useful when:
|
||||||
|
|
||||||
|
* developing or testing frontend changes
|
||||||
|
* using a custom UI
|
||||||
|
* avoiding rebuilding the binary after frontend modifications
|
||||||
|
|
||||||
# Run
|
# Run
|
||||||
|
|
||||||
```
|
```
|
||||||
usage: ./bin/sd-server [options]
|
usage: ./bin/sd-server [options]
|
||||||
|
|
||||||
Svr Options:
|
Svr Options:
|
||||||
-l, --listen-ip <string> server listen ip (default: 127.0.0.1)
|
-l, --listen-ip <string> server listen ip (default: 127.0.0.1)
|
||||||
--listen-port <int> server listen port (default: 1234)
|
--serve-html-path <string> path to HTML file to serve at root (optional)
|
||||||
--serve-html-path <string> path to HTML file to serve at root (optional)
|
--listen-port <int> server listen port (default: 1234)
|
||||||
-v, --verbose print extra info
|
-v, --verbose print extra info
|
||||||
--color colors the logging tags according to level
|
--color colors the logging tags according to level
|
||||||
-h, --help show this help message and exit
|
-h, --help show this help message and exit
|
||||||
|
|
||||||
Context Options:
|
Context Options:
|
||||||
-m, --model <string> path to full model
|
-m, --model <string> path to full model
|
||||||
@ -39,10 +128,10 @@ Context Options:
|
|||||||
--vae-tiling process vae in tiles to reduce memory usage
|
--vae-tiling process vae in tiles to reduce memory usage
|
||||||
--force-sdxl-vae-conv-scale force use of conv scale on sdxl vae
|
--force-sdxl-vae-conv-scale force use of conv scale on sdxl vae
|
||||||
--offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed
|
--offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed
|
||||||
|
--mmap whether to memory-map model
|
||||||
--control-net-cpu keep controlnet in cpu (for low vram)
|
--control-net-cpu keep controlnet in cpu (for low vram)
|
||||||
--clip-on-cpu keep clip in cpu (for low vram)
|
--clip-on-cpu keep clip in cpu (for low vram)
|
||||||
--vae-on-cpu keep vae in cpu (for low vram)
|
--vae-on-cpu keep vae in cpu (for low vram)
|
||||||
--mmap whether to memory-map model
|
|
||||||
--fa use flash attention
|
--fa use flash attention
|
||||||
--diffusion-fa use flash attention in the diffusion model only
|
--diffusion-fa use flash attention in the diffusion model only
|
||||||
--diffusion-conv-direct use ggml_conv2d_direct in the diffusion model
|
--diffusion-conv-direct use ggml_conv2d_direct in the diffusion model
|
||||||
@ -51,6 +140,7 @@ Context Options:
|
|||||||
--circularx enable circular RoPE wrapping on x-axis (width) only
|
--circularx enable circular RoPE wrapping on x-axis (width) only
|
||||||
--circulary enable circular RoPE wrapping on y-axis (height) only
|
--circulary enable circular RoPE wrapping on y-axis (height) only
|
||||||
--chroma-disable-dit-mask disable dit mask for chroma
|
--chroma-disable-dit-mask disable dit mask for chroma
|
||||||
|
--qwen-image-zero-cond-t enable zero_cond_t for qwen image
|
||||||
--chroma-enable-t5-mask enable t5 mask for chroma
|
--chroma-enable-t5-mask enable t5 mask for chroma
|
||||||
--type weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K). If not specified, the default is the
|
--type weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K). If not specified, the default is the
|
||||||
type of the weight file
|
type of the weight file
|
||||||
@ -99,7 +189,7 @@ Default Generation Options:
|
|||||||
medium
|
medium
|
||||||
--skip-layer-start <float> SLG enabling point (default: 0.01)
|
--skip-layer-start <float> SLG enabling point (default: 0.01)
|
||||||
--skip-layer-end <float> SLG disabling point (default: 0.2)
|
--skip-layer-end <float> SLG disabling point (default: 0.2)
|
||||||
--eta <float> eta in DDIM, only for DDIM/TCD/res_multistep/res_2s (default: 0)
|
--eta <float> eta in DDIM, only for DDIM and TCD (default: 0)
|
||||||
--flow-shift <float> shift value for Flow models like SD3.x or WAN (default: auto)
|
--flow-shift <float> shift value for Flow models like SD3.x or WAN (default: auto)
|
||||||
--high-noise-cfg-scale <float> (high noise) unconditional guidance scale: (default: 7.0)
|
--high-noise-cfg-scale <float> (high noise) unconditional guidance scale: (default: 7.0)
|
||||||
--high-noise-img-cfg-scale <float> (high noise) image guidance scale for inpaint or instruct-pix2pix models (default: same as --cfg-scale)
|
--high-noise-img-cfg-scale <float> (high noise) image guidance scale for inpaint or instruct-pix2pix models (default: same as --cfg-scale)
|
||||||
@ -107,7 +197,7 @@ Default Generation Options:
|
|||||||
--high-noise-slg-scale <float> (high noise) skip layer guidance (SLG) scale, only for DiT models: (default: 0)
|
--high-noise-slg-scale <float> (high noise) skip layer guidance (SLG) scale, only for DiT models: (default: 0)
|
||||||
--high-noise-skip-layer-start <float> (high noise) SLG enabling point (default: 0.01)
|
--high-noise-skip-layer-start <float> (high noise) SLG enabling point (default: 0.01)
|
||||||
--high-noise-skip-layer-end <float> (high noise) SLG disabling point (default: 0.2)
|
--high-noise-skip-layer-end <float> (high noise) SLG disabling point (default: 0.2)
|
||||||
--high-noise-eta <float> (high noise) eta in DDIM, only for DDIM/TCD/res_multistep/res_2s (default: 0)
|
--high-noise-eta <float> (high noise) eta in DDIM, only for DDIM and TCD (default: 0)
|
||||||
--strength <float> strength for noising/unnoising (default: 0.75)
|
--strength <float> strength for noising/unnoising (default: 0.75)
|
||||||
--pm-style-strength <float>
|
--pm-style-strength <float>
|
||||||
--control-strength <float> strength to apply Control Net (default: 0.9). 1.0 corresponds to full destruction of information in init image
|
--control-strength <float> strength to apply Control Net (default: 0.9). 1.0 corresponds to full destruction of information in init image
|
||||||
@ -116,21 +206,22 @@ Default Generation Options:
|
|||||||
--increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1).
|
--increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1).
|
||||||
--disable-auto-resize-ref-image disable auto resize of ref images
|
--disable-auto-resize-ref-image disable auto resize of ref images
|
||||||
-s, --seed RNG seed (default: 42, use random seed for < 0)
|
-s, --seed RNG seed (default: 42, use random seed for < 0)
|
||||||
--sampling-method sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd,
|
--sampling-method sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing,
|
||||||
res_multistep, res_2s] (default: euler for Flux/SD3/Wan, euler_a otherwise)
|
tcd, res_multistep, res_2s] (default: euler for Flux/SD3/Wan, euler_a
|
||||||
--high-noise-sampling-method (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing,
|
otherwise)
|
||||||
tcd, res_multistep, res_2s] default: euler for Flux/SD3/Wan, euler_a otherwise
|
--high-noise-sampling-method (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm,
|
||||||
|
ddim_trailing, tcd, res_multistep, res_2s] default: euler for Flux/SD3/Wan,
|
||||||
|
euler_a otherwise
|
||||||
--scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple,
|
--scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple,
|
||||||
kl_optimal, lcm, bong_tangent], default: discrete
|
kl_optimal, lcm, bong_tangent], default: discrete
|
||||||
--sigmas custom sigma values for the sampler, comma-separated (e.g., "14.61,7.8,3.5,0.0").
|
--sigmas custom sigma values for the sampler, comma-separated (e.g., "14.61,7.8,3.5,0.0").
|
||||||
--skip-layers layers to skip for SLG steps (default: [7,8,9])
|
--skip-layers layers to skip for SLG steps (default: [7,8,9])
|
||||||
--high-noise-skip-layers (high noise) layers to skip for SLG steps (default: [7,8,9])
|
--high-noise-skip-layers (high noise) layers to skip for SLG steps (default: [7,8,9])
|
||||||
-r, --ref-image reference image for Flux Kontext models (can be used multiple times)
|
-r, --ref-image reference image for Flux Kontext models (can be used multiple times)
|
||||||
--cache-mode caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level)
|
--cache-mode caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level), 'spectrum' (UNET/DiT Chebyshev+Taylor forecasting)
|
||||||
--cache-option named cache params (key=value format, comma-separated). easycache/ucache:
|
--cache-option named cache params (key=value format, comma-separated). easycache/ucache:
|
||||||
threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. Examples:
|
threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. Examples:
|
||||||
"threshold=0.25" or "threshold=1.5,reset=0"
|
"threshold=0.25" or "threshold=1.5,reset=0"
|
||||||
--cache-preset cache-dit preset: 'slow'/'s', 'medium'/'m', 'fast'/'f', 'ultra'/'u'
|
|
||||||
--scm-mask SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache
|
--scm-mask SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache
|
||||||
--scm-policy SCM policy: 'dynamic' (default) or 'static'
|
--scm-policy SCM policy: 'dynamic' (default) or 'static'
|
||||||
```
|
```
|
||||||
|
|||||||
1
examples/server/frontend
Submodule
1
examples/server/frontend
Submodule
@ -0,0 +1 @@
|
|||||||
|
Subproject commit 1a34176cd6d39ad3a226b2b69047e71f6797f6bc
|
||||||
@ -13,6 +13,10 @@
|
|||||||
|
|
||||||
#include "common/common.hpp"
|
#include "common/common.hpp"
|
||||||
|
|
||||||
|
#ifdef HAVE_INDEX_HTML
|
||||||
|
#include "frontend/dist/gen_index_html.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
namespace fs = std::filesystem;
|
namespace fs = std::filesystem;
|
||||||
|
|
||||||
// ----------------------- helpers -----------------------
|
// ----------------------- helpers -----------------------
|
||||||
@ -269,6 +273,18 @@ struct LoraEntry {
|
|||||||
std::string fullpath;
|
std::string fullpath;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
void free_results(sd_image_t* result_images, int num_results) {
|
||||||
|
if (result_images) {
|
||||||
|
for (int i = 0; i < num_results; ++i) {
|
||||||
|
if (result_images[i].data) {
|
||||||
|
stbi_image_free(result_images[i].data);
|
||||||
|
result_images[i].data = nullptr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
free(result_images);
|
||||||
|
}
|
||||||
|
|
||||||
int main(int argc, const char** argv) {
|
int main(int argc, const char** argv) {
|
||||||
if (argc > 1 && std::string(argv[1]) == "--version") {
|
if (argc > 1 && std::string(argv[1]) == "--version") {
|
||||||
std::cout << version_string() << "\n";
|
std::cout << version_string() << "\n";
|
||||||
@ -345,7 +361,7 @@ int main(int argc, const char** argv) {
|
|||||||
auto get_lora_full_path = [&](const std::string& path) -> std::string {
|
auto get_lora_full_path = [&](const std::string& path) -> std::string {
|
||||||
std::lock_guard<std::mutex> lock(lora_mutex);
|
std::lock_guard<std::mutex> lock(lora_mutex);
|
||||||
auto it = std::find_if(lora_cache.begin(), lora_cache.end(),
|
auto it = std::find_if(lora_cache.begin(), lora_cache.end(),
|
||||||
[&](const LoraEntry& e) { return e.path == path; });
|
[&](const LoraEntry& e) { return e.path == path; });
|
||||||
return (it != lora_cache.end()) ? it->fullpath : "";
|
return (it != lora_cache.end()) ? it->fullpath : "";
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -368,7 +384,13 @@ int main(int argc, const char** argv) {
|
|||||||
return httplib::Server::HandlerResponse::Unhandled;
|
return httplib::Server::HandlerResponse::Unhandled;
|
||||||
});
|
});
|
||||||
|
|
||||||
// root
|
// index html
|
||||||
|
std::string index_html;
|
||||||
|
#ifdef HAVE_INDEX_HTML
|
||||||
|
index_html.assign(reinterpret_cast<const char*>(index_html_bytes), index_html_size);
|
||||||
|
#else
|
||||||
|
index_html = "Stable Diffusion Server is running";
|
||||||
|
#endif
|
||||||
svr.Get("/", [&](const httplib::Request&, httplib::Response& res) {
|
svr.Get("/", [&](const httplib::Request&, httplib::Response& res) {
|
||||||
if (!svr_params.serve_html_path.empty()) {
|
if (!svr_params.serve_html_path.empty()) {
|
||||||
std::ifstream file(svr_params.serve_html_path);
|
std::ifstream file(svr_params.serve_html_path);
|
||||||
@ -380,7 +402,7 @@ int main(int argc, const char** argv) {
|
|||||||
res.set_content("Error: Unable to read HTML file", "text/plain");
|
res.set_content("Error: Unable to read HTML file", "text/plain");
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
res.set_content("Stable Diffusion Server is running", "text/plain");
|
res.set_content(index_html, "text/html");
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
@ -537,6 +559,7 @@ int main(int argc, const char** argv) {
|
|||||||
item["b64_json"] = b64;
|
item["b64_json"] = b64;
|
||||||
out["data"].push_back(item);
|
out["data"].push_back(item);
|
||||||
}
|
}
|
||||||
|
free_results(results, num_results);
|
||||||
|
|
||||||
res.set_content(out.dump(), "application/json");
|
res.set_content(out.dump(), "application/json");
|
||||||
res.status = 200;
|
res.status = 200;
|
||||||
@ -567,7 +590,7 @@ int main(int argc, const char** argv) {
|
|||||||
|
|
||||||
std::string sd_cpp_extra_args_str = extract_and_remove_sd_cpp_extra_args(prompt);
|
std::string sd_cpp_extra_args_str = extract_and_remove_sd_cpp_extra_args(prompt);
|
||||||
|
|
||||||
size_t image_count = req.form.get_file_count("image[]");
|
size_t image_count = req.form.get_file_count("image[]");
|
||||||
bool has_legacy_image = req.form.has_file("image");
|
bool has_legacy_image = req.form.has_file("image");
|
||||||
if (image_count == 0 && !has_legacy_image) {
|
if (image_count == 0 && !has_legacy_image) {
|
||||||
res.status = 400;
|
res.status = 400;
|
||||||
@ -781,6 +804,7 @@ int main(int argc, const char** argv) {
|
|||||||
item["b64_json"] = b64;
|
item["b64_json"] = b64;
|
||||||
out["data"].push_back(item);
|
out["data"].push_back(item);
|
||||||
}
|
}
|
||||||
|
free_results(results, num_results);
|
||||||
|
|
||||||
res.set_content(out.dump(), "application/json");
|
res.set_content(out.dump(), "application/json");
|
||||||
res.status = 200;
|
res.status = 200;
|
||||||
@ -1101,6 +1125,7 @@ int main(int argc, const char** argv) {
|
|||||||
std::string b64 = base64_encode(image_bytes);
|
std::string b64 = base64_encode(image_bytes);
|
||||||
out["images"].push_back(b64);
|
out["images"].push_back(b64);
|
||||||
}
|
}
|
||||||
|
free_results(results, num_results);
|
||||||
|
|
||||||
res.set_content(out.dump(), "application/json");
|
res.set_content(out.dump(), "application/json");
|
||||||
res.status = 200;
|
res.status = 200;
|
||||||
|
|||||||
@ -251,6 +251,7 @@ enum sd_cache_mode_t {
|
|||||||
SD_CACHE_DBCACHE,
|
SD_CACHE_DBCACHE,
|
||||||
SD_CACHE_TAYLORSEER,
|
SD_CACHE_TAYLORSEER,
|
||||||
SD_CACHE_CACHE_DIT,
|
SD_CACHE_CACHE_DIT,
|
||||||
|
SD_CACHE_SPECTRUM,
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
@ -271,6 +272,13 @@ typedef struct {
|
|||||||
int taylorseer_skip_interval;
|
int taylorseer_skip_interval;
|
||||||
const char* scm_mask;
|
const char* scm_mask;
|
||||||
bool scm_policy_dynamic;
|
bool scm_policy_dynamic;
|
||||||
|
float spectrum_w;
|
||||||
|
int spectrum_m;
|
||||||
|
float spectrum_lam;
|
||||||
|
int spectrum_window_size;
|
||||||
|
float spectrum_flex_window;
|
||||||
|
int spectrum_warmup_steps;
|
||||||
|
float spectrum_stop_percent;
|
||||||
} sd_cache_params_t;
|
} sd_cache_params_t;
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
|
|||||||
130
src/anima.hpp
130
src/anima.hpp
@ -13,9 +13,9 @@
|
|||||||
namespace Anima {
|
namespace Anima {
|
||||||
constexpr int ANIMA_GRAPH_SIZE = 65536;
|
constexpr int ANIMA_GRAPH_SIZE = 65536;
|
||||||
|
|
||||||
__STATIC_INLINE__ struct ggml_tensor* apply_gate(struct ggml_context* ctx,
|
__STATIC_INLINE__ ggml_tensor* apply_gate(ggml_context* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* gate) {
|
ggml_tensor* gate) {
|
||||||
gate = ggml_reshape_3d(ctx, gate, gate->ne[0], 1, gate->ne[1]); // [N, 1, C]
|
gate = ggml_reshape_3d(ctx, gate, gate->ne[0], 1, gate->ne[1]); // [N, 1, C]
|
||||||
return ggml_mul(ctx, x, gate);
|
return ggml_mul(ctx, x, gate);
|
||||||
}
|
}
|
||||||
@ -26,7 +26,7 @@ namespace Anima {
|
|||||||
blocks["proj.1"] = std::make_shared<Linear>(in_dim, out_dim, false);
|
blocks["proj.1"] = std::make_shared<Linear>(in_dim, out_dim, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj.1"]);
|
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj.1"]);
|
||||||
return proj->forward(ctx, x);
|
return proj->forward(ctx, x);
|
||||||
}
|
}
|
||||||
@ -39,7 +39,7 @@ namespace Anima {
|
|||||||
blocks["1.linear_2"] = std::make_shared<Linear>(in_dim, out_dim, false);
|
blocks["1.linear_2"] = std::make_shared<Linear>(in_dim, out_dim, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto linear_1 = std::dynamic_pointer_cast<Linear>(blocks["1.linear_1"]);
|
auto linear_1 = std::dynamic_pointer_cast<Linear>(blocks["1.linear_1"]);
|
||||||
auto linear_2 = std::dynamic_pointer_cast<Linear>(blocks["1.linear_2"]);
|
auto linear_2 = std::dynamic_pointer_cast<Linear>(blocks["1.linear_2"]);
|
||||||
|
|
||||||
@ -62,10 +62,10 @@ namespace Anima {
|
|||||||
blocks["2"] = std::make_shared<Linear>(hidden_features, 3 * in_features, false);
|
blocks["2"] = std::make_shared<Linear>(hidden_features, 3 * in_features, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* hidden_states,
|
ggml_tensor* hidden_states,
|
||||||
struct ggml_tensor* embedded_timestep,
|
ggml_tensor* embedded_timestep,
|
||||||
struct ggml_tensor* temb = nullptr) {
|
ggml_tensor* temb = nullptr) {
|
||||||
auto norm = std::dynamic_pointer_cast<LayerNorm>(blocks["norm"]);
|
auto norm = std::dynamic_pointer_cast<LayerNorm>(blocks["norm"]);
|
||||||
auto linear_1 = std::dynamic_pointer_cast<Linear>(blocks["1"]);
|
auto linear_1 = std::dynamic_pointer_cast<Linear>(blocks["1"]);
|
||||||
auto linear_2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);
|
auto linear_2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);
|
||||||
@ -102,10 +102,10 @@ namespace Anima {
|
|||||||
blocks["2"] = std::make_shared<Linear>(hidden_features, 2 * in_features, false);
|
blocks["2"] = std::make_shared<Linear>(hidden_features, 2 * in_features, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* hidden_states,
|
ggml_tensor* hidden_states,
|
||||||
struct ggml_tensor* embedded_timestep,
|
ggml_tensor* embedded_timestep,
|
||||||
struct ggml_tensor* temb = nullptr) {
|
ggml_tensor* temb = nullptr) {
|
||||||
auto norm = std::dynamic_pointer_cast<LayerNorm>(blocks["norm"]);
|
auto norm = std::dynamic_pointer_cast<LayerNorm>(blocks["norm"]);
|
||||||
auto linear_1 = std::dynamic_pointer_cast<Linear>(blocks["1"]);
|
auto linear_1 = std::dynamic_pointer_cast<Linear>(blocks["1"]);
|
||||||
auto linear_2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);
|
auto linear_2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);
|
||||||
@ -152,11 +152,11 @@ namespace Anima {
|
|||||||
blocks[this->out_proj_name] = std::make_shared<Linear>(inner_dim, query_dim, false);
|
blocks[this->out_proj_name] = std::make_shared<Linear>(inner_dim, query_dim, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* hidden_states,
|
ggml_tensor* hidden_states,
|
||||||
struct ggml_tensor* encoder_hidden_states = nullptr,
|
ggml_tensor* encoder_hidden_states = nullptr,
|
||||||
struct ggml_tensor* pe_q = nullptr,
|
ggml_tensor* pe_q = nullptr,
|
||||||
struct ggml_tensor* pe_k = nullptr) {
|
ggml_tensor* pe_k = nullptr) {
|
||||||
if (encoder_hidden_states == nullptr) {
|
if (encoder_hidden_states == nullptr) {
|
||||||
encoder_hidden_states = hidden_states;
|
encoder_hidden_states = hidden_states;
|
||||||
}
|
}
|
||||||
@ -183,7 +183,7 @@ namespace Anima {
|
|||||||
q4 = q_norm->forward(ctx, q4);
|
q4 = q_norm->forward(ctx, q4);
|
||||||
k4 = k_norm->forward(ctx, k4);
|
k4 = k_norm->forward(ctx, k4);
|
||||||
|
|
||||||
struct ggml_tensor* attn_out = nullptr;
|
ggml_tensor* attn_out = nullptr;
|
||||||
if (pe_q != nullptr || pe_k != nullptr) {
|
if (pe_q != nullptr || pe_k != nullptr) {
|
||||||
if (pe_q == nullptr) {
|
if (pe_q == nullptr) {
|
||||||
pe_q = pe_k;
|
pe_q = pe_k;
|
||||||
@ -227,7 +227,7 @@ namespace Anima {
|
|||||||
blocks["layer2"] = std::make_shared<Linear>(hidden_dim, dim, false);
|
blocks["layer2"] = std::make_shared<Linear>(hidden_dim, dim, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto layer1 = std::dynamic_pointer_cast<Linear>(blocks["layer1"]);
|
auto layer1 = std::dynamic_pointer_cast<Linear>(blocks["layer1"]);
|
||||||
auto layer2 = std::dynamic_pointer_cast<Linear>(blocks["layer2"]);
|
auto layer2 = std::dynamic_pointer_cast<Linear>(blocks["layer2"]);
|
||||||
|
|
||||||
@ -245,7 +245,7 @@ namespace Anima {
|
|||||||
blocks["2"] = std::make_shared<Linear>(hidden_dim, dim, true);
|
blocks["2"] = std::make_shared<Linear>(hidden_dim, dim, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto layer0 = std::dynamic_pointer_cast<Linear>(blocks["0"]);
|
auto layer0 = std::dynamic_pointer_cast<Linear>(blocks["0"]);
|
||||||
auto layer2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);
|
auto layer2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);
|
||||||
|
|
||||||
@ -267,11 +267,11 @@ namespace Anima {
|
|||||||
blocks["mlp"] = std::make_shared<AdapterMLP>(model_dim, model_dim * 4);
|
blocks["mlp"] = std::make_shared<AdapterMLP>(model_dim, model_dim * 4);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* target_pe,
|
ggml_tensor* target_pe,
|
||||||
struct ggml_tensor* context_pe) {
|
ggml_tensor* context_pe) {
|
||||||
auto norm_self_attn = std::dynamic_pointer_cast<RMSNorm>(blocks["norm_self_attn"]);
|
auto norm_self_attn = std::dynamic_pointer_cast<RMSNorm>(blocks["norm_self_attn"]);
|
||||||
auto self_attn = std::dynamic_pointer_cast<AnimaAttention>(blocks["self_attn"]);
|
auto self_attn = std::dynamic_pointer_cast<AnimaAttention>(blocks["self_attn"]);
|
||||||
auto norm_cross_attn = std::dynamic_pointer_cast<RMSNorm>(blocks["norm_cross_attn"]);
|
auto norm_cross_attn = std::dynamic_pointer_cast<RMSNorm>(blocks["norm_cross_attn"]);
|
||||||
@ -317,11 +317,11 @@ namespace Anima {
|
|||||||
blocks["norm"] = std::make_shared<RMSNorm>(target_dim, 1e-6f);
|
blocks["norm"] = std::make_shared<RMSNorm>(target_dim, 1e-6f);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* source_hidden_states,
|
ggml_tensor* source_hidden_states,
|
||||||
struct ggml_tensor* target_input_ids,
|
ggml_tensor* target_input_ids,
|
||||||
struct ggml_tensor* target_pe,
|
ggml_tensor* target_pe,
|
||||||
struct ggml_tensor* source_pe) {
|
ggml_tensor* source_pe) {
|
||||||
GGML_ASSERT(target_input_ids != nullptr);
|
GGML_ASSERT(target_input_ids != nullptr);
|
||||||
if (ggml_n_dims(target_input_ids) == 1) {
|
if (ggml_n_dims(target_input_ids) == 1) {
|
||||||
target_input_ids = ggml_reshape_2d(ctx->ggml_ctx, target_input_ids, target_input_ids->ne[0], 1);
|
target_input_ids = ggml_reshape_2d(ctx->ggml_ctx, target_input_ids, target_input_ids->ne[0], 1);
|
||||||
@ -360,12 +360,12 @@ namespace Anima {
|
|||||||
blocks["mlp"] = std::make_shared<AnimaMLP>(hidden_size, hidden_size * mlp_ratio);
|
blocks["mlp"] = std::make_shared<AnimaMLP>(hidden_size, hidden_size * mlp_ratio);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* hidden_states,
|
ggml_tensor* hidden_states,
|
||||||
struct ggml_tensor* encoder_hidden_states,
|
ggml_tensor* encoder_hidden_states,
|
||||||
struct ggml_tensor* embedded_timestep,
|
ggml_tensor* embedded_timestep,
|
||||||
struct ggml_tensor* temb,
|
ggml_tensor* temb,
|
||||||
struct ggml_tensor* image_pe) {
|
ggml_tensor* image_pe) {
|
||||||
auto norm1 = std::dynamic_pointer_cast<AdaLayerNormZero>(blocks["adaln_modulation_self_attn"]);
|
auto norm1 = std::dynamic_pointer_cast<AdaLayerNormZero>(blocks["adaln_modulation_self_attn"]);
|
||||||
auto attn1 = std::dynamic_pointer_cast<AnimaAttention>(blocks["self_attn"]);
|
auto attn1 = std::dynamic_pointer_cast<AnimaAttention>(blocks["self_attn"]);
|
||||||
auto norm2 = std::dynamic_pointer_cast<AdaLayerNormZero>(blocks["adaln_modulation_cross_attn"]);
|
auto norm2 = std::dynamic_pointer_cast<AdaLayerNormZero>(blocks["adaln_modulation_cross_attn"]);
|
||||||
@ -402,10 +402,10 @@ namespace Anima {
|
|||||||
blocks["linear"] = std::make_shared<Linear>(hidden_size, patch_size * patch_size * out_channels, false);
|
blocks["linear"] = std::make_shared<Linear>(hidden_size, patch_size * patch_size * out_channels, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* hidden_states,
|
ggml_tensor* hidden_states,
|
||||||
struct ggml_tensor* embedded_timestep,
|
ggml_tensor* embedded_timestep,
|
||||||
struct ggml_tensor* temb) {
|
ggml_tensor* temb) {
|
||||||
auto adaln = std::dynamic_pointer_cast<AdaLayerNorm>(blocks["adaln_modulation"]);
|
auto adaln = std::dynamic_pointer_cast<AdaLayerNorm>(blocks["adaln_modulation"]);
|
||||||
auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]);
|
auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]);
|
||||||
|
|
||||||
@ -445,15 +445,15 @@ namespace Anima {
|
|||||||
blocks["llm_adapter"] = std::make_shared<LLMAdapter>(1024, 1024, 1024, 6, 16);
|
blocks["llm_adapter"] = std::make_shared<LLMAdapter>(1024, 1024, 1024, 6, 16);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timestep,
|
ggml_tensor* timestep,
|
||||||
struct ggml_tensor* encoder_hidden_states,
|
ggml_tensor* encoder_hidden_states,
|
||||||
struct ggml_tensor* image_pe,
|
ggml_tensor* image_pe,
|
||||||
struct ggml_tensor* t5_ids = nullptr,
|
ggml_tensor* t5_ids = nullptr,
|
||||||
struct ggml_tensor* t5_weights = nullptr,
|
ggml_tensor* t5_weights = nullptr,
|
||||||
struct ggml_tensor* adapter_q_pe = nullptr,
|
ggml_tensor* adapter_q_pe = nullptr,
|
||||||
struct ggml_tensor* adapter_k_pe = nullptr) {
|
ggml_tensor* adapter_k_pe = nullptr) {
|
||||||
GGML_ASSERT(x->ne[3] == 1);
|
GGML_ASSERT(x->ne[3] == 1);
|
||||||
|
|
||||||
auto x_embedder = std::dynamic_pointer_cast<XEmbedder>(blocks["x_embedder"]);
|
auto x_embedder = std::dynamic_pointer_cast<XEmbedder>(blocks["x_embedder"]);
|
||||||
@ -553,7 +553,7 @@ namespace Anima {
|
|||||||
return "anima";
|
return "anima";
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
net.get_param_tensors(tensors, prefix + ".net");
|
net.get_param_tensors(tensors, prefix + ".net");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -602,13 +602,13 @@ namespace Anima {
|
|||||||
return Rope::embed_nd(ids, bs, axis_thetas, axes_dim);
|
return Rope::embed_nd(ids, bs, axis_thetas, axes_dim);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* x,
|
ggml_cgraph* build_graph(ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* t5_ids = nullptr,
|
ggml_tensor* t5_ids = nullptr,
|
||||||
struct ggml_tensor* t5_weights = nullptr) {
|
ggml_tensor* t5_weights = nullptr) {
|
||||||
GGML_ASSERT(x->ne[3] == 1);
|
GGML_ASSERT(x->ne[3] == 1);
|
||||||
struct ggml_cgraph* gf = new_graph_custom(ANIMA_GRAPH_SIZE);
|
ggml_cgraph* gf = new_graph_custom(ANIMA_GRAPH_SIZE);
|
||||||
|
|
||||||
x = to_backend(x);
|
x = to_backend(x);
|
||||||
timesteps = to_backend(timesteps);
|
timesteps = to_backend(timesteps);
|
||||||
@ -668,14 +668,14 @@ namespace Anima {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* t5_ids = nullptr,
|
ggml_tensor* t5_ids = nullptr,
|
||||||
struct ggml_tensor* t5_weights = nullptr,
|
ggml_tensor* t5_weights = nullptr,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(x, timesteps, context, t5_ids, t5_weights);
|
return build_graph(x, timesteps, context, t5_ids, t5_weights);
|
||||||
};
|
};
|
||||||
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
|
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
|
||||||
|
|||||||
933
src/auto_encoder_kl.hpp
Normal file
933
src/auto_encoder_kl.hpp
Normal file
@ -0,0 +1,933 @@
|
|||||||
|
#ifndef __AUTO_ENCODER_KL_HPP__
|
||||||
|
#define __AUTO_ENCODER_KL_HPP__
|
||||||
|
|
||||||
|
#include "vae.hpp"
|
||||||
|
|
||||||
|
/*================================================== AutoEncoderKL ===================================================*/
|
||||||
|
|
||||||
|
#define VAE_GRAPH_SIZE 20480
|
||||||
|
|
||||||
|
class ResnetBlock : public UnaryBlock {
|
||||||
|
protected:
|
||||||
|
int64_t in_channels;
|
||||||
|
int64_t out_channels;
|
||||||
|
|
||||||
|
public:
|
||||||
|
ResnetBlock(int64_t in_channels,
|
||||||
|
int64_t out_channels)
|
||||||
|
: in_channels(in_channels),
|
||||||
|
out_channels(out_channels) {
|
||||||
|
// temb_channels is always 0
|
||||||
|
blocks["norm1"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(in_channels));
|
||||||
|
blocks["conv1"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
|
||||||
|
|
||||||
|
blocks["norm2"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(out_channels));
|
||||||
|
blocks["conv2"] = std::shared_ptr<GGMLBlock>(new Conv2d(out_channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
|
||||||
|
|
||||||
|
if (out_channels != in_channels) {
|
||||||
|
blocks["nin_shortcut"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, out_channels, {1, 1}));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
|
// x: [N, in_channels, h, w]
|
||||||
|
// t_emb is always None
|
||||||
|
auto norm1 = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm1"]);
|
||||||
|
auto conv1 = std::dynamic_pointer_cast<Conv2d>(blocks["conv1"]);
|
||||||
|
auto norm2 = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm2"]);
|
||||||
|
auto conv2 = std::dynamic_pointer_cast<Conv2d>(blocks["conv2"]);
|
||||||
|
|
||||||
|
auto h = x;
|
||||||
|
h = norm1->forward(ctx, h);
|
||||||
|
h = ggml_silu_inplace(ctx->ggml_ctx, h); // swish
|
||||||
|
h = conv1->forward(ctx, h);
|
||||||
|
// return h;
|
||||||
|
|
||||||
|
h = norm2->forward(ctx, h);
|
||||||
|
h = ggml_silu_inplace(ctx->ggml_ctx, h); // swish
|
||||||
|
// dropout, skip for inference
|
||||||
|
h = conv2->forward(ctx, h);
|
||||||
|
|
||||||
|
// skip connection
|
||||||
|
if (out_channels != in_channels) {
|
||||||
|
auto nin_shortcut = std::dynamic_pointer_cast<Conv2d>(blocks["nin_shortcut"]);
|
||||||
|
|
||||||
|
x = nin_shortcut->forward(ctx, x); // [N, out_channels, h, w]
|
||||||
|
}
|
||||||
|
|
||||||
|
h = ggml_add(ctx->ggml_ctx, h, x);
|
||||||
|
return h; // [N, out_channels, h, w]
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
class AttnBlock : public UnaryBlock {
|
||||||
|
protected:
|
||||||
|
int64_t in_channels;
|
||||||
|
bool use_linear;
|
||||||
|
|
||||||
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") {
|
||||||
|
auto iter = tensor_storage_map.find(prefix + "proj_out.weight");
|
||||||
|
if (iter != tensor_storage_map.end()) {
|
||||||
|
if (iter->second.n_dims == 4 && use_linear) {
|
||||||
|
use_linear = false;
|
||||||
|
blocks["q"] = std::make_shared<Conv2d>(in_channels, in_channels, std::pair{1, 1});
|
||||||
|
blocks["k"] = std::make_shared<Conv2d>(in_channels, in_channels, std::pair{1, 1});
|
||||||
|
blocks["v"] = std::make_shared<Conv2d>(in_channels, in_channels, std::pair{1, 1});
|
||||||
|
blocks["proj_out"] = std::make_shared<Conv2d>(in_channels, in_channels, std::pair{1, 1});
|
||||||
|
} else if (iter->second.n_dims == 2 && !use_linear) {
|
||||||
|
use_linear = true;
|
||||||
|
blocks["q"] = std::make_shared<Linear>(in_channels, in_channels);
|
||||||
|
blocks["k"] = std::make_shared<Linear>(in_channels, in_channels);
|
||||||
|
blocks["v"] = std::make_shared<Linear>(in_channels, in_channels);
|
||||||
|
blocks["proj_out"] = std::make_shared<Linear>(in_channels, in_channels);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
AttnBlock(int64_t in_channels, bool use_linear)
|
||||||
|
: in_channels(in_channels), use_linear(use_linear) {
|
||||||
|
blocks["norm"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(in_channels));
|
||||||
|
if (use_linear) {
|
||||||
|
blocks["q"] = std::shared_ptr<GGMLBlock>(new Linear(in_channels, in_channels));
|
||||||
|
blocks["k"] = std::shared_ptr<GGMLBlock>(new Linear(in_channels, in_channels));
|
||||||
|
blocks["v"] = std::shared_ptr<GGMLBlock>(new Linear(in_channels, in_channels));
|
||||||
|
blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Linear(in_channels, in_channels));
|
||||||
|
} else {
|
||||||
|
blocks["q"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
|
||||||
|
blocks["k"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
|
||||||
|
blocks["v"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
|
||||||
|
blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
|
// x: [N, in_channels, h, w]
|
||||||
|
auto norm = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm"]);
|
||||||
|
auto q_proj = std::dynamic_pointer_cast<UnaryBlock>(blocks["q"]);
|
||||||
|
auto k_proj = std::dynamic_pointer_cast<UnaryBlock>(blocks["k"]);
|
||||||
|
auto v_proj = std::dynamic_pointer_cast<UnaryBlock>(blocks["v"]);
|
||||||
|
auto proj_out = std::dynamic_pointer_cast<UnaryBlock>(blocks["proj_out"]);
|
||||||
|
|
||||||
|
auto h_ = norm->forward(ctx, x);
|
||||||
|
|
||||||
|
const int64_t n = h_->ne[3];
|
||||||
|
const int64_t c = h_->ne[2];
|
||||||
|
const int64_t h = h_->ne[1];
|
||||||
|
const int64_t w = h_->ne[0];
|
||||||
|
|
||||||
|
ggml_tensor* q;
|
||||||
|
ggml_tensor* k;
|
||||||
|
ggml_tensor* v;
|
||||||
|
if (use_linear) {
|
||||||
|
h_ = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, h_, 1, 2, 0, 3)); // [N, h, w, in_channels]
|
||||||
|
h_ = ggml_reshape_3d(ctx->ggml_ctx, h_, c, h * w, n); // [N, h * w, in_channels]
|
||||||
|
|
||||||
|
q = q_proj->forward(ctx, h_); // [N, h * w, in_channels]
|
||||||
|
k = k_proj->forward(ctx, h_); // [N, h * w, in_channels]
|
||||||
|
v = v_proj->forward(ctx, h_); // [N, h * w, in_channels]
|
||||||
|
} else {
|
||||||
|
q = q_proj->forward(ctx, h_); // [N, in_channels, h, w]
|
||||||
|
q = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, q, 1, 2, 0, 3)); // [N, h, w, in_channels]
|
||||||
|
q = ggml_reshape_3d(ctx->ggml_ctx, q, c, h * w, n); // [N, h * w, in_channels]
|
||||||
|
|
||||||
|
k = k_proj->forward(ctx, h_); // [N, in_channels, h, w]
|
||||||
|
k = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, k, 1, 2, 0, 3)); // [N, h, w, in_channels]
|
||||||
|
k = ggml_reshape_3d(ctx->ggml_ctx, k, c, h * w, n); // [N, h * w, in_channels]
|
||||||
|
|
||||||
|
v = v_proj->forward(ctx, h_); // [N, in_channels, h, w]
|
||||||
|
v = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, v, 1, 2, 0, 3)); // [N, h, w, in_channels]
|
||||||
|
v = ggml_reshape_3d(ctx->ggml_ctx, v, c, h * w, n); // [N, h * w, in_channels]
|
||||||
|
}
|
||||||
|
|
||||||
|
h_ = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, 1, nullptr, false, ctx->flash_attn_enabled);
|
||||||
|
|
||||||
|
if (use_linear) {
|
||||||
|
h_ = proj_out->forward(ctx, h_); // [N, h * w, in_channels]
|
||||||
|
|
||||||
|
h_ = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, h_, 1, 0, 2, 3)); // [N, in_channels, h * w]
|
||||||
|
h_ = ggml_reshape_4d(ctx->ggml_ctx, h_, w, h, c, n); // [N, in_channels, h, w]
|
||||||
|
} else {
|
||||||
|
h_ = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, h_, 1, 0, 2, 3)); // [N, in_channels, h * w]
|
||||||
|
h_ = ggml_reshape_4d(ctx->ggml_ctx, h_, w, h, c, n); // [N, in_channels, h, w]
|
||||||
|
|
||||||
|
h_ = proj_out->forward(ctx, h_); // [N, in_channels, h, w]
|
||||||
|
}
|
||||||
|
|
||||||
|
h_ = ggml_add(ctx->ggml_ctx, h_, x);
|
||||||
|
return h_;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
class AE3DConv : public Conv2d {
|
||||||
|
public:
|
||||||
|
AE3DConv(int64_t in_channels,
|
||||||
|
int64_t out_channels,
|
||||||
|
std::pair<int, int> kernel_size,
|
||||||
|
int video_kernel_size = 3,
|
||||||
|
std::pair<int, int> stride = {1, 1},
|
||||||
|
std::pair<int, int> padding = {0, 0},
|
||||||
|
std::pair<int, int> dilation = {1, 1},
|
||||||
|
bool bias = true)
|
||||||
|
: Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias) {
|
||||||
|
int kernel_padding = video_kernel_size / 2;
|
||||||
|
blocks["time_mix_conv"] = std::shared_ptr<GGMLBlock>(new Conv3d(out_channels,
|
||||||
|
out_channels,
|
||||||
|
{video_kernel_size, 1, 1},
|
||||||
|
{1, 1, 1},
|
||||||
|
{kernel_padding, 0, 0}));
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
|
ggml_tensor* x) override {
|
||||||
|
// timesteps always None
|
||||||
|
// skip_video always False
|
||||||
|
// x: [N, IC, IH, IW]
|
||||||
|
// result: [N, OC, OH, OW]
|
||||||
|
auto time_mix_conv = std::dynamic_pointer_cast<Conv3d>(blocks["time_mix_conv"]);
|
||||||
|
|
||||||
|
x = Conv2d::forward(ctx, x);
|
||||||
|
// timesteps = x.shape[0]
|
||||||
|
// x = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps)
|
||||||
|
// x = conv3d(x)
|
||||||
|
// return rearrange(x, "b c t h w -> (b t) c h w")
|
||||||
|
int64_t T = x->ne[3];
|
||||||
|
int64_t B = x->ne[3] / T;
|
||||||
|
int64_t C = x->ne[2];
|
||||||
|
int64_t H = x->ne[1];
|
||||||
|
int64_t W = x->ne[0];
|
||||||
|
|
||||||
|
x = ggml_reshape_4d(ctx->ggml_ctx, x, W * H, C, T, B); // (b t) c h w -> b t c (h w)
|
||||||
|
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b t c (h w) -> b c t (h w)
|
||||||
|
x = time_mix_conv->forward(ctx, x); // [B, OC, T, OH * OW]
|
||||||
|
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b c t (h w) -> b t c (h w)
|
||||||
|
x = ggml_reshape_4d(ctx->ggml_ctx, x, W, H, C, T * B); // b t c (h w) -> (b t) c h w
|
||||||
|
return x; // [B*T, OC, OH, OW]
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
class VideoResnetBlock : public ResnetBlock {
|
||||||
|
protected:
|
||||||
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
|
enum ggml_type wtype = get_type(prefix + "mix_factor", tensor_storage_map, GGML_TYPE_F32);
|
||||||
|
params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
float get_alpha() {
|
||||||
|
float alpha = ggml_ext_backend_tensor_get_f32(params["mix_factor"]);
|
||||||
|
return sigmoid(alpha);
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
VideoResnetBlock(int64_t in_channels,
|
||||||
|
int64_t out_channels,
|
||||||
|
int video_kernel_size = 3)
|
||||||
|
: ResnetBlock(in_channels, out_channels) {
|
||||||
|
// merge_strategy is always learned
|
||||||
|
blocks["time_stack"] = std::shared_ptr<GGMLBlock>(new ResBlock(out_channels, 0, out_channels, {video_kernel_size, 1}, 3, false, true));
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
|
// x: [N, in_channels, h, w] aka [b*t, in_channels, h, w]
|
||||||
|
// return: [N, out_channels, h, w] aka [b*t, out_channels, h, w]
|
||||||
|
// t_emb is always None
|
||||||
|
// skip_video is always False
|
||||||
|
// timesteps is always None
|
||||||
|
auto time_stack = std::dynamic_pointer_cast<ResBlock>(blocks["time_stack"]);
|
||||||
|
|
||||||
|
x = ResnetBlock::forward(ctx, x); // [N, out_channels, h, w]
|
||||||
|
// return x;
|
||||||
|
|
||||||
|
int64_t T = x->ne[3];
|
||||||
|
int64_t B = x->ne[3] / T;
|
||||||
|
int64_t C = x->ne[2];
|
||||||
|
int64_t H = x->ne[1];
|
||||||
|
int64_t W = x->ne[0];
|
||||||
|
|
||||||
|
x = ggml_reshape_4d(ctx->ggml_ctx, x, W * H, C, T, B); // (b t) c h w -> b t c (h w)
|
||||||
|
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b t c (h w) -> b c t (h w)
|
||||||
|
auto x_mix = x;
|
||||||
|
|
||||||
|
x = time_stack->forward(ctx, x); // b t c (h w)
|
||||||
|
|
||||||
|
float alpha = get_alpha();
|
||||||
|
x = ggml_add(ctx->ggml_ctx,
|
||||||
|
ggml_ext_scale(ctx->ggml_ctx, x, alpha),
|
||||||
|
ggml_ext_scale(ctx->ggml_ctx, x_mix, 1.0f - alpha));
|
||||||
|
|
||||||
|
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b c t (h w) -> b t c (h w)
|
||||||
|
x = ggml_reshape_4d(ctx->ggml_ctx, x, W, H, C, T * B); // b t c (h w) -> (b t) c h w
|
||||||
|
|
||||||
|
return x;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// ldm.modules.diffusionmodules.model.Encoder
|
||||||
|
class Encoder : public GGMLBlock {
|
||||||
|
protected:
|
||||||
|
int ch = 128;
|
||||||
|
std::vector<int> ch_mult = {1, 2, 4, 4};
|
||||||
|
int num_res_blocks = 2;
|
||||||
|
int in_channels = 3;
|
||||||
|
int z_channels = 4;
|
||||||
|
bool double_z = true;
|
||||||
|
|
||||||
|
public:
|
||||||
|
Encoder(int ch,
|
||||||
|
std::vector<int> ch_mult,
|
||||||
|
int num_res_blocks,
|
||||||
|
int in_channels,
|
||||||
|
int z_channels,
|
||||||
|
bool double_z = true,
|
||||||
|
bool use_linear_projection = false)
|
||||||
|
: ch(ch),
|
||||||
|
ch_mult(ch_mult),
|
||||||
|
num_res_blocks(num_res_blocks),
|
||||||
|
in_channels(in_channels),
|
||||||
|
z_channels(z_channels),
|
||||||
|
double_z(double_z) {
|
||||||
|
blocks["conv_in"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, ch, {3, 3}, {1, 1}, {1, 1}));
|
||||||
|
|
||||||
|
size_t num_resolutions = ch_mult.size();
|
||||||
|
|
||||||
|
int block_in = 1;
|
||||||
|
for (int i = 0; i < num_resolutions; i++) {
|
||||||
|
if (i == 0) {
|
||||||
|
block_in = ch;
|
||||||
|
} else {
|
||||||
|
block_in = ch * ch_mult[i - 1];
|
||||||
|
}
|
||||||
|
int block_out = ch * ch_mult[i];
|
||||||
|
for (int j = 0; j < num_res_blocks; j++) {
|
||||||
|
std::string name = "down." + std::to_string(i) + ".block." + std::to_string(j);
|
||||||
|
blocks[name] = std::shared_ptr<GGMLBlock>(new ResnetBlock(block_in, block_out));
|
||||||
|
block_in = block_out;
|
||||||
|
}
|
||||||
|
if (i != num_resolutions - 1) {
|
||||||
|
std::string name = "down." + std::to_string(i) + ".downsample";
|
||||||
|
blocks[name] = std::shared_ptr<GGMLBlock>(new DownSampleBlock(block_in, block_in, true));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
blocks["mid.block_1"] = std::shared_ptr<GGMLBlock>(new ResnetBlock(block_in, block_in));
|
||||||
|
blocks["mid.attn_1"] = std::shared_ptr<GGMLBlock>(new AttnBlock(block_in, use_linear_projection));
|
||||||
|
blocks["mid.block_2"] = std::shared_ptr<GGMLBlock>(new ResnetBlock(block_in, block_in));
|
||||||
|
|
||||||
|
blocks["norm_out"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(block_in));
|
||||||
|
blocks["conv_out"] = std::shared_ptr<GGMLBlock>(new Conv2d(block_in, double_z ? z_channels * 2 : z_channels, {3, 3}, {1, 1}, {1, 1}));
|
||||||
|
}
|
||||||
|
|
||||||
|
virtual ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
|
// x: [N, in_channels, h, w]
|
||||||
|
|
||||||
|
auto conv_in = std::dynamic_pointer_cast<Conv2d>(blocks["conv_in"]);
|
||||||
|
auto mid_block_1 = std::dynamic_pointer_cast<ResnetBlock>(blocks["mid.block_1"]);
|
||||||
|
auto mid_attn_1 = std::dynamic_pointer_cast<AttnBlock>(blocks["mid.attn_1"]);
|
||||||
|
auto mid_block_2 = std::dynamic_pointer_cast<ResnetBlock>(blocks["mid.block_2"]);
|
||||||
|
auto norm_out = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm_out"]);
|
||||||
|
auto conv_out = std::dynamic_pointer_cast<Conv2d>(blocks["conv_out"]);
|
||||||
|
|
||||||
|
auto h = conv_in->forward(ctx, x); // [N, ch, h, w]
|
||||||
|
|
||||||
|
// downsampling
|
||||||
|
size_t num_resolutions = ch_mult.size();
|
||||||
|
for (int i = 0; i < num_resolutions; i++) {
|
||||||
|
for (int j = 0; j < num_res_blocks; j++) {
|
||||||
|
std::string name = "down." + std::to_string(i) + ".block." + std::to_string(j);
|
||||||
|
auto down_block = std::dynamic_pointer_cast<ResnetBlock>(blocks[name]);
|
||||||
|
|
||||||
|
h = down_block->forward(ctx, h);
|
||||||
|
}
|
||||||
|
if (i != num_resolutions - 1) {
|
||||||
|
std::string name = "down." + std::to_string(i) + ".downsample";
|
||||||
|
auto down_sample = std::dynamic_pointer_cast<DownSampleBlock>(blocks[name]);
|
||||||
|
|
||||||
|
h = down_sample->forward(ctx, h);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// middle
|
||||||
|
h = mid_block_1->forward(ctx, h);
|
||||||
|
h = mid_attn_1->forward(ctx, h);
|
||||||
|
h = mid_block_2->forward(ctx, h); // [N, block_in, h, w]
|
||||||
|
|
||||||
|
// end
|
||||||
|
h = norm_out->forward(ctx, h);
|
||||||
|
h = ggml_silu_inplace(ctx->ggml_ctx, h); // nonlinearity/swish
|
||||||
|
h = conv_out->forward(ctx, h); // [N, z_channels*2, h, w]
|
||||||
|
return h;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// ldm.modules.diffusionmodules.model.Decoder
|
||||||
|
class Decoder : public GGMLBlock {
|
||||||
|
protected:
|
||||||
|
int ch = 128;
|
||||||
|
int out_ch = 3;
|
||||||
|
std::vector<int> ch_mult = {1, 2, 4, 4};
|
||||||
|
int num_res_blocks = 2;
|
||||||
|
int z_channels = 4;
|
||||||
|
bool video_decoder = false;
|
||||||
|
int video_kernel_size = 3;
|
||||||
|
|
||||||
|
virtual std::shared_ptr<GGMLBlock> get_conv_out(int64_t in_channels,
|
||||||
|
int64_t out_channels,
|
||||||
|
std::pair<int, int> kernel_size,
|
||||||
|
std::pair<int, int> stride = {1, 1},
|
||||||
|
std::pair<int, int> padding = {0, 0}) {
|
||||||
|
if (video_decoder) {
|
||||||
|
return std::shared_ptr<GGMLBlock>(new AE3DConv(in_channels, out_channels, kernel_size, video_kernel_size, stride, padding));
|
||||||
|
} else {
|
||||||
|
return std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, out_channels, kernel_size, stride, padding));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
virtual std::shared_ptr<GGMLBlock> get_resnet_block(int64_t in_channels,
|
||||||
|
int64_t out_channels) {
|
||||||
|
if (video_decoder) {
|
||||||
|
return std::shared_ptr<GGMLBlock>(new VideoResnetBlock(in_channels, out_channels, video_kernel_size));
|
||||||
|
} else {
|
||||||
|
return std::shared_ptr<GGMLBlock>(new ResnetBlock(in_channels, out_channels));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
Decoder(int ch,
|
||||||
|
int out_ch,
|
||||||
|
std::vector<int> ch_mult,
|
||||||
|
int num_res_blocks,
|
||||||
|
int z_channels,
|
||||||
|
bool use_linear_projection = false,
|
||||||
|
bool video_decoder = false,
|
||||||
|
int video_kernel_size = 3)
|
||||||
|
: ch(ch),
|
||||||
|
out_ch(out_ch),
|
||||||
|
ch_mult(ch_mult),
|
||||||
|
num_res_blocks(num_res_blocks),
|
||||||
|
z_channels(z_channels),
|
||||||
|
video_decoder(video_decoder),
|
||||||
|
video_kernel_size(video_kernel_size) {
|
||||||
|
int num_resolutions = static_cast<int>(ch_mult.size());
|
||||||
|
int block_in = ch * ch_mult[num_resolutions - 1];
|
||||||
|
|
||||||
|
blocks["conv_in"] = std::shared_ptr<GGMLBlock>(new Conv2d(z_channels, block_in, {3, 3}, {1, 1}, {1, 1}));
|
||||||
|
|
||||||
|
blocks["mid.block_1"] = get_resnet_block(block_in, block_in);
|
||||||
|
blocks["mid.attn_1"] = std::shared_ptr<GGMLBlock>(new AttnBlock(block_in, use_linear_projection));
|
||||||
|
blocks["mid.block_2"] = get_resnet_block(block_in, block_in);
|
||||||
|
|
||||||
|
for (int i = num_resolutions - 1; i >= 0; i--) {
|
||||||
|
int mult = ch_mult[i];
|
||||||
|
int block_out = ch * mult;
|
||||||
|
for (int j = 0; j < num_res_blocks + 1; j++) {
|
||||||
|
std::string name = "up." + std::to_string(i) + ".block." + std::to_string(j);
|
||||||
|
blocks[name] = get_resnet_block(block_in, block_out);
|
||||||
|
|
||||||
|
block_in = block_out;
|
||||||
|
}
|
||||||
|
if (i != 0) {
|
||||||
|
std::string name = "up." + std::to_string(i) + ".upsample";
|
||||||
|
blocks[name] = std::shared_ptr<GGMLBlock>(new UpSampleBlock(block_in, block_in));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
blocks["norm_out"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(block_in));
|
||||||
|
blocks["conv_out"] = get_conv_out(block_in, out_ch, {3, 3}, {1, 1}, {1, 1});
|
||||||
|
}
|
||||||
|
|
||||||
|
virtual ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* z) {
|
||||||
|
// z: [N, z_channels, h, w]
|
||||||
|
// alpha is always 0
|
||||||
|
// merge_strategy is always learned
|
||||||
|
// time_mode is always conv-only, so we need to replace conv_out_op/resnet_op to AE3DConv/VideoResBlock
|
||||||
|
// AttnVideoBlock will not be used
|
||||||
|
auto conv_in = std::dynamic_pointer_cast<Conv2d>(blocks["conv_in"]);
|
||||||
|
auto mid_block_1 = std::dynamic_pointer_cast<ResnetBlock>(blocks["mid.block_1"]);
|
||||||
|
auto mid_attn_1 = std::dynamic_pointer_cast<AttnBlock>(blocks["mid.attn_1"]);
|
||||||
|
auto mid_block_2 = std::dynamic_pointer_cast<ResnetBlock>(blocks["mid.block_2"]);
|
||||||
|
auto norm_out = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm_out"]);
|
||||||
|
auto conv_out = std::dynamic_pointer_cast<Conv2d>(blocks["conv_out"]);
|
||||||
|
|
||||||
|
// conv_in
|
||||||
|
auto h = conv_in->forward(ctx, z); // [N, block_in, h, w]
|
||||||
|
|
||||||
|
// middle
|
||||||
|
h = mid_block_1->forward(ctx, h);
|
||||||
|
// return h;
|
||||||
|
|
||||||
|
h = mid_attn_1->forward(ctx, h);
|
||||||
|
h = mid_block_2->forward(ctx, h); // [N, block_in, h, w]
|
||||||
|
|
||||||
|
// upsampling
|
||||||
|
int num_resolutions = static_cast<int>(ch_mult.size());
|
||||||
|
for (int i = num_resolutions - 1; i >= 0; i--) {
|
||||||
|
for (int j = 0; j < num_res_blocks + 1; j++) {
|
||||||
|
std::string name = "up." + std::to_string(i) + ".block." + std::to_string(j);
|
||||||
|
auto up_block = std::dynamic_pointer_cast<ResnetBlock>(blocks[name]);
|
||||||
|
|
||||||
|
h = up_block->forward(ctx, h);
|
||||||
|
}
|
||||||
|
if (i != 0) {
|
||||||
|
std::string name = "up." + std::to_string(i) + ".upsample";
|
||||||
|
auto up_sample = std::dynamic_pointer_cast<UpSampleBlock>(blocks[name]);
|
||||||
|
|
||||||
|
h = up_sample->forward(ctx, h);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
h = norm_out->forward(ctx, h);
|
||||||
|
h = ggml_silu_inplace(ctx->ggml_ctx, h); // nonlinearity/swish
|
||||||
|
h = conv_out->forward(ctx, h); // [N, out_ch, h*8, w*8]
|
||||||
|
return h;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// ldm.models.autoencoder.AutoencoderKL
// GGML graph definition for the KL-regularized image autoencoder (VAE):
// an optional Encoder, a Decoder, and the 1x1 (post_)quant_conv projections
// around the latent space. Which sub-blocks exist depends on the model
// version and on decode_only.
class AutoEncoderKLModel : public GGMLBlock {
protected:
    SDVersion version;
    bool decode_only       = true;   // when true, only the decoder sub-graph is built
    bool use_video_decoder = false;
    bool use_quant         = true;   // whether quant_conv/post_quant_conv 1x1 convs exist
    int embed_dim          = 4;      // latent embedding channels
    // Encoder/decoder hyper-parameters (the "ddconfig" block of the LDM yaml).
    struct {
        int z_channels           = 4;
        int resolution           = 256;
        int in_channels          = 3;
        int out_ch               = 3;
        int ch                   = 128;
        std::vector<int> ch_mult = {1, 2, 4, 4};
        int num_res_blocks       = 2;
        bool double_z            = true;  // encoder emits mean+logvar -> 2*z_channels
    } dd_config;

public:
    AutoEncoderKLModel(SDVersion version              = VERSION_SD1,
                       bool decode_only               = true,
                       bool use_linear_projection     = false,
                       bool use_video_decoder         = false)
        : version(version), decode_only(decode_only), use_video_decoder(use_video_decoder) {
        if (sd_version_is_dit(version)) {
            if (sd_version_is_flux2(version)) {
                dd_config.z_channels = 32;
                embed_dim            = 32;
            } else {
                // SD3/Flux style VAEs: 16 latent channels and no (post_)quant_conv.
                use_quant            = false;
                dd_config.z_channels = 16;
            }
        }
        if (use_video_decoder) {
            use_quant = false;
        }
        blocks["decoder"] = std::shared_ptr<GGMLBlock>(new Decoder(dd_config.ch,
                                                                   dd_config.out_ch,
                                                                   dd_config.ch_mult,
                                                                   dd_config.num_res_blocks,
                                                                   dd_config.z_channels,
                                                                   use_linear_projection,
                                                                   use_video_decoder));
        if (use_quant) {
            blocks["post_quant_conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(dd_config.z_channels,
                                                                              embed_dim,
                                                                              {1, 1}));
        }
        if (!decode_only) {
            blocks["encoder"] = std::shared_ptr<GGMLBlock>(new Encoder(dd_config.ch,
                                                                       dd_config.ch_mult,
                                                                       dd_config.num_res_blocks,
                                                                       dd_config.in_channels,
                                                                       dd_config.z_channels,
                                                                       dd_config.double_z,
                                                                       use_linear_projection));
            if (use_quant) {
                int factor = dd_config.double_z ? 2 : 1;

                // NOTE(review): upstream LDM declares quant_conv as
                // Conv2d(z_channels * factor, embed_dim * factor); here the two
                // channel counts appear swapped. Harmless while embed_dim ==
                // z_channels (true for every config above) — confirm if they
                // ever diverge.
                blocks["quant_conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(embed_dim * factor,
                                                                             dd_config.z_channels * factor,
                                                                             {1, 1}));
            }
        }
    }

    // Decode latents z -> image.
    // z: [N, z_channels, h, w]; returns [N, out_ch, h*8, w*8].
    ggml_tensor* decode(GGMLRunnerContext* ctx, ggml_tensor* z) {
        // z: [N, z_channels, h, w]
        if (sd_version_is_flux2(version)) {
            // flux2 packs 2x2 pixel patches into channels; unpack before decoding:
            // [N, C*p*p, h, w] -> [N, C, h*p, w*p]
            int64_t p = 2;

            int64_t N = z->ne[3];
            int64_t C = z->ne[2] / p / p;
            int64_t h = z->ne[1];
            int64_t w = z->ne[0];
            int64_t H = h * p;
            int64_t W = w * p;

            z = ggml_reshape_4d(ctx->ggml_ctx, z, w * h, p * p, C, N);                           // [N, C, p*p, h*w]
            z = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, z, 1, 0, 2, 3));  // [N, C, h*w, p*p]
            z = ggml_reshape_4d(ctx->ggml_ctx, z, p, p, w, h * C * N);                           // [N*C*h, w, p, p]
            z = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, z, 0, 2, 1, 3));  // [N*C*h, p, w, p]
            z = ggml_reshape_4d(ctx->ggml_ctx, z, W, H, C, N);                                   // [N, C, h*p, w*p]
        }

        if (use_quant) {
            auto post_quant_conv = std::dynamic_pointer_cast<Conv2d>(blocks["post_quant_conv"]);
            z = post_quant_conv->forward(ctx, z);  // [N, z_channels, h, w]
        }
        auto decoder = std::dynamic_pointer_cast<Decoder>(blocks["decoder"]);

        // Names used by the benchmark timing code to delimit the decode graph.
        ggml_set_name(z, "bench-start");
        auto h = decoder->forward(ctx, z);
        ggml_set_name(h, "bench-end");
        return h;
    }

    // Encode image x -> latent moments.
    // x: [N, in_channels, h, w]; returns [N, 2*z_channels, h/8, w/8]
    // (flux2: mean half only, then 2x2-patchified into channels).
    ggml_tensor* encode(GGMLRunnerContext* ctx, ggml_tensor* x) {
        // x: [N, in_channels, h, w]
        auto encoder = std::dynamic_pointer_cast<Encoder>(blocks["encoder"]);

        auto z = encoder->forward(ctx, x);  // [N, 2*z_channels, h/8, w/8]
        if (use_quant) {
            auto quant_conv = std::dynamic_pointer_cast<Conv2d>(blocks["quant_conv"]);
            z = quant_conv->forward(ctx, z);  // [N, 2*embed_dim, h/8, w/8]
        }
        if (sd_version_is_flux2(version)) {
            // keep only the mean half of the moments (drop logvar)
            z = ggml_ext_chunk(ctx->ggml_ctx, z, 2, 2)[0];

            // pack 2x2 spatial patches into channels:
            // [N, C, H, W] -> [N, C*p*p, H/p, W/p]
            int64_t p = 2;
            int64_t N = z->ne[3];
            int64_t C = z->ne[2];
            int64_t H = z->ne[1];
            int64_t W = z->ne[0];
            int64_t h = H / p;
            int64_t w = W / p;

            z = ggml_reshape_4d(ctx->ggml_ctx, z, p, w, p, h * C * N);                  // [N*C*h, p, w, p]
            z = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, z, 0, 2, 1, 3));   // [N*C*h, w, p, p]
            z = ggml_reshape_4d(ctx->ggml_ctx, z, p * p, w * h, C, N);                  // [N, C, h*w, p*p]
            z = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, z, 1, 0, 2, 3));   // [N, C, p*p, h*w]
            z = ggml_reshape_4d(ctx->ggml_ctx, z, w, h, p * p * C, N);                  // [N, C*p*p, h*w]
        }
        return z;
    }

    // Number of channels the encoder sub-graph produces.
    int get_encoder_output_channels() {
        int factor = dd_config.double_z ? 2 : 1;
        if (sd_version_is_flux2(version)) {
            // flux2: mean only (no logvar half), but 2x2-patchified -> z_channels * 4
            return dd_config.z_channels * 4;
        }
        return dd_config.z_channels * factor;
    }
};
|
||||||
|
|
||||||
|
// Runner wrapper around AutoEncoderKLModel: owns the graph lifecycle, the
// latent-space scale/shift constants, and the host-side latent conversions
// between VAE space and the diffusion model's space.
struct AutoEncoderKL : public VAE {
    // Affine map between VAE latents and diffusion latents:
    //   diffusion = (vae - shift_factor) * scale_factor
    float scale_factor = 1.f;
    float shift_factor = 0.f;
    bool decode_only   = true;
    AutoEncoderKLModel ae;  // the actual GGML graph definition

    AutoEncoderKL(ggml_backend_t backend,
                  bool offload_params_to_cpu,
                  const String2TensorStorage& tensor_storage_map,
                  const std::string prefix,
                  bool decode_only       = false,
                  bool use_video_decoder = false,
                  SDVersion version      = VERSION_SD1)
        : decode_only(decode_only), VAE(version, backend, offload_params_to_cpu) {
        // Per-family latent scale/shift constants (values from the respective
        // upstream model configs).
        if (sd_version_is_sd1(version) || sd_version_is_sd2(version)) {
            scale_factor = 0.18215f;
            shift_factor = 0.f;
        } else if (sd_version_is_sdxl(version)) {
            scale_factor = 0.13025f;
            shift_factor = 0.f;
        } else if (sd_version_is_sd3(version)) {
            scale_factor = 1.5305f;
            shift_factor = 0.0609f;
        } else if (sd_version_is_flux(version) || sd_version_is_z_image(version)) {
            scale_factor = 0.3611f;
            shift_factor = 0.1159f;
        } else if (sd_version_is_flux2(version)) {
            // flux2 normalizes per channel instead; see get_latents_mean_std_vec
            scale_factor = 1.0f;
            shift_factor = 0.f;
        }
        // Detect the attention projection layout from the checkpoint itself:
        // a 2-D attn_1.proj_out.weight means nn.Linear, 4-D means nn.Conv2d.
        bool use_linear_projection = false;
        for (const auto& [name, tensor_storage] : tensor_storage_map) {
            if (!starts_with(name, prefix)) {
                continue;
            }
            if (ends_with(name, "attn_1.proj_out.weight")) {
                if (tensor_storage.n_dims == 2) {
                    use_linear_projection = true;
                }
                break;
            }
        }
        ae = AutoEncoderKLModel(version, decode_only, use_linear_projection, use_video_decoder);
        ae.init(params_ctx, tensor_storage_map, prefix);
    }

    // Apply a uniform output scale to every Conv2d in the model.
    void set_conv2d_scale(float scale) override {
        std::vector<GGMLBlock*> blocks;
        ae.get_all_blocks(blocks);
        for (auto block : blocks) {
            if (block->get_desc() == "Conv2d") {
                auto conv_block = (Conv2d*)block;
                conv_block->set_scale(scale);
            }
        }
    }

    std::string get_desc() override {
        return "vae";
    }

    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) override {
        ae.get_param_tensors(tensors, prefix);
    }

    // Build the forward graph: decode when decode_graph is true, else encode.
    ggml_cgraph* build_graph(ggml_tensor* z, bool decode_graph) {
        ggml_cgraph* gf = ggml_new_graph(compute_ctx);

        z = to_backend(z);

        auto runner_ctx = get_context();

        ggml_tensor* out = decode_graph ? ae.decode(&runner_ctx, z) : ae.encode(&runner_ctx, z);

        ggml_build_forward_expand(gf, out);

        return gf;
    }

    // Run encode/decode on z; the result tensor is written to *output
    // (allocated in output_ctx when provided).
    bool _compute(const int n_threads,
                  ggml_tensor* z,
                  bool decode_graph,
                  ggml_tensor** output,
                  ggml_context* output_ctx = nullptr) override {
        // encoder weights are absent in decode_only mode
        GGML_ASSERT(!decode_only || decode_graph);
        auto get_graph = [&]() -> ggml_cgraph* {
            return build_graph(z, decode_graph);
        };
        // ggml_set_f32(z, 0.5f);
        // print_ggml_tensor(z);
        return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
    }

    // Sample latents from the encoder moments (mean/logvar halves stacked
    // along the channel dim): latents = mean + exp(0.5*clamp(logvar)) * noise.
    ggml_tensor* gaussian_latent_sample(ggml_context* work_ctx, ggml_tensor* moments, std::shared_ptr<RNG> rng) {
        // ldm.modules.distributions.distributions.DiagonalGaussianDistribution.sample
        ggml_tensor* latents = ggml_new_tensor_4d(work_ctx, moments->type, moments->ne[0], moments->ne[1], moments->ne[2] / 2, moments->ne[3]);
        ggml_tensor* noise   = ggml_dup_tensor(work_ctx, latents);
        ggml_ext_im_set_randn_f32(noise, rng);
        {
            float mean   = 0;
            float logvar = 0;
            float value  = 0;
            float std_   = 0;
            for (int i = 0; i < latents->ne[3]; i++) {
                for (int j = 0; j < latents->ne[2]; j++) {
                    for (int k = 0; k < latents->ne[1]; k++) {
                        for (int l = 0; l < latents->ne[0]; l++) {
                            mean = ggml_ext_tensor_get_f32(moments, l, k, j, i);
                            // logvar lives in the second half of the channel dim
                            logvar = ggml_ext_tensor_get_f32(moments, l, k, j + (int)latents->ne[2], i);
                            logvar = std::max(-30.0f, std::min(logvar, 20.0f));  // clamp as in LDM
                            std_   = std::exp(0.5f * logvar);
                            value  = mean + std_ * ggml_ext_tensor_get_f32(noise, l, k, j, i);
                            // printf("%d %d %d %d -> %f\n", i, j, k, l, value);
                            ggml_ext_tensor_set_f32(latents, value, l, k, j, i);
                        }
                    }
                }
            }
        }
        return latents;
    }

    // Convert raw encoder output into latents: flux2 already returns the
    // latents, pix2pix takes the mean half directly, everything else samples
    // from the diagonal Gaussian.
    ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
        if (sd_version_is_flux2(version)) {
            return vae_output;
        } else if (version == VERSION_SD1_PIX2PIX) {
            // view over the mean half (first z_channels channels), no sampling
            return ggml_view_3d(work_ctx,
                                vae_output,
                                vae_output->ne[0],
                                vae_output->ne[1],
                                vae_output->ne[2] / 2,
                                vae_output->nb[1],
                                vae_output->nb[2],
                                0);
        } else {
            return gaussian_latent_sample(work_ctx, vae_output, rng);
        }
    }

    // Per-channel latent statistics for models that normalize channels
    // individually (currently only flux2, which has 128 latent channels).
    // Aborts for any other version.
    void get_latents_mean_std_vec(ggml_tensor* latents, int channel_dim, std::vector<float>& latents_mean_vec, std::vector<float>& latents_std_vec) {
        // flux2
        if (sd_version_is_flux2(version)) {
            GGML_ASSERT(latents->ne[channel_dim] == 128);
            latents_mean_vec = {-0.0676f, -0.0715f, -0.0753f, -0.0745f, 0.0223f, 0.0180f, 0.0142f, 0.0184f,
                                -0.0001f, -0.0063f, -0.0002f, -0.0031f, -0.0272f, -0.0281f, -0.0276f, -0.0290f,
                                -0.0769f, -0.0672f, -0.0902f, -0.0892f, 0.0168f, 0.0152f, 0.0079f, 0.0086f,
                                0.0083f, 0.0015f, 0.0003f, -0.0043f, -0.0439f, -0.0419f, -0.0438f, -0.0431f,
                                -0.0102f, -0.0132f, -0.0066f, -0.0048f, -0.0311f, -0.0306f, -0.0279f, -0.0180f,
                                0.0030f, 0.0015f, 0.0126f, 0.0145f, 0.0347f, 0.0338f, 0.0337f, 0.0283f,
                                0.0020f, 0.0047f, 0.0047f, 0.0050f, 0.0123f, 0.0081f, 0.0081f, 0.0146f,
                                0.0681f, 0.0679f, 0.0767f, 0.0732f, -0.0462f, -0.0474f, -0.0392f, -0.0511f,
                                -0.0528f, -0.0477f, -0.0470f, -0.0517f, -0.0317f, -0.0316f, -0.0345f, -0.0283f,
                                0.0510f, 0.0445f, 0.0578f, 0.0458f, -0.0412f, -0.0458f, -0.0487f, -0.0467f,
                                -0.0088f, -0.0106f, -0.0088f, -0.0046f, -0.0376f, -0.0432f, -0.0436f, -0.0499f,
                                0.0118f, 0.0166f, 0.0203f, 0.0279f, 0.0113f, 0.0129f, 0.0016f, 0.0072f,
                                -0.0118f, -0.0018f, -0.0141f, -0.0054f, -0.0091f, -0.0138f, -0.0145f, -0.0187f,
                                0.0323f, 0.0305f, 0.0259f, 0.0300f, 0.0540f, 0.0614f, 0.0495f, 0.0590f,
                                -0.0511f, -0.0603f, -0.0478f, -0.0524f, -0.0227f, -0.0274f, -0.0154f, -0.0255f,
                                -0.0572f, -0.0565f, -0.0518f, -0.0496f, 0.0116f, 0.0054f, 0.0163f, 0.0104f};
            latents_std_vec = {
                1.8029f, 1.7786f, 1.7868f, 1.7837f, 1.7717f, 1.7590f, 1.7610f, 1.7479f,
                1.7336f, 1.7373f, 1.7340f, 1.7343f, 1.8626f, 1.8527f, 1.8629f, 1.8589f,
                1.7593f, 1.7526f, 1.7556f, 1.7583f, 1.7363f, 1.7400f, 1.7355f, 1.7394f,
                1.7342f, 1.7246f, 1.7392f, 1.7304f, 1.7551f, 1.7513f, 1.7559f, 1.7488f,
                1.8449f, 1.8454f, 1.8550f, 1.8535f, 1.8240f, 1.7813f, 1.7854f, 1.7945f,
                1.8047f, 1.7876f, 1.7695f, 1.7676f, 1.7782f, 1.7667f, 1.7925f, 1.7848f,
                1.7579f, 1.7407f, 1.7483f, 1.7368f, 1.7961f, 1.7998f, 1.7920f, 1.7925f,
                1.7780f, 1.7747f, 1.7727f, 1.7749f, 1.7526f, 1.7447f, 1.7657f, 1.7495f,
                1.7775f, 1.7720f, 1.7813f, 1.7813f, 1.8162f, 1.8013f, 1.8023f, 1.8033f,
                1.7527f, 1.7331f, 1.7563f, 1.7482f, 1.7610f, 1.7507f, 1.7681f, 1.7613f,
                1.7665f, 1.7545f, 1.7828f, 1.7726f, 1.7896f, 1.7999f, 1.7864f, 1.7760f,
                1.7613f, 1.7625f, 1.7560f, 1.7577f, 1.7783f, 1.7671f, 1.7810f, 1.7799f,
                1.7201f, 1.7068f, 1.7265f, 1.7091f, 1.7793f, 1.7578f, 1.7502f, 1.7455f,
                1.7587f, 1.7500f, 1.7525f, 1.7362f, 1.7616f, 1.7572f, 1.7444f, 1.7430f,
                1.7509f, 1.7610f, 1.7634f, 1.7612f, 1.7254f, 1.7135f, 1.7321f, 1.7226f,
                1.7664f, 1.7624f, 1.7718f, 1.7664f, 1.7457f, 1.7441f, 1.7569f, 1.7530f};
        } else {
            GGML_ABORT("unknown version %d", version);
        }
    }

    // Map diffusion-model latents back into VAE latent space
    // (inverse of vae_to_diffuison_latents).
    ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
        ggml_tensor* vae_latents = ggml_dup(work_ctx, latents);
        if (sd_version_is_flux2(version)) {
            // flux2: per-channel de-normalization, vae = diff * std / scale + mean
            int channel_dim = 2;
            std::vector<float> latents_mean_vec;
            std::vector<float> latents_std_vec;
            get_latents_mean_std_vec(latents, channel_dim, latents_mean_vec, latents_std_vec);

            // mean/std_ are always assigned before use: channel_dim is 2 here,
            // so the j-loop branch runs on every iteration.
            float mean;
            float std_;
            for (int i = 0; i < latents->ne[3]; i++) {
                if (channel_dim == 3) {
                    mean = latents_mean_vec[i];
                    std_ = latents_std_vec[i];
                }
                for (int j = 0; j < latents->ne[2]; j++) {
                    if (channel_dim == 2) {
                        mean = latents_mean_vec[j];
                        std_ = latents_std_vec[j];
                    }
                    for (int k = 0; k < latents->ne[1]; k++) {
                        for (int l = 0; l < latents->ne[0]; l++) {
                            float value = ggml_ext_tensor_get_f32(latents, l, k, j, i);
                            value       = value * std_ / scale_factor + mean;
                            ggml_ext_tensor_set_f32(vae_latents, value, l, k, j, i);
                        }
                    }
                }
            }
        } else {
            // scalar affine: vae = diff / scale_factor + shift_factor
            ggml_ext_tensor_iter(latents, [&](ggml_tensor* latents, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
                float value = ggml_ext_tensor_get_f32(latents, i0, i1, i2, i3);
                value       = (value / scale_factor) + shift_factor;
                ggml_ext_tensor_set_f32(vae_latents, value, i0, i1, i2, i3);
            });
        }
        return vae_latents;
    }

    // Map VAE latents into the diffusion model's latent space.
    // (name typo "diffuison" kept — it is part of the public interface)
    ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
        ggml_tensor* diffusion_latents = ggml_dup(work_ctx, latents);
        if (sd_version_is_flux2(version)) {
            // flux2: per-channel normalization, diff = (vae - mean) * scale / std
            int channel_dim = 2;
            std::vector<float> latents_mean_vec;
            std::vector<float> latents_std_vec;
            get_latents_mean_std_vec(latents, channel_dim, latents_mean_vec, latents_std_vec);

            // mean/std_ always assigned before use (channel_dim == 2 here)
            float mean;
            float std_;
            for (int i = 0; i < latents->ne[3]; i++) {
                if (channel_dim == 3) {
                    mean = latents_mean_vec[i];
                    std_ = latents_std_vec[i];
                }
                for (int j = 0; j < latents->ne[2]; j++) {
                    if (channel_dim == 2) {
                        mean = latents_mean_vec[j];
                        std_ = latents_std_vec[j];
                    }
                    for (int k = 0; k < latents->ne[1]; k++) {
                        for (int l = 0; l < latents->ne[0]; l++) {
                            float value = ggml_ext_tensor_get_f32(latents, l, k, j, i);
                            value       = (value - mean) * scale_factor / std_;
                            ggml_ext_tensor_set_f32(diffusion_latents, value, l, k, j, i);
                        }
                    }
                }
            }
        } else {
            // scalar affine: diff = (vae - shift_factor) * scale_factor
            ggml_ext_tensor_iter(latents, [&](ggml_tensor* latents, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
                float value = ggml_ext_tensor_get_f32(latents, i0, i1, i2, i3);
                value       = (value - shift_factor) * scale_factor;
                ggml_ext_tensor_set_f32(diffusion_latents, value, i0, i1, i2, i3);
            });
        }
        return diffusion_latents;
    }

    // input_channels is currently unused; the count depends only on the model
    // configuration held by ae.
    int get_encoder_output_channels(int input_channels) {
        return ae.get_encoder_output_channels();
    }

    // Ad-hoc manual self-test for encode/decode; not called in normal operation.
    void test() {
        ggml_init_params params;
        params.mem_size   = static_cast<size_t>(10 * 1024 * 1024);  // 10 MB
        params.mem_buffer = nullptr;
        params.no_alloc   = false;

        ggml_context* work_ctx = ggml_init(params);
        GGML_ASSERT(work_ctx != nullptr);

        {
            // CPU, x{1, 3, 64, 64}: Pass
            // CUDA, x{1, 3, 64, 64}: Pass, but still get wrong result for some image, may be due to internal nan
            // CPU, x{2, 3, 64, 64}: Wrong result
            // CUDA, x{2, 3, 64, 64}: Wrong result, and different from CPU result
            auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 64, 64, 3, 2);
            ggml_set_f32(x, 0.5f);
            print_ggml_tensor(x);
            ggml_tensor* out = nullptr;

            int64_t t0 = ggml_time_ms();
            _compute(8, x, false, &out, work_ctx);
            int64_t t1 = ggml_time_ms();

            print_ggml_tensor(out);
            LOG_DEBUG("encode test done in %lldms", t1 - t0);
        }

        if (false) {
            // CPU, z{1, 4, 8, 8}: Pass
            // CUDA, z{1, 4, 8, 8}: Pass
            // CPU, z{3, 4, 8, 8}: Wrong result
            // CUDA, z{3, 4, 8, 8}: Wrong result, and different from CPU result
            auto z = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 4, 1);
            ggml_set_f32(z, 0.5f);
            print_ggml_tensor(z);
            ggml_tensor* out = nullptr;

            int64_t t0 = ggml_time_ms();
            _compute(8, z, true, &out, work_ctx);
            int64_t t1 = ggml_time_ms();

            print_ggml_tensor(out);
            LOG_DEBUG("decode test done in %lldms", t1 - t0);
        }
    };
};
|
||||||
|
|
||||||
|
#endif // __AUTO_ENCODER_KL_HPP__
|
||||||
@ -603,87 +603,6 @@ inline std::vector<int> generate_scm_mask(
|
|||||||
return mask;
|
return mask;
|
||||||
}
|
}
|
||||||
|
|
||||||
inline std::vector<int> get_scm_preset(const std::string& preset, int total_steps) {
|
|
||||||
struct Preset {
|
|
||||||
std::vector<int> compute_bins;
|
|
||||||
std::vector<int> cache_bins;
|
|
||||||
};
|
|
||||||
|
|
||||||
Preset slow = {{8, 3, 3, 2, 1, 1}, {1, 2, 2, 2, 3}};
|
|
||||||
Preset medium = {{6, 2, 2, 2, 2, 1}, {1, 3, 3, 3, 3}};
|
|
||||||
Preset fast = {{6, 1, 1, 1, 1, 1}, {1, 3, 4, 5, 4}};
|
|
||||||
Preset ultra = {{4, 1, 1, 1, 1}, {2, 5, 6, 7}};
|
|
||||||
|
|
||||||
Preset* p = nullptr;
|
|
||||||
if (preset == "slow" || preset == "s" || preset == "S")
|
|
||||||
p = &slow;
|
|
||||||
else if (preset == "medium" || preset == "m" || preset == "M")
|
|
||||||
p = &medium;
|
|
||||||
else if (preset == "fast" || preset == "f" || preset == "F")
|
|
||||||
p = &fast;
|
|
||||||
else if (preset == "ultra" || preset == "u" || preset == "U")
|
|
||||||
p = &ultra;
|
|
||||||
else
|
|
||||||
return {};
|
|
||||||
|
|
||||||
if (total_steps != 28 && total_steps > 0) {
|
|
||||||
float scale = static_cast<float>(total_steps) / 28.0f;
|
|
||||||
std::vector<int> scaled_compute, scaled_cache;
|
|
||||||
|
|
||||||
for (int v : p->compute_bins) {
|
|
||||||
scaled_compute.push_back(std::max(1, static_cast<int>(v * scale + 0.5f)));
|
|
||||||
}
|
|
||||||
for (int v : p->cache_bins) {
|
|
||||||
scaled_cache.push_back(std::max(1, static_cast<int>(v * scale + 0.5f)));
|
|
||||||
}
|
|
||||||
|
|
||||||
return generate_scm_mask(scaled_compute, scaled_cache, total_steps);
|
|
||||||
}
|
|
||||||
|
|
||||||
return generate_scm_mask(p->compute_bins, p->cache_bins, total_steps);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Residual-difference threshold for the given cache preset; more aggressive
// presets tolerate larger deviations before forcing a recompute.
// Unknown presets get a conservative default of 0.08.
inline float get_preset_threshold(const std::string& preset) {
    if (preset == "ultra" || preset == "u" || preset == "U")
        return 0.34f;
    if (preset == "fast" || preset == "f" || preset == "F")
        return 0.30f;
    if (preset == "medium" || preset == "m" || preset == "M")
        return 0.25f;
    if (preset == "slow" || preset == "s" || preset == "S")
        return 0.20f;
    return 0.08f;  // conservative default for unrecognized presets
}
|
|
||||||
|
|
||||||
// Number of warm-up steps (always computed, never cached) for the preset.
// "slow" and unrecognized presets share the default of 8.
inline int get_preset_warmup(const std::string& preset) {
    const bool is_medium = preset == "medium" || preset == "m" || preset == "M";
    const bool is_fast   = preset == "fast" || preset == "f" || preset == "F";
    const bool is_ultra  = preset == "ultra" || preset == "u" || preset == "U";
    if (is_ultra)
        return 4;
    if (is_medium || is_fast)
        return 6;
    return 8;  // "slow" and any unknown preset
}
|
|
||||||
|
|
||||||
// Fn parameter (front-block count) for the preset.
// "slow", "medium" and unrecognized presets all use the default of 8.
inline int get_preset_Fn(const std::string& preset) {
    if (preset == "ultra" || preset == "u" || preset == "U")
        return 4;
    if (preset == "fast" || preset == "f" || preset == "F")
        return 6;
    return 8;  // slow/medium and any unknown preset
}
|
|
||||||
|
|
||||||
// Bn parameter (back-block count) for the preset. Currently fixed at 0 for
// every preset; the parameter is kept for future per-preset tuning.
inline int get_preset_Bn(const std::string& preset) {
    static_cast<void>(preset);  // intentionally unused
    return 0;
}
|
|
||||||
|
|
||||||
inline void parse_dbcache_options(const std::string& opts, DBCacheConfig& cfg) {
|
inline void parse_dbcache_options(const std::string& opts, DBCacheConfig& cfg) {
|
||||||
if (opts.empty())
|
if (opts.empty())
|
||||||
return;
|
return;
|
||||||
@ -880,7 +799,7 @@ struct CacheDitConditionState {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool before_condition(const void* cond, struct ggml_tensor* input, struct ggml_tensor* output, float sigma, int step_index) {
|
bool before_condition(const void* cond, ggml_tensor* input, ggml_tensor* output, float sigma, int step_index) {
|
||||||
if (!enabled() || step_index < 0)
|
if (!enabled() || step_index < 0)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
@ -948,7 +867,7 @@ struct CacheDitConditionState {
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
void after_condition(const void* cond, struct ggml_tensor* input, struct ggml_tensor* output) {
|
void after_condition(const void* cond, ggml_tensor* input, ggml_tensor* output) {
|
||||||
if (!step_is_active())
|
if (!step_is_active())
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
|||||||
118
src/clip.hpp
118
src/clip.hpp
@ -473,7 +473,7 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [N, n_token, d_model]
|
// x: [N, n_token, d_model]
|
||||||
auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]);
|
auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]);
|
||||||
auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
|
auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
|
||||||
@ -511,7 +511,7 @@ public:
|
|||||||
blocks["mlp"] = std::shared_ptr<GGMLBlock>(new CLIPMLP(d_model, intermediate_size));
|
blocks["mlp"] = std::shared_ptr<GGMLBlock>(new CLIPMLP(d_model, intermediate_size));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* mask = nullptr) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, ggml_tensor* mask = nullptr) {
|
||||||
// x: [N, n_token, d_model]
|
// x: [N, n_token, d_model]
|
||||||
auto self_attn = std::dynamic_pointer_cast<MultiheadAttention>(blocks["self_attn"]);
|
auto self_attn = std::dynamic_pointer_cast<MultiheadAttention>(blocks["self_attn"]);
|
||||||
auto layer_norm1 = std::dynamic_pointer_cast<LayerNorm>(blocks["layer_norm1"]);
|
auto layer_norm1 = std::dynamic_pointer_cast<LayerNorm>(blocks["layer_norm1"]);
|
||||||
@ -541,10 +541,10 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* mask = nullptr,
|
ggml_tensor* mask = nullptr,
|
||||||
int clip_skip = -1) {
|
int clip_skip = -1) {
|
||||||
// x: [N, n_token, d_model]
|
// x: [N, n_token, d_model]
|
||||||
int layer_idx = n_layer - 1;
|
int layer_idx = n_layer - 1;
|
||||||
// LOG_DEBUG("clip_skip %d", clip_skip);
|
// LOG_DEBUG("clip_skip %d", clip_skip);
|
||||||
@ -573,7 +573,7 @@ protected:
|
|||||||
int64_t num_positions;
|
int64_t num_positions;
|
||||||
bool force_clip_f32;
|
bool force_clip_f32;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
enum ggml_type token_wtype = GGML_TYPE_F32;
|
enum ggml_type token_wtype = GGML_TYPE_F32;
|
||||||
if (!force_clip_f32) {
|
if (!force_clip_f32) {
|
||||||
token_wtype = get_type(prefix + "token_embedding.weight", tensor_storage_map, GGML_TYPE_F32);
|
token_wtype = get_type(prefix + "token_embedding.weight", tensor_storage_map, GGML_TYPE_F32);
|
||||||
@ -597,13 +597,13 @@ public:
|
|||||||
force_clip_f32(force_clip_f32) {
|
force_clip_f32(force_clip_f32) {
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* get_token_embed_weight() {
|
ggml_tensor* get_token_embed_weight() {
|
||||||
return params["token_embedding.weight"];
|
return params["token_embedding.weight"];
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* input_ids,
|
ggml_tensor* input_ids,
|
||||||
struct ggml_tensor* custom_embed_weight) {
|
ggml_tensor* custom_embed_weight) {
|
||||||
// input_ids: [N, n_token]
|
// input_ids: [N, n_token]
|
||||||
auto token_embed_weight = params["token_embedding.weight"];
|
auto token_embed_weight = params["token_embedding.weight"];
|
||||||
auto position_embed_weight = params["position_embedding.weight"];
|
auto position_embed_weight = params["position_embedding.weight"];
|
||||||
@ -630,7 +630,7 @@ protected:
|
|||||||
int num_patches;
|
int num_patches;
|
||||||
int64_t num_positions;
|
int64_t num_positions;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
enum ggml_type patch_wtype = GGML_TYPE_F16;
|
enum ggml_type patch_wtype = GGML_TYPE_F16;
|
||||||
enum ggml_type class_wtype = GGML_TYPE_F32;
|
enum ggml_type class_wtype = GGML_TYPE_F32;
|
||||||
enum ggml_type position_wtype = GGML_TYPE_F32;
|
enum ggml_type position_wtype = GGML_TYPE_F32;
|
||||||
@ -653,7 +653,7 @@ public:
|
|||||||
num_positions = num_patches + 1;
|
num_positions = num_patches + 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* pixel_values) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* pixel_values) {
|
||||||
// pixel_values: [N, num_channels, image_size, image_size]
|
// pixel_values: [N, num_channels, image_size, image_size]
|
||||||
// return: [N, num_positions, embed_dim]
|
// return: [N, num_positions, embed_dim]
|
||||||
GGML_ASSERT(pixel_values->ne[0] == image_size && pixel_values->ne[1] == image_size && pixel_values->ne[2] == num_channels);
|
GGML_ASSERT(pixel_values->ne[0] == image_size && pixel_values->ne[1] == image_size && pixel_values->ne[2] == num_channels);
|
||||||
@ -663,20 +663,20 @@ public:
|
|||||||
auto position_embed_weight = params["position_embedding.weight"];
|
auto position_embed_weight = params["position_embedding.weight"];
|
||||||
|
|
||||||
// concat(patch_embedding, class_embedding) + position_embedding
|
// concat(patch_embedding, class_embedding) + position_embedding
|
||||||
struct ggml_tensor* patch_embedding;
|
ggml_tensor* patch_embedding;
|
||||||
int64_t N = pixel_values->ne[3];
|
int64_t N = pixel_values->ne[3];
|
||||||
patch_embedding = ggml_ext_conv_2d(ctx->ggml_ctx, pixel_values, patch_embed_weight, nullptr, patch_size, patch_size); // [N, embed_dim, image_size // pacht_size, image_size // pacht_size]
|
patch_embedding = ggml_ext_conv_2d(ctx->ggml_ctx, pixel_values, patch_embed_weight, nullptr, patch_size, patch_size); // [N, embed_dim, image_size // pacht_size, image_size // pacht_size]
|
||||||
patch_embedding = ggml_reshape_3d(ctx->ggml_ctx, patch_embedding, num_patches, embed_dim, N); // [N, embed_dim, num_patches]
|
patch_embedding = ggml_reshape_3d(ctx->ggml_ctx, patch_embedding, num_patches, embed_dim, N); // [N, embed_dim, num_patches]
|
||||||
patch_embedding = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, patch_embedding, 1, 0, 2, 3)); // [N, num_patches, embed_dim]
|
patch_embedding = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, patch_embedding, 1, 0, 2, 3)); // [N, num_patches, embed_dim]
|
||||||
patch_embedding = ggml_reshape_4d(ctx->ggml_ctx, patch_embedding, 1, embed_dim, num_patches, N); // [N, num_patches, embed_dim, 1]
|
patch_embedding = ggml_reshape_4d(ctx->ggml_ctx, patch_embedding, 1, embed_dim, num_patches, N); // [N, num_patches, embed_dim, 1]
|
||||||
|
|
||||||
struct ggml_tensor* class_embedding = ggml_new_tensor_2d(ctx->ggml_ctx, GGML_TYPE_F32, embed_dim, N);
|
ggml_tensor* class_embedding = ggml_new_tensor_2d(ctx->ggml_ctx, GGML_TYPE_F32, embed_dim, N);
|
||||||
class_embedding = ggml_repeat(ctx->ggml_ctx, class_embed_weight, class_embedding); // [N, embed_dim]
|
class_embedding = ggml_repeat(ctx->ggml_ctx, class_embed_weight, class_embedding); // [N, embed_dim]
|
||||||
class_embedding = ggml_reshape_4d(ctx->ggml_ctx, class_embedding, 1, embed_dim, 1, N); // [N, 1, embed_dim, 1]
|
class_embedding = ggml_reshape_4d(ctx->ggml_ctx, class_embedding, 1, embed_dim, 1, N); // [N, 1, embed_dim, 1]
|
||||||
|
|
||||||
struct ggml_tensor* x = ggml_concat(ctx->ggml_ctx, class_embedding, patch_embedding, 2); // [N, num_positions, embed_dim, 1]
|
ggml_tensor* x = ggml_concat(ctx->ggml_ctx, class_embedding, patch_embedding, 2); // [N, num_positions, embed_dim, 1]
|
||||||
x = ggml_reshape_3d(ctx->ggml_ctx, x, embed_dim, num_positions, N); // [N, num_positions, embed_dim]
|
x = ggml_reshape_3d(ctx->ggml_ctx, x, embed_dim, num_positions, N); // [N, num_positions, embed_dim]
|
||||||
x = ggml_add(ctx->ggml_ctx, x, position_embed_weight);
|
x = ggml_add(ctx->ggml_ctx, x, position_embed_weight);
|
||||||
return x; // [N, num_positions, embed_dim]
|
return x; // [N, num_positions, embed_dim]
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -693,7 +693,7 @@ enum CLIPVersion {
|
|||||||
|
|
||||||
class CLIPTextModel : public GGMLBlock {
|
class CLIPTextModel : public GGMLBlock {
|
||||||
protected:
|
protected:
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
if (version == OPEN_CLIP_VIT_BIGG_14) {
|
if (version == OPEN_CLIP_VIT_BIGG_14) {
|
||||||
enum ggml_type wtype = GGML_TYPE_F32;
|
enum ggml_type wtype = GGML_TYPE_F32;
|
||||||
params["text_projection"] = ggml_new_tensor_2d(ctx, wtype, projection_dim, hidden_size);
|
params["text_projection"] = ggml_new_tensor_2d(ctx, wtype, projection_dim, hidden_size);
|
||||||
@ -734,18 +734,18 @@ public:
|
|||||||
blocks["final_layer_norm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size));
|
blocks["final_layer_norm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* get_token_embed_weight() {
|
ggml_tensor* get_token_embed_weight() {
|
||||||
auto embeddings = std::dynamic_pointer_cast<CLIPEmbeddings>(blocks["embeddings"]);
|
auto embeddings = std::dynamic_pointer_cast<CLIPEmbeddings>(blocks["embeddings"]);
|
||||||
return embeddings->get_token_embed_weight();
|
return embeddings->get_token_embed_weight();
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* input_ids,
|
ggml_tensor* input_ids,
|
||||||
struct ggml_tensor* tkn_embeddings,
|
ggml_tensor* tkn_embeddings,
|
||||||
struct ggml_tensor* mask = nullptr,
|
ggml_tensor* mask = nullptr,
|
||||||
size_t max_token_idx = 0,
|
size_t max_token_idx = 0,
|
||||||
bool return_pooled = false,
|
bool return_pooled = false,
|
||||||
int clip_skip = -1) {
|
int clip_skip = -1) {
|
||||||
// input_ids: [N, n_token]
|
// input_ids: [N, n_token]
|
||||||
auto embeddings = std::dynamic_pointer_cast<CLIPEmbeddings>(blocks["embeddings"]);
|
auto embeddings = std::dynamic_pointer_cast<CLIPEmbeddings>(blocks["embeddings"]);
|
||||||
auto encoder = std::dynamic_pointer_cast<CLIPEncoder>(blocks["encoder"]);
|
auto encoder = std::dynamic_pointer_cast<CLIPEncoder>(blocks["encoder"]);
|
||||||
@ -804,10 +804,10 @@ public:
|
|||||||
blocks["post_layernorm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size));
|
blocks["post_layernorm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* pixel_values,
|
ggml_tensor* pixel_values,
|
||||||
bool return_pooled = true,
|
bool return_pooled = true,
|
||||||
int clip_skip = -1) {
|
int clip_skip = -1) {
|
||||||
// pixel_values: [N, num_channels, image_size, image_size]
|
// pixel_values: [N, num_channels, image_size, image_size]
|
||||||
auto embeddings = std::dynamic_pointer_cast<CLIPVisionEmbeddings>(blocks["embeddings"]);
|
auto embeddings = std::dynamic_pointer_cast<CLIPVisionEmbeddings>(blocks["embeddings"]);
|
||||||
auto pre_layernorm = std::dynamic_pointer_cast<LayerNorm>(blocks["pre_layernorm"]);
|
auto pre_layernorm = std::dynamic_pointer_cast<LayerNorm>(blocks["pre_layernorm"]);
|
||||||
@ -839,7 +839,7 @@ protected:
|
|||||||
int64_t out_features;
|
int64_t out_features;
|
||||||
bool transpose_weight;
|
bool transpose_weight;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
|
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
|
||||||
if (transpose_weight) {
|
if (transpose_weight) {
|
||||||
params["weight"] = ggml_new_tensor_2d(ctx, wtype, out_features, in_features);
|
params["weight"] = ggml_new_tensor_2d(ctx, wtype, out_features, in_features);
|
||||||
@ -856,8 +856,8 @@ public:
|
|||||||
out_features(out_features),
|
out_features(out_features),
|
||||||
transpose_weight(transpose_weight) {}
|
transpose_weight(transpose_weight) {}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
struct ggml_tensor* w = params["weight"];
|
ggml_tensor* w = params["weight"];
|
||||||
if (transpose_weight) {
|
if (transpose_weight) {
|
||||||
w = ggml_cont(ctx->ggml_ctx, ggml_transpose(ctx->ggml_ctx, w));
|
w = ggml_cont(ctx->ggml_ctx, ggml_transpose(ctx->ggml_ctx, w));
|
||||||
}
|
}
|
||||||
@ -886,10 +886,10 @@ public:
|
|||||||
blocks["visual_projection"] = std::shared_ptr<GGMLBlock>(new CLIPProjection(hidden_size, projection_dim, transpose_proj_w));
|
blocks["visual_projection"] = std::shared_ptr<GGMLBlock>(new CLIPProjection(hidden_size, projection_dim, transpose_proj_w));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* pixel_values,
|
ggml_tensor* pixel_values,
|
||||||
bool return_pooled = true,
|
bool return_pooled = true,
|
||||||
int clip_skip = -1) {
|
int clip_skip = -1) {
|
||||||
// pixel_values: [N, num_channels, image_size, image_size]
|
// pixel_values: [N, num_channels, image_size, image_size]
|
||||||
// return: [N, projection_dim] if return_pooled else [N, n_token, hidden_size]
|
// return: [N, projection_dim] if return_pooled else [N, n_token, hidden_size]
|
||||||
auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]);
|
auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]);
|
||||||
@ -936,17 +936,17 @@ struct CLIPTextModelRunner : public GGMLRunner {
|
|||||||
return "clip";
|
return "clip";
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
model.get_param_tensors(tensors, prefix);
|
model.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* input_ids,
|
ggml_tensor* input_ids,
|
||||||
struct ggml_tensor* embeddings,
|
ggml_tensor* embeddings,
|
||||||
struct ggml_tensor* mask,
|
ggml_tensor* mask,
|
||||||
size_t max_token_idx = 0,
|
size_t max_token_idx = 0,
|
||||||
bool return_pooled = false,
|
bool return_pooled = false,
|
||||||
int clip_skip = -1) {
|
int clip_skip = -1) {
|
||||||
size_t N = input_ids->ne[1];
|
size_t N = input_ids->ne[1];
|
||||||
size_t n_token = input_ids->ne[0];
|
size_t n_token = input_ids->ne[0];
|
||||||
if (input_ids->ne[0] > model.n_token) {
|
if (input_ids->ne[0] > model.n_token) {
|
||||||
@ -957,17 +957,17 @@ struct CLIPTextModelRunner : public GGMLRunner {
|
|||||||
return model.forward(ctx, input_ids, embeddings, mask, max_token_idx, return_pooled, clip_skip);
|
return model.forward(ctx, input_ids, embeddings, mask, max_token_idx, return_pooled, clip_skip);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids,
|
ggml_cgraph* build_graph(ggml_tensor* input_ids,
|
||||||
int num_custom_embeddings = 0,
|
int num_custom_embeddings = 0,
|
||||||
void* custom_embeddings_data = nullptr,
|
void* custom_embeddings_data = nullptr,
|
||||||
size_t max_token_idx = 0,
|
size_t max_token_idx = 0,
|
||||||
bool return_pooled = false,
|
bool return_pooled = false,
|
||||||
int clip_skip = -1) {
|
int clip_skip = -1) {
|
||||||
struct ggml_cgraph* gf = new_graph_custom(2048);
|
ggml_cgraph* gf = new_graph_custom(2048);
|
||||||
|
|
||||||
input_ids = to_backend(input_ids);
|
input_ids = to_backend(input_ids);
|
||||||
|
|
||||||
struct ggml_tensor* embeddings = nullptr;
|
ggml_tensor* embeddings = nullptr;
|
||||||
|
|
||||||
if (num_custom_embeddings > 0 && custom_embeddings_data != nullptr) {
|
if (num_custom_embeddings > 0 && custom_embeddings_data != nullptr) {
|
||||||
auto token_embed_weight = model.get_token_embed_weight();
|
auto token_embed_weight = model.get_token_embed_weight();
|
||||||
@ -997,7 +997,7 @@ struct CLIPTextModelRunner : public GGMLRunner {
|
|||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
|
|
||||||
struct ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, embeddings, attention_mask, max_token_idx, return_pooled, clip_skip);
|
ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, embeddings, attention_mask, max_token_idx, return_pooled, clip_skip);
|
||||||
|
|
||||||
ggml_build_forward_expand(gf, hidden_states);
|
ggml_build_forward_expand(gf, hidden_states);
|
||||||
|
|
||||||
@ -1005,7 +1005,7 @@ struct CLIPTextModelRunner : public GGMLRunner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool compute(const int n_threads,
|
bool compute(const int n_threads,
|
||||||
struct ggml_tensor* input_ids,
|
ggml_tensor* input_ids,
|
||||||
int num_custom_embeddings,
|
int num_custom_embeddings,
|
||||||
void* custom_embeddings_data,
|
void* custom_embeddings_data,
|
||||||
size_t max_token_idx,
|
size_t max_token_idx,
|
||||||
@ -1013,7 +1013,7 @@ struct CLIPTextModelRunner : public GGMLRunner {
|
|||||||
int clip_skip,
|
int clip_skip,
|
||||||
ggml_tensor** output,
|
ggml_tensor** output,
|
||||||
ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(input_ids, num_custom_embeddings, custom_embeddings_data, max_token_idx, return_pooled, clip_skip);
|
return build_graph(input_ids, num_custom_embeddings, custom_embeddings_data, max_token_idx, return_pooled, clip_skip);
|
||||||
};
|
};
|
||||||
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
|
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
|
||||||
|
|||||||
@ -23,7 +23,7 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [N, channels, h, w]
|
// x: [N, channels, h, w]
|
||||||
if (vae_downsample) {
|
if (vae_downsample) {
|
||||||
auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]);
|
auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]);
|
||||||
@ -52,7 +52,7 @@ public:
|
|||||||
blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
|
blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [N, channels, h, w]
|
// x: [N, channels, h, w]
|
||||||
auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]);
|
auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]);
|
||||||
|
|
||||||
@ -121,7 +121,7 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* emb = nullptr) {
|
virtual ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, ggml_tensor* emb = nullptr) {
|
||||||
// For dims==3, we reduce dimension from 5d to 4d by merging h and w, in order not to change ggml
|
// For dims==3, we reduce dimension from 5d to 4d by merging h and w, in order not to change ggml
|
||||||
// [N, c, t, h, w] => [N, c, t, h * w]
|
// [N, c, t, h, w] => [N, c, t, h * w]
|
||||||
// x: [N, channels, h, w] if dims == 2 else [N, channels, t, h, w]
|
// x: [N, channels, h, w] if dims == 2 else [N, channels, t, h, w]
|
||||||
@ -188,7 +188,7 @@ public:
|
|||||||
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out * 2));
|
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out * 2));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
// x: [ne3, ne2, ne1, dim_in]
|
// x: [ne3, ne2, ne1, dim_in]
|
||||||
// return: [ne3, ne2, ne1, dim_out]
|
// return: [ne3, ne2, ne1, dim_out]
|
||||||
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
|
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
|
||||||
@ -214,7 +214,7 @@ public:
|
|||||||
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out, bias));
|
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out, bias));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
// x: [ne3, ne2, ne1, dim_in]
|
// x: [ne3, ne2, ne1, dim_in]
|
||||||
// return: [ne3, ne2, ne1, dim_out]
|
// return: [ne3, ne2, ne1, dim_out]
|
||||||
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
|
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
|
||||||
@ -258,7 +258,7 @@ public:
|
|||||||
blocks["net.2"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim_out, true, false, force_prec_f32, scale));
|
blocks["net.2"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim_out, true, false, force_prec_f32, scale));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [ne3, ne2, ne1, dim]
|
// x: [ne3, ne2, ne1, dim]
|
||||||
// return: [ne3, ne2, ne1, dim_out]
|
// return: [ne3, ne2, ne1, dim_out]
|
||||||
|
|
||||||
@ -297,9 +297,9 @@ public:
|
|||||||
// to_out_1 is nn.Dropout(), skip for inference
|
// to_out_1 is nn.Dropout(), skip for inference
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* context) {
|
ggml_tensor* context) {
|
||||||
// x: [N, n_token, query_dim]
|
// x: [N, n_token, query_dim]
|
||||||
// context: [N, n_context, context_dim]
|
// context: [N, n_context, context_dim]
|
||||||
// return: [N, n_token, query_dim]
|
// return: [N, n_token, query_dim]
|
||||||
@ -355,9 +355,9 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* context) {
|
ggml_tensor* context) {
|
||||||
// x: [N, n_token, query_dim]
|
// x: [N, n_token, query_dim]
|
||||||
// context: [N, n_context, context_dim]
|
// context: [N, n_context, context_dim]
|
||||||
// return: [N, n_token, query_dim]
|
// return: [N, n_token, query_dim]
|
||||||
@ -406,7 +406,7 @@ protected:
|
|||||||
int64_t context_dim = 768; // hidden_size, 1024 for VERSION_SD2
|
int64_t context_dim = 768; // hidden_size, 1024 for VERSION_SD2
|
||||||
bool use_linear = false;
|
bool use_linear = false;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") {
|
||||||
auto iter = tensor_storage_map.find(prefix + "proj_out.weight");
|
auto iter = tensor_storage_map.find(prefix + "proj_out.weight");
|
||||||
if (iter != tensor_storage_map.end()) {
|
if (iter != tensor_storage_map.end()) {
|
||||||
int64_t inner_dim = n_head * d_head;
|
int64_t inner_dim = n_head * d_head;
|
||||||
@ -456,9 +456,9 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
virtual ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* context) {
|
ggml_tensor* context) {
|
||||||
// x: [N, in_channels, h, w]
|
// x: [N, in_channels, h, w]
|
||||||
// context: [N, max_position(aka n_token), hidden_size(aka context_dim)]
|
// context: [N, max_position(aka n_token), hidden_size(aka context_dim)]
|
||||||
auto norm = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm"]);
|
auto norm = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm"]);
|
||||||
@ -510,7 +510,7 @@ public:
|
|||||||
|
|
||||||
class AlphaBlender : public GGMLBlock {
|
class AlphaBlender : public GGMLBlock {
|
||||||
protected:
|
protected:
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, std::string prefix = "") override {
|
||||||
// Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
|
// Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
|
||||||
enum ggml_type wtype = GGML_TYPE_F32;
|
enum ggml_type wtype = GGML_TYPE_F32;
|
||||||
params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
|
params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
|
||||||
@ -530,9 +530,9 @@ public:
|
|||||||
// since mix_factor.shape is [1,], we don't need rearrange using rearrange_pattern
|
// since mix_factor.shape is [1,], we don't need rearrange using rearrange_pattern
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x_spatial,
|
ggml_tensor* x_spatial,
|
||||||
struct ggml_tensor* x_temporal) {
|
ggml_tensor* x_temporal) {
|
||||||
// image_only_indicator is always tensor([0.])
|
// image_only_indicator is always tensor([0.])
|
||||||
float alpha = get_alpha();
|
float alpha = get_alpha();
|
||||||
auto x = ggml_add(ctx->ggml_ctx,
|
auto x = ggml_add(ctx->ggml_ctx,
|
||||||
@ -555,10 +555,10 @@ public:
|
|||||||
blocks["time_mixer"] = std::shared_ptr<GGMLBlock>(new AlphaBlender());
|
blocks["time_mixer"] = std::shared_ptr<GGMLBlock>(new AlphaBlender());
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* emb,
|
ggml_tensor* emb,
|
||||||
int num_video_frames) {
|
int num_video_frames) {
|
||||||
// x: [N, channels, h, w] aka [b*t, channels, h, w]
|
// x: [N, channels, h, w] aka [b*t, channels, h, w]
|
||||||
// emb: [N, emb_channels] aka [b*t, emb_channels]
|
// emb: [N, emb_channels] aka [b*t, emb_channels]
|
||||||
// image_only_indicator is always tensor([0.])
|
// image_only_indicator is always tensor([0.])
|
||||||
|
|||||||
@ -6,17 +6,17 @@
|
|||||||
#include "t5.hpp"
|
#include "t5.hpp"
|
||||||
|
|
||||||
struct SDCondition {
|
struct SDCondition {
|
||||||
struct ggml_tensor* c_crossattn = nullptr; // aka context
|
ggml_tensor* c_crossattn = nullptr; // aka context
|
||||||
struct ggml_tensor* c_vector = nullptr; // aka y
|
ggml_tensor* c_vector = nullptr; // aka y
|
||||||
struct ggml_tensor* c_concat = nullptr;
|
ggml_tensor* c_concat = nullptr;
|
||||||
|
|
||||||
std::vector<struct ggml_tensor*> extra_c_crossattns;
|
std::vector<ggml_tensor*> extra_c_crossattns;
|
||||||
|
|
||||||
SDCondition() = default;
|
SDCondition() = default;
|
||||||
SDCondition(struct ggml_tensor* c_crossattn,
|
SDCondition(ggml_tensor* c_crossattn,
|
||||||
struct ggml_tensor* c_vector,
|
ggml_tensor* c_vector,
|
||||||
struct ggml_tensor* c_concat,
|
ggml_tensor* c_concat,
|
||||||
const std::vector<struct ggml_tensor*>& extra_c_crossattns = {})
|
const std::vector<ggml_tensor*>& extra_c_crossattns = {})
|
||||||
: c_crossattn(c_crossattn), c_vector(c_vector), c_concat(c_concat), extra_c_crossattns(extra_c_crossattns) {}
|
: c_crossattn(c_crossattn), c_vector(c_vector), c_concat(c_concat), extra_c_crossattns(extra_c_crossattns) {}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -37,7 +37,7 @@ struct Conditioner {
|
|||||||
const ConditionerParams& conditioner_params) = 0;
|
const ConditionerParams& conditioner_params) = 0;
|
||||||
virtual void alloc_params_buffer() = 0;
|
virtual void alloc_params_buffer() = 0;
|
||||||
virtual void free_params_buffer() = 0;
|
virtual void free_params_buffer() = 0;
|
||||||
virtual void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) = 0;
|
virtual void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) = 0;
|
||||||
virtual size_t get_params_buffer_size() = 0;
|
virtual size_t get_params_buffer_size() = 0;
|
||||||
virtual void set_flash_attention_enabled(bool enabled) = 0;
|
virtual void set_flash_attention_enabled(bool enabled) = 0;
|
||||||
virtual void set_weight_adapter(const std::shared_ptr<WeightAdapter>& adapter) {}
|
virtual void set_weight_adapter(const std::shared_ptr<WeightAdapter>& adapter) {}
|
||||||
@ -92,7 +92,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
text_model->get_param_tensors(tensors, "cond_stage_model.transformer.text_model");
|
text_model->get_param_tensors(tensors, "cond_stage_model.transformer.text_model");
|
||||||
if (sd_version_is_sdxl(version)) {
|
if (sd_version_is_sdxl(version)) {
|
||||||
text_model2->get_param_tensors(tensors, "cond_stage_model.1.transformer.text_model");
|
text_model2->get_param_tensors(tensors, "cond_stage_model.1.transformer.text_model");
|
||||||
@ -149,14 +149,14 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
|
|||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = 100 * 1024 * 1024; // max for custom embeddings 100 MB
|
params.mem_size = 100 * 1024 * 1024; // max for custom embeddings 100 MB
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
struct ggml_context* embd_ctx = ggml_init(params);
|
ggml_context* embd_ctx = ggml_init(params);
|
||||||
struct ggml_tensor* embd = nullptr;
|
ggml_tensor* embd = nullptr;
|
||||||
struct ggml_tensor* embd2 = nullptr;
|
ggml_tensor* embd2 = nullptr;
|
||||||
auto on_load = [&](const TensorStorage& tensor_storage, ggml_tensor** dst_tensor) {
|
auto on_load = [&](const TensorStorage& tensor_storage, ggml_tensor** dst_tensor) {
|
||||||
if (tensor_storage.ne[0] != text_model->model.hidden_size) {
|
if (tensor_storage.ne[0] != text_model->model.hidden_size) {
|
||||||
if (text_model2) {
|
if (text_model2) {
|
||||||
if (tensor_storage.ne[0] == text_model2->model.hidden_size) {
|
if (tensor_storage.ne[0] == text_model2->model.hidden_size) {
|
||||||
@ -435,12 +435,12 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
|
|||||||
int height,
|
int height,
|
||||||
int adm_in_channels = -1,
|
int adm_in_channels = -1,
|
||||||
bool zero_out_masked = false) {
|
bool zero_out_masked = false) {
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
struct ggml_tensor* hidden_states = nullptr; // [N, n_token, hidden_size]
|
ggml_tensor* hidden_states = nullptr; // [N, n_token, hidden_size]
|
||||||
struct ggml_tensor* chunk_hidden_states = nullptr; // [n_token, hidden_size] or [n_token, hidden_size + hidden_size2]
|
ggml_tensor* chunk_hidden_states = nullptr; // [n_token, hidden_size] or [n_token, hidden_size + hidden_size2]
|
||||||
struct ggml_tensor* chunk_hidden_states1 = nullptr; // [n_token, hidden_size]
|
ggml_tensor* chunk_hidden_states1 = nullptr; // [n_token, hidden_size]
|
||||||
struct ggml_tensor* chunk_hidden_states2 = nullptr; // [n_token, hidden_size2]
|
ggml_tensor* chunk_hidden_states2 = nullptr; // [n_token, hidden_size2]
|
||||||
struct ggml_tensor* pooled = nullptr;
|
ggml_tensor* pooled = nullptr;
|
||||||
std::vector<float> hidden_states_vec;
|
std::vector<float> hidden_states_vec;
|
||||||
|
|
||||||
if (clip_skip <= 0) {
|
if (clip_skip <= 0) {
|
||||||
@ -455,9 +455,9 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
|
|||||||
std::vector<float> chunk_weights(weights.begin() + chunk_idx * chunk_len,
|
std::vector<float> chunk_weights(weights.begin() + chunk_idx * chunk_len,
|
||||||
weights.begin() + (chunk_idx + 1) * chunk_len);
|
weights.begin() + (chunk_idx + 1) * chunk_len);
|
||||||
|
|
||||||
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, chunk_tokens);
|
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, chunk_tokens);
|
||||||
struct ggml_tensor* input_ids2 = nullptr;
|
ggml_tensor* input_ids2 = nullptr;
|
||||||
size_t max_token_idx = 0;
|
size_t max_token_idx = 0;
|
||||||
if (sd_version_is_sdxl(version)) {
|
if (sd_version_is_sdxl(version)) {
|
||||||
auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), tokenizer.EOS_TOKEN_ID);
|
auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), tokenizer.EOS_TOKEN_ID);
|
||||||
if (it != chunk_tokens.end()) {
|
if (it != chunk_tokens.end()) {
|
||||||
@ -676,18 +676,18 @@ struct FrozenCLIPVisionEmbedder : public GGMLRunner {
|
|||||||
return "clip_vision";
|
return "clip_vision";
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) {
|
||||||
vision_model.get_param_tensors(tensors, "cond_stage_model.transformer");
|
vision_model.get_param_tensors(tensors, "cond_stage_model.transformer");
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* pixel_values, bool return_pooled, int clip_skip) {
|
ggml_cgraph* build_graph(ggml_tensor* pixel_values, bool return_pooled, int clip_skip) {
|
||||||
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
||||||
|
|
||||||
pixel_values = to_backend(pixel_values);
|
pixel_values = to_backend(pixel_values);
|
||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
|
|
||||||
struct ggml_tensor* hidden_states = vision_model.forward(&runner_ctx, pixel_values, return_pooled, clip_skip);
|
ggml_tensor* hidden_states = vision_model.forward(&runner_ctx, pixel_values, return_pooled, clip_skip);
|
||||||
|
|
||||||
ggml_build_forward_expand(gf, hidden_states);
|
ggml_build_forward_expand(gf, hidden_states);
|
||||||
|
|
||||||
@ -700,7 +700,7 @@ struct FrozenCLIPVisionEmbedder : public GGMLRunner {
|
|||||||
int clip_skip,
|
int clip_skip,
|
||||||
ggml_tensor** output,
|
ggml_tensor** output,
|
||||||
ggml_context* output_ctx) {
|
ggml_context* output_ctx) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(pixel_values, return_pooled, clip_skip);
|
return build_graph(pixel_values, return_pooled, clip_skip);
|
||||||
};
|
};
|
||||||
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
|
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
|
||||||
@ -746,7 +746,7 @@ struct SD3CLIPEmbedder : public Conditioner {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
if (clip_l) {
|
if (clip_l) {
|
||||||
clip_l->get_param_tensors(tensors, "text_encoders.clip_l.transformer.text_model");
|
clip_l->get_param_tensors(tensors, "text_encoders.clip_l.transformer.text_model");
|
||||||
}
|
}
|
||||||
@ -909,15 +909,15 @@ struct SD3CLIPEmbedder : public Conditioner {
|
|||||||
clip_skip = 2;
|
clip_skip = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
struct ggml_tensor* hidden_states = nullptr; // [N, n_token*2, 4096]
|
ggml_tensor* hidden_states = nullptr; // [N, n_token*2, 4096]
|
||||||
struct ggml_tensor* chunk_hidden_states = nullptr; // [n_token*2, 4096]
|
ggml_tensor* chunk_hidden_states = nullptr; // [n_token*2, 4096]
|
||||||
struct ggml_tensor* chunk_hidden_states_l = nullptr; // [n_token, hidden_size_l]
|
ggml_tensor* chunk_hidden_states_l = nullptr; // [n_token, hidden_size_l]
|
||||||
struct ggml_tensor* chunk_hidden_states_g = nullptr; // [n_token, hidden_size_g]
|
ggml_tensor* chunk_hidden_states_g = nullptr; // [n_token, hidden_size_g]
|
||||||
struct ggml_tensor* chunk_hidden_states_t5 = nullptr; // [n_token, hidden_size_t5]
|
ggml_tensor* chunk_hidden_states_t5 = nullptr; // [n_token, hidden_size_t5]
|
||||||
struct ggml_tensor* pooled = nullptr;
|
ggml_tensor* pooled = nullptr;
|
||||||
struct ggml_tensor* pooled_l = nullptr; // [768,]
|
ggml_tensor* pooled_l = nullptr; // [768,]
|
||||||
struct ggml_tensor* pooled_g = nullptr; // [1280,]
|
ggml_tensor* pooled_g = nullptr; // [1280,]
|
||||||
std::vector<float> hidden_states_vec;
|
std::vector<float> hidden_states_vec;
|
||||||
|
|
||||||
size_t chunk_len = 77;
|
size_t chunk_len = 77;
|
||||||
@ -1178,7 +1178,7 @@ struct FluxCLIPEmbedder : public Conditioner {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
if (clip_l) {
|
if (clip_l) {
|
||||||
clip_l->get_param_tensors(tensors, "text_encoders.clip_l.transformer.text_model");
|
clip_l->get_param_tensors(tensors, "text_encoders.clip_l.transformer.text_model");
|
||||||
}
|
}
|
||||||
@ -1306,10 +1306,10 @@ struct FluxCLIPEmbedder : public Conditioner {
|
|||||||
clip_skip = 2;
|
clip_skip = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
struct ggml_tensor* hidden_states = nullptr; // [N, n_token, 4096]
|
ggml_tensor* hidden_states = nullptr; // [N, n_token, 4096]
|
||||||
struct ggml_tensor* chunk_hidden_states = nullptr; // [n_token, 4096]
|
ggml_tensor* chunk_hidden_states = nullptr; // [n_token, 4096]
|
||||||
struct ggml_tensor* pooled = nullptr; // [768,]
|
ggml_tensor* pooled = nullptr; // [768,]
|
||||||
std::vector<float> hidden_states_vec;
|
std::vector<float> hidden_states_vec;
|
||||||
|
|
||||||
size_t chunk_count = std::max(clip_l_tokens.size() > 0 ? chunk_len : 0, t5_tokens.size()) / chunk_len;
|
size_t chunk_count = std::max(clip_l_tokens.size() > 0 ? chunk_len : 0, t5_tokens.size()) / chunk_len;
|
||||||
@ -1448,7 +1448,7 @@ struct T5CLIPEmbedder : public Conditioner {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
if (t5) {
|
if (t5) {
|
||||||
t5->get_param_tensors(tensors, "text_encoders.t5xxl.transformer");
|
t5->get_param_tensors(tensors, "text_encoders.t5xxl.transformer");
|
||||||
}
|
}
|
||||||
@ -1523,7 +1523,7 @@ struct T5CLIPEmbedder : public Conditioner {
|
|||||||
return {t5_tokens, t5_weights, t5_mask};
|
return {t5_tokens, t5_weights, t5_mask};
|
||||||
}
|
}
|
||||||
|
|
||||||
void modify_mask_to_attend_padding(struct ggml_tensor* mask, int max_seq_length, int num_extra_padding = 8) {
|
void modify_mask_to_attend_padding(ggml_tensor* mask, int max_seq_length, int num_extra_padding = 8) {
|
||||||
float* mask_data = (float*)mask->data;
|
float* mask_data = (float*)mask->data;
|
||||||
int num_pad = 0;
|
int num_pad = 0;
|
||||||
for (int64_t i = 0; i < max_seq_length; i++) {
|
for (int64_t i = 0; i < max_seq_length; i++) {
|
||||||
@ -1554,11 +1554,11 @@ struct T5CLIPEmbedder : public Conditioner {
|
|||||||
auto& t5_weights = std::get<1>(token_and_weights);
|
auto& t5_weights = std::get<1>(token_and_weights);
|
||||||
auto& t5_attn_mask_vec = std::get<2>(token_and_weights);
|
auto& t5_attn_mask_vec = std::get<2>(token_and_weights);
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
struct ggml_tensor* hidden_states = nullptr; // [N, n_token, 4096]
|
ggml_tensor* hidden_states = nullptr; // [N, n_token, 4096]
|
||||||
struct ggml_tensor* chunk_hidden_states = nullptr; // [n_token, 4096]
|
ggml_tensor* chunk_hidden_states = nullptr; // [n_token, 4096]
|
||||||
struct ggml_tensor* pooled = nullptr;
|
ggml_tensor* pooled = nullptr;
|
||||||
struct ggml_tensor* t5_attn_mask = vector_to_ggml_tensor(work_ctx, t5_attn_mask_vec); // [n_token]
|
ggml_tensor* t5_attn_mask = vector_to_ggml_tensor(work_ctx, t5_attn_mask_vec); // [n_token]
|
||||||
|
|
||||||
std::vector<float> hidden_states_vec;
|
std::vector<float> hidden_states_vec;
|
||||||
|
|
||||||
@ -1658,7 +1658,7 @@ struct AnimaConditioner : public Conditioner {
|
|||||||
false);
|
false);
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
llm->get_param_tensors(tensors, "text_encoders.llm");
|
llm->get_param_tensors(tensors, "text_encoders.llm");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1736,7 +1736,7 @@ struct AnimaConditioner : public Conditioner {
|
|||||||
|
|
||||||
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, qwen_tokens);
|
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, qwen_tokens);
|
||||||
|
|
||||||
struct ggml_tensor* hidden_states = nullptr; // [N, n_token, 1024]
|
ggml_tensor* hidden_states = nullptr; // [N, n_token, 1024]
|
||||||
llm->compute(n_threads,
|
llm->compute(n_threads,
|
||||||
input_ids,
|
input_ids,
|
||||||
nullptr,
|
nullptr,
|
||||||
@ -1763,8 +1763,8 @@ struct AnimaConditioner : public Conditioner {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* t5_ids_tensor = nullptr;
|
ggml_tensor* t5_ids_tensor = nullptr;
|
||||||
struct ggml_tensor* t5_weight_tensor = nullptr;
|
ggml_tensor* t5_weight_tensor = nullptr;
|
||||||
if (!t5_tokens.empty()) {
|
if (!t5_tokens.empty()) {
|
||||||
t5_ids_tensor = vector_to_ggml_tensor_i32(work_ctx, t5_tokens);
|
t5_ids_tensor = vector_to_ggml_tensor_i32(work_ctx, t5_tokens);
|
||||||
t5_weight_tensor = vector_to_ggml_tensor(work_ctx, t5_weights);
|
t5_weight_tensor = vector_to_ggml_tensor(work_ctx, t5_weights);
|
||||||
@ -1808,7 +1808,7 @@ struct LLMEmbedder : public Conditioner {
|
|||||||
enable_vision);
|
enable_vision);
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
llm->get_param_tensors(tensors, "text_encoders.llm");
|
llm->get_param_tensors(tensors, "text_encoders.llm");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1904,7 +1904,7 @@ struct LLMEmbedder : public Conditioner {
|
|||||||
tokenizer->pad_tokens(tokens, weights, max_length, true);
|
tokenizer->pad_tokens(tokens, weights, max_length, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* hidden_states = nullptr; // [N, n_token, hidden_size]
|
ggml_tensor* hidden_states = nullptr; // [N, n_token, hidden_size]
|
||||||
|
|
||||||
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
|
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
|
||||||
|
|
||||||
|
|||||||
@ -164,26 +164,26 @@ public:
|
|||||||
blocks["middle_block_out.0"] = std::shared_ptr<GGMLBlock>(make_zero_conv(ch));
|
blocks["middle_block_out.0"] = std::shared_ptr<GGMLBlock>(make_zero_conv(ch));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* resblock_forward(std::string name,
|
ggml_tensor* resblock_forward(std::string name,
|
||||||
GGMLRunnerContext* ctx,
|
GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* emb) {
|
ggml_tensor* emb) {
|
||||||
auto block = std::dynamic_pointer_cast<ResBlock>(blocks[name]);
|
auto block = std::dynamic_pointer_cast<ResBlock>(blocks[name]);
|
||||||
return block->forward(ctx, x, emb);
|
return block->forward(ctx, x, emb);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* attention_layer_forward(std::string name,
|
ggml_tensor* attention_layer_forward(std::string name,
|
||||||
GGMLRunnerContext* ctx,
|
GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* context) {
|
ggml_tensor* context) {
|
||||||
auto block = std::dynamic_pointer_cast<SpatialTransformer>(blocks[name]);
|
auto block = std::dynamic_pointer_cast<SpatialTransformer>(blocks[name]);
|
||||||
return block->forward(ctx, x, context);
|
return block->forward(ctx, x, context);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* input_hint_block_forward(GGMLRunnerContext* ctx,
|
ggml_tensor* input_hint_block_forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* hint,
|
ggml_tensor* hint,
|
||||||
struct ggml_tensor* emb,
|
ggml_tensor* emb,
|
||||||
struct ggml_tensor* context) {
|
ggml_tensor* context) {
|
||||||
int num_input_blocks = 15;
|
int num_input_blocks = 15;
|
||||||
auto h = hint;
|
auto h = hint;
|
||||||
for (int i = 0; i < num_input_blocks; i++) {
|
for (int i = 0; i < num_input_blocks; i++) {
|
||||||
@ -198,13 +198,13 @@ public:
|
|||||||
return h;
|
return h;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
std::vector<ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* hint,
|
ggml_tensor* hint,
|
||||||
struct ggml_tensor* guided_hint,
|
ggml_tensor* guided_hint,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* y = nullptr) {
|
ggml_tensor* y = nullptr) {
|
||||||
// x: [N, in_channels, h, w] or [N, in_channels/2, h, w]
|
// x: [N, in_channels, h, w] or [N, in_channels/2, h, w]
|
||||||
// timesteps: [N,]
|
// timesteps: [N,]
|
||||||
// context: [N, max_position, hidden_size] or [1, max_position, hidden_size]. for example, [N, 77, 768]
|
// context: [N, max_position, hidden_size] or [1, max_position, hidden_size]. for example, [N, 77, 768]
|
||||||
@ -246,7 +246,7 @@ public:
|
|||||||
emb = ggml_add(ctx->ggml_ctx, emb, label_emb); // [N, time_embed_dim]
|
emb = ggml_add(ctx->ggml_ctx, emb, label_emb); // [N, time_embed_dim]
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<struct ggml_tensor*> outs;
|
std::vector<ggml_tensor*> outs;
|
||||||
|
|
||||||
if (guided_hint == nullptr) {
|
if (guided_hint == nullptr) {
|
||||||
guided_hint = input_hint_block_forward(ctx, hint, emb, context);
|
guided_hint = input_hint_block_forward(ctx, hint, emb, context);
|
||||||
@ -312,9 +312,9 @@ struct ControlNet : public GGMLRunner {
|
|||||||
|
|
||||||
ggml_backend_buffer_t control_buffer = nullptr; // keep control output tensors in backend memory
|
ggml_backend_buffer_t control_buffer = nullptr; // keep control output tensors in backend memory
|
||||||
ggml_context* control_ctx = nullptr;
|
ggml_context* control_ctx = nullptr;
|
||||||
std::vector<struct ggml_tensor*> controls; // (12 input block outputs, 1 middle block output) SD 1.5
|
std::vector<ggml_tensor*> controls; // (12 input block outputs, 1 middle block output) SD 1.5
|
||||||
struct ggml_tensor* guided_hint = nullptr; // guided_hint cache, for faster inference
|
ggml_tensor* guided_hint = nullptr; // guided_hint cache, for faster inference
|
||||||
bool guided_hint_cached = false;
|
bool guided_hint_cached = false;
|
||||||
|
|
||||||
ControlNet(ggml_backend_t backend,
|
ControlNet(ggml_backend_t backend,
|
||||||
bool offload_params_to_cpu,
|
bool offload_params_to_cpu,
|
||||||
@ -328,8 +328,8 @@ struct ControlNet : public GGMLRunner {
|
|||||||
free_control_ctx();
|
free_control_ctx();
|
||||||
}
|
}
|
||||||
|
|
||||||
void alloc_control_ctx(std::vector<struct ggml_tensor*> outs) {
|
void alloc_control_ctx(std::vector<ggml_tensor*> outs) {
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = static_cast<size_t>(outs.size() * ggml_tensor_overhead()) + 1024 * 1024;
|
params.mem_size = static_cast<size_t>(outs.size() * ggml_tensor_overhead()) + 1024 * 1024;
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = true;
|
params.no_alloc = true;
|
||||||
@ -370,16 +370,16 @@ struct ControlNet : public GGMLRunner {
|
|||||||
return "control_net";
|
return "control_net";
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
control_net.get_param_tensors(tensors, prefix);
|
control_net.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* x,
|
ggml_cgraph* build_graph(ggml_tensor* x,
|
||||||
struct ggml_tensor* hint,
|
ggml_tensor* hint,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* y = nullptr) {
|
ggml_tensor* y = nullptr) {
|
||||||
struct ggml_cgraph* gf = new_graph_custom(CONTROL_NET_GRAPH_SIZE);
|
ggml_cgraph* gf = new_graph_custom(CONTROL_NET_GRAPH_SIZE);
|
||||||
|
|
||||||
x = to_backend(x);
|
x = to_backend(x);
|
||||||
if (guided_hint_cached) {
|
if (guided_hint_cached) {
|
||||||
@ -414,18 +414,18 @@ struct ControlNet : public GGMLRunner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* hint,
|
ggml_tensor* hint,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* y,
|
ggml_tensor* y,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
// x: [N, in_channels, h, w]
|
// x: [N, in_channels, h, w]
|
||||||
// timesteps: [N, ]
|
// timesteps: [N, ]
|
||||||
// context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
|
// context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
|
||||||
// y: [N, adm_in_channels] or [1, adm_in_channels]
|
// y: [N, adm_in_channels] or [1, adm_in_channels]
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(x, hint, timesteps, context, y);
|
return build_graph(x, hint, timesteps, context, y);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@ -773,8 +773,8 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
// sample_euler_ancestral
|
// sample_euler_ancestral
|
||||||
switch (method) {
|
switch (method) {
|
||||||
case EULER_A_SAMPLE_METHOD: {
|
case EULER_A_SAMPLE_METHOD: {
|
||||||
struct ggml_tensor* noise = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* noise = ggml_dup_tensor(work_ctx, x);
|
||||||
struct ggml_tensor* d = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* d = ggml_dup_tensor(work_ctx, x);
|
||||||
|
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
float sigma = sigmas[i];
|
float sigma = sigmas[i];
|
||||||
@ -830,7 +830,7 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
} break;
|
} break;
|
||||||
case EULER_SAMPLE_METHOD: // Implemented without any sigma churn
|
case EULER_SAMPLE_METHOD: // Implemented without any sigma churn
|
||||||
{
|
{
|
||||||
struct ggml_tensor* d = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* d = ggml_dup_tensor(work_ctx, x);
|
||||||
|
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
float sigma = sigmas[i];
|
float sigma = sigmas[i];
|
||||||
@ -865,8 +865,8 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
}
|
}
|
||||||
} break;
|
} break;
|
||||||
case HEUN_SAMPLE_METHOD: {
|
case HEUN_SAMPLE_METHOD: {
|
||||||
struct ggml_tensor* d = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* d = ggml_dup_tensor(work_ctx, x);
|
||||||
struct ggml_tensor* x2 = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* x2 = ggml_dup_tensor(work_ctx, x);
|
||||||
|
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
// denoise
|
// denoise
|
||||||
@ -921,8 +921,8 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
}
|
}
|
||||||
} break;
|
} break;
|
||||||
case DPM2_SAMPLE_METHOD: {
|
case DPM2_SAMPLE_METHOD: {
|
||||||
struct ggml_tensor* d = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* d = ggml_dup_tensor(work_ctx, x);
|
||||||
struct ggml_tensor* x2 = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* x2 = ggml_dup_tensor(work_ctx, x);
|
||||||
|
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
// denoise
|
// denoise
|
||||||
@ -979,8 +979,8 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
|
|
||||||
} break;
|
} break;
|
||||||
case DPMPP2S_A_SAMPLE_METHOD: {
|
case DPMPP2S_A_SAMPLE_METHOD: {
|
||||||
struct ggml_tensor* noise = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* noise = ggml_dup_tensor(work_ctx, x);
|
||||||
struct ggml_tensor* x2 = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* x2 = ggml_dup_tensor(work_ctx, x);
|
||||||
|
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
// denoise
|
// denoise
|
||||||
@ -1050,7 +1050,7 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
} break;
|
} break;
|
||||||
case DPMPP2M_SAMPLE_METHOD: // DPM++ (2M) from Karras et al (2022)
|
case DPMPP2M_SAMPLE_METHOD: // DPM++ (2M) from Karras et al (2022)
|
||||||
{
|
{
|
||||||
struct ggml_tensor* old_denoised = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* old_denoised = ggml_dup_tensor(work_ctx, x);
|
||||||
|
|
||||||
auto t_fn = [](float sigma) -> float { return -log(sigma); };
|
auto t_fn = [](float sigma) -> float { return -log(sigma); };
|
||||||
|
|
||||||
@ -1092,7 +1092,7 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
} break;
|
} break;
|
||||||
case DPMPP2Mv2_SAMPLE_METHOD: // Modified DPM++ (2M) from https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions/8457
|
case DPMPP2Mv2_SAMPLE_METHOD: // Modified DPM++ (2M) from https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions/8457
|
||||||
{
|
{
|
||||||
struct ggml_tensor* old_denoised = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* old_denoised = ggml_dup_tensor(work_ctx, x);
|
||||||
|
|
||||||
auto t_fn = [](float sigma) -> float { return -log(sigma); };
|
auto t_fn = [](float sigma) -> float { return -log(sigma); };
|
||||||
|
|
||||||
@ -1157,8 +1157,8 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
}
|
}
|
||||||
float* vec_denoised = (float*)denoised->data;
|
float* vec_denoised = (float*)denoised->data;
|
||||||
// d_cur = (x_cur - denoised) / sigma
|
// d_cur = (x_cur - denoised) / sigma
|
||||||
struct ggml_tensor* d_cur = ggml_dup_tensor(work_ctx, x_cur);
|
ggml_tensor* d_cur = ggml_dup_tensor(work_ctx, x_cur);
|
||||||
float* vec_d_cur = (float*)d_cur->data;
|
float* vec_d_cur = (float*)d_cur->data;
|
||||||
|
|
||||||
for (int j = 0; j < ggml_nelements(d_cur); j++) {
|
for (int j = 0; j < ggml_nelements(d_cur); j++) {
|
||||||
vec_d_cur[j] = (vec_x_cur[j] - vec_denoised[j]) / sigma;
|
vec_d_cur[j] = (vec_x_cur[j] - vec_denoised[j]) / sigma;
|
||||||
@ -1225,11 +1225,11 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
float t_next = sigmas[i + 1];
|
float t_next = sigmas[i + 1];
|
||||||
|
|
||||||
// Denoising step
|
// Denoising step
|
||||||
ggml_tensor* denoised = model(x, sigma, i + 1);
|
ggml_tensor* denoised = model(x, sigma, i + 1);
|
||||||
float* vec_denoised = (float*)denoised->data;
|
float* vec_denoised = (float*)denoised->data;
|
||||||
struct ggml_tensor* d_cur = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* d_cur = ggml_dup_tensor(work_ctx, x);
|
||||||
float* vec_d_cur = (float*)d_cur->data;
|
float* vec_d_cur = (float*)d_cur->data;
|
||||||
float* vec_x = (float*)x->data;
|
float* vec_x = (float*)x->data;
|
||||||
|
|
||||||
// d_cur = (x - denoised) / sigma
|
// d_cur = (x - denoised) / sigma
|
||||||
for (int j = 0; j < ggml_nelements(d_cur); j++) {
|
for (int j = 0; j < ggml_nelements(d_cur); j++) {
|
||||||
@ -1290,8 +1290,8 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
} break;
|
} break;
|
||||||
case LCM_SAMPLE_METHOD: // Latent Consistency Models
|
case LCM_SAMPLE_METHOD: // Latent Consistency Models
|
||||||
{
|
{
|
||||||
struct ggml_tensor* noise = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* noise = ggml_dup_tensor(work_ctx, x);
|
||||||
struct ggml_tensor* d = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* d = ggml_dup_tensor(work_ctx, x);
|
||||||
|
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
float sigma = sigmas[i];
|
float sigma = sigmas[i];
|
||||||
@ -1358,9 +1358,9 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
alphas_cumprod[i]);
|
alphas_cumprod[i]);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* pred_original_sample =
|
ggml_tensor* pred_original_sample =
|
||||||
ggml_dup_tensor(work_ctx, x);
|
ggml_dup_tensor(work_ctx, x);
|
||||||
struct ggml_tensor* variance_noise =
|
ggml_tensor* variance_noise =
|
||||||
ggml_dup_tensor(work_ctx, x);
|
ggml_dup_tensor(work_ctx, x);
|
||||||
|
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
@ -1422,7 +1422,7 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
// model_output = model() is the D(x, sigma) as
|
// model_output = model() is the D(x, sigma) as
|
||||||
// defined in Karras et al. (2022), p. 3, Table 1 and
|
// defined in Karras et al. (2022), p. 3, Table 1 and
|
||||||
// p. 8 (7), compare also p. 38 (226) therein.
|
// p. 8 (7), compare also p. 38 (226) therein.
|
||||||
struct ggml_tensor* model_output =
|
ggml_tensor* model_output =
|
||||||
model(x, sigma, i + 1);
|
model(x, sigma, i + 1);
|
||||||
// Here model_output is still the k-diffusion denoiser
|
// Here model_output is still the k-diffusion denoiser
|
||||||
// output, not the U-net output F_theta(c_in(sigma) x;
|
// output, not the U-net output F_theta(c_in(sigma) x;
|
||||||
@ -1545,9 +1545,9 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
}
|
}
|
||||||
int original_steps = 50;
|
int original_steps = 50;
|
||||||
|
|
||||||
struct ggml_tensor* pred_original_sample =
|
ggml_tensor* pred_original_sample =
|
||||||
ggml_dup_tensor(work_ctx, x);
|
ggml_dup_tensor(work_ctx, x);
|
||||||
struct ggml_tensor* noise =
|
ggml_tensor* noise =
|
||||||
ggml_dup_tensor(work_ctx, x);
|
ggml_dup_tensor(work_ctx, x);
|
||||||
|
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
@ -1581,7 +1581,7 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
vec_x[j] *= std::sqrt(sigma * sigma + 1);
|
vec_x[j] *= std::sqrt(sigma * sigma + 1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
struct ggml_tensor* model_output =
|
ggml_tensor* model_output =
|
||||||
model(x, sigma, i + 1);
|
model(x, sigma, i + 1);
|
||||||
{
|
{
|
||||||
float* vec_x = (float*)x->data;
|
float* vec_x = (float*)x->data;
|
||||||
@ -1689,8 +1689,8 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
} break;
|
} break;
|
||||||
case RES_MULTISTEP_SAMPLE_METHOD: // Res Multistep sampler
|
case RES_MULTISTEP_SAMPLE_METHOD: // Res Multistep sampler
|
||||||
{
|
{
|
||||||
struct ggml_tensor* noise = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* noise = ggml_dup_tensor(work_ctx, x);
|
||||||
struct ggml_tensor* old_denoised = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* old_denoised = ggml_dup_tensor(work_ctx, x);
|
||||||
|
|
||||||
bool have_old_sigma = false;
|
bool have_old_sigma = false;
|
||||||
float old_sigma_down = 0.0f;
|
float old_sigma_down = 0.0f;
|
||||||
@ -1797,9 +1797,9 @@ static bool sample_k_diffusion(sample_method_t method,
|
|||||||
} break;
|
} break;
|
||||||
case RES_2S_SAMPLE_METHOD: // Res 2s sampler
|
case RES_2S_SAMPLE_METHOD: // Res 2s sampler
|
||||||
{
|
{
|
||||||
struct ggml_tensor* noise = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* noise = ggml_dup_tensor(work_ctx, x);
|
||||||
struct ggml_tensor* x0 = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* x0 = ggml_dup_tensor(work_ctx, x);
|
||||||
struct ggml_tensor* x2 = ggml_dup_tensor(work_ctx, x);
|
ggml_tensor* x2 = ggml_dup_tensor(work_ctx, x);
|
||||||
|
|
||||||
const float c2 = 0.5f;
|
const float c2 = 0.5f;
|
||||||
auto t_fn = [](float sigma) -> float { return -logf(sigma); };
|
auto t_fn = [](float sigma) -> float { return -logf(sigma); };
|
||||||
|
|||||||
@ -10,33 +10,33 @@
|
|||||||
#include "z_image.hpp"
|
#include "z_image.hpp"
|
||||||
|
|
||||||
struct DiffusionParams {
|
struct DiffusionParams {
|
||||||
struct ggml_tensor* x = nullptr;
|
ggml_tensor* x = nullptr;
|
||||||
struct ggml_tensor* timesteps = nullptr;
|
ggml_tensor* timesteps = nullptr;
|
||||||
struct ggml_tensor* context = nullptr;
|
ggml_tensor* context = nullptr;
|
||||||
struct ggml_tensor* c_concat = nullptr;
|
ggml_tensor* c_concat = nullptr;
|
||||||
struct ggml_tensor* y = nullptr;
|
ggml_tensor* y = nullptr;
|
||||||
struct ggml_tensor* guidance = nullptr;
|
ggml_tensor* guidance = nullptr;
|
||||||
std::vector<ggml_tensor*> ref_latents = {};
|
std::vector<ggml_tensor*> ref_latents = {};
|
||||||
bool increase_ref_index = false;
|
bool increase_ref_index = false;
|
||||||
int num_video_frames = -1;
|
int num_video_frames = -1;
|
||||||
std::vector<struct ggml_tensor*> controls = {};
|
std::vector<ggml_tensor*> controls = {};
|
||||||
float control_strength = 0.f;
|
float control_strength = 0.f;
|
||||||
struct ggml_tensor* vace_context = nullptr;
|
ggml_tensor* vace_context = nullptr;
|
||||||
float vace_strength = 1.f;
|
float vace_strength = 1.f;
|
||||||
std::vector<int> skip_layers = {};
|
std::vector<int> skip_layers = {};
|
||||||
};
|
};
|
||||||
|
|
||||||
struct DiffusionModel {
|
struct DiffusionModel {
|
||||||
virtual std::string get_desc() = 0;
|
virtual std::string get_desc() = 0;
|
||||||
virtual bool compute(int n_threads,
|
virtual bool compute(int n_threads,
|
||||||
DiffusionParams diffusion_params,
|
DiffusionParams diffusion_params,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) = 0;
|
ggml_context* output_ctx = nullptr) = 0;
|
||||||
virtual void alloc_params_buffer() = 0;
|
virtual void alloc_params_buffer() = 0;
|
||||||
virtual void free_params_buffer() = 0;
|
virtual void free_params_buffer() = 0;
|
||||||
virtual void free_compute_buffer() = 0;
|
virtual void free_compute_buffer() = 0;
|
||||||
virtual void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) = 0;
|
virtual void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) = 0;
|
||||||
virtual size_t get_params_buffer_size() = 0;
|
virtual size_t get_params_buffer_size() = 0;
|
||||||
virtual void set_weight_adapter(const std::shared_ptr<WeightAdapter>& adapter){};
|
virtual void set_weight_adapter(const std::shared_ptr<WeightAdapter>& adapter){};
|
||||||
virtual int64_t get_adm_in_channels() = 0;
|
virtual int64_t get_adm_in_channels() = 0;
|
||||||
virtual void set_flash_attention_enabled(bool enabled) = 0;
|
virtual void set_flash_attention_enabled(bool enabled) = 0;
|
||||||
@ -69,7 +69,7 @@ struct UNetModel : public DiffusionModel {
|
|||||||
unet.free_compute_buffer();
|
unet.free_compute_buffer();
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
unet.get_param_tensors(tensors, "model.diffusion_model");
|
unet.get_param_tensors(tensors, "model.diffusion_model");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -95,8 +95,8 @@ struct UNetModel : public DiffusionModel {
|
|||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
DiffusionParams diffusion_params,
|
DiffusionParams diffusion_params,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) override {
|
ggml_context* output_ctx = nullptr) override {
|
||||||
return unet.compute(n_threads,
|
return unet.compute(n_threads,
|
||||||
diffusion_params.x,
|
diffusion_params.x,
|
||||||
diffusion_params.timesteps,
|
diffusion_params.timesteps,
|
||||||
@ -134,7 +134,7 @@ struct MMDiTModel : public DiffusionModel {
|
|||||||
mmdit.free_compute_buffer();
|
mmdit.free_compute_buffer();
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
mmdit.get_param_tensors(tensors, "model.diffusion_model");
|
mmdit.get_param_tensors(tensors, "model.diffusion_model");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -160,8 +160,8 @@ struct MMDiTModel : public DiffusionModel {
|
|||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
DiffusionParams diffusion_params,
|
DiffusionParams diffusion_params,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) override {
|
ggml_context* output_ctx = nullptr) override {
|
||||||
return mmdit.compute(n_threads,
|
return mmdit.compute(n_threads,
|
||||||
diffusion_params.x,
|
diffusion_params.x,
|
||||||
diffusion_params.timesteps,
|
diffusion_params.timesteps,
|
||||||
@ -200,7 +200,7 @@ struct FluxModel : public DiffusionModel {
|
|||||||
flux.free_compute_buffer();
|
flux.free_compute_buffer();
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
flux.get_param_tensors(tensors, "model.diffusion_model");
|
flux.get_param_tensors(tensors, "model.diffusion_model");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -226,8 +226,8 @@ struct FluxModel : public DiffusionModel {
|
|||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
DiffusionParams diffusion_params,
|
DiffusionParams diffusion_params,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) override {
|
ggml_context* output_ctx = nullptr) override {
|
||||||
return flux.compute(n_threads,
|
return flux.compute(n_threads,
|
||||||
diffusion_params.x,
|
diffusion_params.x,
|
||||||
diffusion_params.timesteps,
|
diffusion_params.timesteps,
|
||||||
@ -270,7 +270,7 @@ struct AnimaModel : public DiffusionModel {
|
|||||||
anima.free_compute_buffer();
|
anima.free_compute_buffer();
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
anima.get_param_tensors(tensors, prefix);
|
anima.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -296,8 +296,8 @@ struct AnimaModel : public DiffusionModel {
|
|||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
DiffusionParams diffusion_params,
|
DiffusionParams diffusion_params,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) override {
|
ggml_context* output_ctx = nullptr) override {
|
||||||
return anima.compute(n_threads,
|
return anima.compute(n_threads,
|
||||||
diffusion_params.x,
|
diffusion_params.x,
|
||||||
diffusion_params.timesteps,
|
diffusion_params.timesteps,
|
||||||
@ -337,7 +337,7 @@ struct WanModel : public DiffusionModel {
|
|||||||
wan.free_compute_buffer();
|
wan.free_compute_buffer();
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
wan.get_param_tensors(tensors, prefix);
|
wan.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -363,8 +363,8 @@ struct WanModel : public DiffusionModel {
|
|||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
DiffusionParams diffusion_params,
|
DiffusionParams diffusion_params,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) override {
|
ggml_context* output_ctx = nullptr) override {
|
||||||
return wan.compute(n_threads,
|
return wan.compute(n_threads,
|
||||||
diffusion_params.x,
|
diffusion_params.x,
|
||||||
diffusion_params.timesteps,
|
diffusion_params.timesteps,
|
||||||
@ -408,7 +408,7 @@ struct QwenImageModel : public DiffusionModel {
|
|||||||
qwen_image.free_compute_buffer();
|
qwen_image.free_compute_buffer();
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
qwen_image.get_param_tensors(tensors, prefix);
|
qwen_image.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -434,8 +434,8 @@ struct QwenImageModel : public DiffusionModel {
|
|||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
DiffusionParams diffusion_params,
|
DiffusionParams diffusion_params,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) override {
|
ggml_context* output_ctx = nullptr) override {
|
||||||
return qwen_image.compute(n_threads,
|
return qwen_image.compute(n_threads,
|
||||||
diffusion_params.x,
|
diffusion_params.x,
|
||||||
diffusion_params.timesteps,
|
diffusion_params.timesteps,
|
||||||
@ -475,7 +475,7 @@ struct ZImageModel : public DiffusionModel {
|
|||||||
z_image.free_compute_buffer();
|
z_image.free_compute_buffer();
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
|
||||||
z_image.get_param_tensors(tensors, prefix);
|
z_image.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -501,8 +501,8 @@ struct ZImageModel : public DiffusionModel {
|
|||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
DiffusionParams diffusion_params,
|
DiffusionParams diffusion_params,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) override {
|
ggml_context* output_ctx = nullptr) override {
|
||||||
return z_image.compute(n_threads,
|
return z_image.compute(n_threads,
|
||||||
diffusion_params.x,
|
diffusion_params.x,
|
||||||
diffusion_params.timesteps,
|
diffusion_params.timesteps,
|
||||||
|
|||||||
@ -27,11 +27,11 @@ public:
|
|||||||
blocks["conv5"] = std::shared_ptr<GGMLBlock>(new Conv2d(num_feat + 4 * num_grow_ch, num_feat, {3, 3}, {1, 1}, {1, 1}));
|
blocks["conv5"] = std::shared_ptr<GGMLBlock>(new Conv2d(num_feat + 4 * num_grow_ch, num_feat, {3, 3}, {1, 1}, {1, 1}));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* lrelu(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* lrelu(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
return ggml_leaky_relu(ctx->ggml_ctx, x, 0.2f, true);
|
return ggml_leaky_relu(ctx->ggml_ctx, x, 0.2f, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [n, num_feat, h, w]
|
// x: [n, num_feat, h, w]
|
||||||
// return: [n, num_feat, h, w]
|
// return: [n, num_feat, h, w]
|
||||||
|
|
||||||
@ -64,7 +64,7 @@ public:
|
|||||||
blocks["rdb3"] = std::shared_ptr<GGMLBlock>(new ResidualDenseBlock(num_feat, num_grow_ch));
|
blocks["rdb3"] = std::shared_ptr<GGMLBlock>(new ResidualDenseBlock(num_feat, num_grow_ch));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [n, num_feat, h, w]
|
// x: [n, num_feat, h, w]
|
||||||
// return: [n, num_feat, h, w]
|
// return: [n, num_feat, h, w]
|
||||||
|
|
||||||
@ -112,11 +112,11 @@ public:
|
|||||||
int get_scale() { return scale; }
|
int get_scale() { return scale; }
|
||||||
int get_num_block() { return num_block; }
|
int get_num_block() { return num_block; }
|
||||||
|
|
||||||
struct ggml_tensor* lrelu(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* lrelu(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
return ggml_leaky_relu(ctx->ggml_ctx, x, 0.2f, true);
|
return ggml_leaky_relu(ctx->ggml_ctx, x, 0.2f, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [n, num_in_ch, h, w]
|
// x: [n, num_in_ch, h, w]
|
||||||
// return: [n, num_out_ch, h*scale, w*scale]
|
// return: [n, num_out_ch, h*scale, w*scale]
|
||||||
auto conv_first = std::dynamic_pointer_cast<Conv2d>(blocks["conv_first"]);
|
auto conv_first = std::dynamic_pointer_cast<Conv2d>(blocks["conv_first"]);
|
||||||
@ -341,24 +341,24 @@ struct ESRGAN : public GGMLRunner {
|
|||||||
return success;
|
return success;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* x) {
|
ggml_cgraph* build_graph(ggml_tensor* x) {
|
||||||
if (!rrdb_net)
|
if (!rrdb_net)
|
||||||
return nullptr;
|
return nullptr;
|
||||||
constexpr int kGraphNodes = 1 << 16; // 65k
|
constexpr int kGraphNodes = 1 << 16; // 65k
|
||||||
struct ggml_cgraph* gf = new_graph_custom(kGraphNodes);
|
ggml_cgraph* gf = new_graph_custom(kGraphNodes);
|
||||||
x = to_backend(x);
|
x = to_backend(x);
|
||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
struct ggml_tensor* out = rrdb_net->forward(&runner_ctx, x);
|
ggml_tensor* out = rrdb_net->forward(&runner_ctx, x);
|
||||||
ggml_build_forward_expand(gf, out);
|
ggml_build_forward_expand(gf, out);
|
||||||
return gf;
|
return gf;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool compute(const int n_threads,
|
bool compute(const int n_threads,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
ggml_tensor** output,
|
ggml_tensor** output,
|
||||||
ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(x);
|
return build_graph(x);
|
||||||
};
|
};
|
||||||
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
|
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
|
||||||
|
|||||||
311
src/flux.hpp
311
src/flux.hpp
@ -19,7 +19,7 @@ namespace Flux {
|
|||||||
blocks["out_layer"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_dim, hidden_dim, bias));
|
blocks["out_layer"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_dim, hidden_dim, bias));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
// x: [..., in_dim]
|
// x: [..., in_dim]
|
||||||
// return: [..., hidden_dim]
|
// return: [..., hidden_dim]
|
||||||
auto in_layer = std::dynamic_pointer_cast<Linear>(blocks["in_layer"]);
|
auto in_layer = std::dynamic_pointer_cast<Linear>(blocks["in_layer"]);
|
||||||
@ -37,7 +37,7 @@ namespace Flux {
|
|||||||
int64_t hidden_size;
|
int64_t hidden_size;
|
||||||
float eps;
|
float eps;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
ggml_type wtype = GGML_TYPE_F32;
|
ggml_type wtype = GGML_TYPE_F32;
|
||||||
params["scale"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
|
params["scale"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
|
||||||
}
|
}
|
||||||
@ -48,10 +48,10 @@ namespace Flux {
|
|||||||
: hidden_size(hidden_size),
|
: hidden_size(hidden_size),
|
||||||
eps(eps) {}
|
eps(eps) {}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
struct ggml_tensor* w = params["scale"];
|
ggml_tensor* w = params["scale"];
|
||||||
x = ggml_rms_norm(ctx->ggml_ctx, x, eps);
|
x = ggml_rms_norm(ctx->ggml_ctx, x, eps);
|
||||||
x = ggml_mul(ctx->ggml_ctx, x, w);
|
x = ggml_mul(ctx->ggml_ctx, x, w);
|
||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -63,7 +63,7 @@ namespace Flux {
|
|||||||
blocks["key_norm"] = std::shared_ptr<GGMLBlock>(new RMSNorm(dim));
|
blocks["key_norm"] = std::shared_ptr<GGMLBlock>(new RMSNorm(dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* query_norm(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* query_norm(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [..., dim]
|
// x: [..., dim]
|
||||||
// return: [..., dim]
|
// return: [..., dim]
|
||||||
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["query_norm"]);
|
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["query_norm"]);
|
||||||
@ -72,7 +72,7 @@ namespace Flux {
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* key_norm(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* key_norm(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [..., dim]
|
// x: [..., dim]
|
||||||
// return: [..., dim]
|
// return: [..., dim]
|
||||||
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["key_norm"]);
|
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["key_norm"]);
|
||||||
@ -98,32 +98,34 @@ namespace Flux {
|
|||||||
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim, proj_bias));
|
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim, proj_bias));
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<struct ggml_tensor*> pre_attention(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
std::vector<ggml_tensor*> pre_attention(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto qkv_proj = std::dynamic_pointer_cast<Linear>(blocks["qkv"]);
|
auto qkv_proj = std::dynamic_pointer_cast<Linear>(blocks["qkv"]);
|
||||||
auto norm = std::dynamic_pointer_cast<QKNorm>(blocks["norm"]);
|
auto norm = std::dynamic_pointer_cast<QKNorm>(blocks["norm"]);
|
||||||
|
|
||||||
auto qkv = qkv_proj->forward(ctx, x);
|
auto qkv = qkv_proj->forward(ctx, x);
|
||||||
auto qkv_vec = ggml_ext_chunk(ctx->ggml_ctx, qkv, 3, 0, true);
|
int64_t head_dim = qkv->ne[0] / 3 / num_heads;
|
||||||
int64_t head_dim = qkv_vec[0]->ne[0] / num_heads;
|
auto q = ggml_view_4d(ctx->ggml_ctx, qkv, head_dim, num_heads, qkv->ne[1], qkv->ne[2],
|
||||||
auto q = ggml_reshape_4d(ctx->ggml_ctx, qkv_vec[0], head_dim, num_heads, qkv_vec[0]->ne[1], qkv_vec[0]->ne[2]);
|
qkv->nb[0] * head_dim, qkv->nb[1], qkv->nb[2], 0);
|
||||||
auto k = ggml_reshape_4d(ctx->ggml_ctx, qkv_vec[1], head_dim, num_heads, qkv_vec[1]->ne[1], qkv_vec[1]->ne[2]);
|
auto k = ggml_view_4d(ctx->ggml_ctx, qkv, head_dim, num_heads, qkv->ne[1], qkv->ne[2],
|
||||||
auto v = ggml_reshape_4d(ctx->ggml_ctx, qkv_vec[2], head_dim, num_heads, qkv_vec[2]->ne[1], qkv_vec[2]->ne[2]);
|
qkv->nb[0] * head_dim, qkv->nb[1], qkv->nb[2], (qkv->nb[0]) * qkv->ne[0] / 3);
|
||||||
|
auto v = ggml_view_4d(ctx->ggml_ctx, qkv, head_dim, num_heads, qkv->ne[1], qkv->ne[2],
|
||||||
|
qkv->nb[0] * head_dim, qkv->nb[1], qkv->nb[2], (qkv->nb[0]) * 2 * qkv->ne[0] / 3);
|
||||||
q = norm->query_norm(ctx, q);
|
q = norm->query_norm(ctx, q);
|
||||||
k = norm->key_norm(ctx, k);
|
k = norm->key_norm(ctx, k);
|
||||||
return {q, k, v};
|
return {q, k, v};
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* post_attention(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* post_attention(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
|
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
|
||||||
|
|
||||||
x = proj->forward(ctx, x); // [N, n_token, dim]
|
x = proj->forward(ctx, x); // [N, n_token, dim]
|
||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mask) {
|
ggml_tensor* mask) {
|
||||||
// x: [N, n_token, dim]
|
// x: [N, n_token, dim]
|
||||||
// pe: [n_token, d_head/2, 2, 2]
|
// pe: [n_token, d_head/2, 2, 2]
|
||||||
// return [N, n_token, dim]
|
// return [N, n_token, dim]
|
||||||
@ -145,7 +147,7 @@ namespace Flux {
|
|||||||
blocks["2"] = std::make_shared<Linear>(intermediate_size, hidden_size, bias);
|
blocks["2"] = std::make_shared<Linear>(intermediate_size, hidden_size, bias);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["0"]);
|
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["0"]);
|
||||||
auto mlp_2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);
|
auto mlp_2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);
|
||||||
|
|
||||||
@ -168,7 +170,7 @@ namespace Flux {
|
|||||||
blocks["down_proj"] = std::make_shared<Linear>(intermediate_size, hidden_size, bias);
|
blocks["down_proj"] = std::make_shared<Linear>(intermediate_size, hidden_size, bias);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto gate_proj = std::dynamic_pointer_cast<Linear>(blocks["gate_proj"]);
|
auto gate_proj = std::dynamic_pointer_cast<Linear>(blocks["gate_proj"]);
|
||||||
auto up_proj = std::dynamic_pointer_cast<Linear>(blocks["up_proj"]);
|
auto up_proj = std::dynamic_pointer_cast<Linear>(blocks["up_proj"]);
|
||||||
auto down_proj = std::dynamic_pointer_cast<Linear>(blocks["down_proj"]);
|
auto down_proj = std::dynamic_pointer_cast<Linear>(blocks["down_proj"]);
|
||||||
@ -210,7 +212,7 @@ namespace Flux {
|
|||||||
blocks["lin"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim * multiplier, bias));
|
blocks["lin"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim * multiplier, bias));
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<ModulationOut> forward(GGMLRunnerContext* ctx, struct ggml_tensor* vec) {
|
std::vector<ModulationOut> forward(GGMLRunnerContext* ctx, ggml_tensor* vec) {
|
||||||
// x: [N, dim]
|
// x: [N, dim]
|
||||||
// return: [ModulationOut, ModulationOut]
|
// return: [ModulationOut, ModulationOut]
|
||||||
auto lin = std::dynamic_pointer_cast<Linear>(blocks["lin"]);
|
auto lin = std::dynamic_pointer_cast<Linear>(blocks["lin"]);
|
||||||
@ -230,11 +232,11 @@ namespace Flux {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
__STATIC_INLINE__ struct ggml_tensor* modulate(struct ggml_context* ctx,
|
__STATIC_INLINE__ ggml_tensor* modulate(ggml_context* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* shift,
|
ggml_tensor* shift,
|
||||||
struct ggml_tensor* scale,
|
ggml_tensor* scale,
|
||||||
bool skip_reshape = false) {
|
bool skip_reshape = false) {
|
||||||
// x: [N, L, C]
|
// x: [N, L, C]
|
||||||
// scale: [N, C]
|
// scale: [N, C]
|
||||||
// shift: [N, C]
|
// shift: [N, C]
|
||||||
@ -292,7 +294,7 @@ namespace Flux {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<ModulationOut> get_distil_img_mod(GGMLRunnerContext* ctx, struct ggml_tensor* vec) {
|
std::vector<ModulationOut> get_distil_img_mod(GGMLRunnerContext* ctx, ggml_tensor* vec) {
|
||||||
// TODO: not hardcoded?
|
// TODO: not hardcoded?
|
||||||
const int single_blocks_count = 38;
|
const int single_blocks_count = 38;
|
||||||
const int double_blocks_count = 19;
|
const int double_blocks_count = 19;
|
||||||
@ -301,7 +303,7 @@ namespace Flux {
|
|||||||
return {ModulationOut(ctx, vec, offset), ModulationOut(ctx, vec, offset + 3)};
|
return {ModulationOut(ctx, vec, offset), ModulationOut(ctx, vec, offset + 3)};
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<ModulationOut> get_distil_txt_mod(GGMLRunnerContext* ctx, struct ggml_tensor* vec) {
|
std::vector<ModulationOut> get_distil_txt_mod(GGMLRunnerContext* ctx, ggml_tensor* vec) {
|
||||||
// TODO: not hardcoded?
|
// TODO: not hardcoded?
|
||||||
const int single_blocks_count = 38;
|
const int single_blocks_count = 38;
|
||||||
const int double_blocks_count = 19;
|
const int double_blocks_count = 19;
|
||||||
@ -310,14 +312,14 @@ namespace Flux {
|
|||||||
return {ModulationOut(ctx, vec, offset), ModulationOut(ctx, vec, offset + 3)};
|
return {ModulationOut(ctx, vec, offset), ModulationOut(ctx, vec, offset + 3)};
|
||||||
}
|
}
|
||||||
|
|
||||||
std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* img,
|
ggml_tensor* img,
|
||||||
struct ggml_tensor* txt,
|
ggml_tensor* txt,
|
||||||
struct ggml_tensor* vec,
|
ggml_tensor* vec,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mask = nullptr,
|
ggml_tensor* mask = nullptr,
|
||||||
std::vector<ModulationOut> img_mods = {},
|
std::vector<ModulationOut> img_mods = {},
|
||||||
std::vector<ModulationOut> txt_mods = {}) {
|
std::vector<ModulationOut> txt_mods = {}) {
|
||||||
// img: [N, n_img_token, hidden_size]
|
// img: [N, n_img_token, hidden_size]
|
||||||
// txt: [N, n_txt_token, hidden_size]
|
// txt: [N, n_txt_token, hidden_size]
|
||||||
// pe: [n_img_token + n_txt_token, d_head/2, 2, 2]
|
// pe: [n_img_token + n_txt_token, d_head/2, 2, 2]
|
||||||
@ -455,17 +457,17 @@ namespace Flux {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ModulationOut get_distil_mod(GGMLRunnerContext* ctx, struct ggml_tensor* vec) {
|
ModulationOut get_distil_mod(GGMLRunnerContext* ctx, ggml_tensor* vec) {
|
||||||
int64_t offset = 3 * idx;
|
int64_t offset = 3 * idx;
|
||||||
return ModulationOut(ctx, vec, offset);
|
return ModulationOut(ctx, vec, offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* vec,
|
ggml_tensor* vec,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mask = nullptr,
|
ggml_tensor* mask = nullptr,
|
||||||
std::vector<ModulationOut> mods = {}) {
|
std::vector<ModulationOut> mods = {}) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
// pe: [n_token, d_head/2, 2, 2]
|
// pe: [n_token, d_head/2, 2, 2]
|
||||||
// return: [N, n_token, hidden_size]
|
// return: [N, n_token, hidden_size]
|
||||||
@ -491,15 +493,14 @@ namespace Flux {
|
|||||||
auto x_mod = Flux::modulate(ctx->ggml_ctx, pre_norm->forward(ctx, x), mod.shift, mod.scale);
|
auto x_mod = Flux::modulate(ctx->ggml_ctx, pre_norm->forward(ctx, x), mod.shift, mod.scale);
|
||||||
auto qkv_mlp = linear1->forward(ctx, x_mod); // [N, n_token, hidden_size * 3 + mlp_hidden_dim*mlp_mult_factor]
|
auto qkv_mlp = linear1->forward(ctx, x_mod); // [N, n_token, hidden_size * 3 + mlp_hidden_dim*mlp_mult_factor]
|
||||||
|
|
||||||
auto q = ggml_view_3d(ctx->ggml_ctx, qkv_mlp, hidden_size, qkv_mlp->ne[1], qkv_mlp->ne[2], qkv_mlp->nb[1], qkv_mlp->nb[2], 0);
|
|
||||||
auto k = ggml_view_3d(ctx->ggml_ctx, qkv_mlp, hidden_size, qkv_mlp->ne[1], qkv_mlp->ne[2], qkv_mlp->nb[1], qkv_mlp->nb[2], hidden_size * qkv_mlp->nb[0]);
|
|
||||||
auto v = ggml_view_3d(ctx->ggml_ctx, qkv_mlp, hidden_size, qkv_mlp->ne[1], qkv_mlp->ne[2], qkv_mlp->nb[1], qkv_mlp->nb[2], hidden_size * 2 * qkv_mlp->nb[0]);
|
|
||||||
|
|
||||||
int64_t head_dim = hidden_size / num_heads;
|
int64_t head_dim = hidden_size / num_heads;
|
||||||
|
|
||||||
q = ggml_reshape_4d(ctx->ggml_ctx, ggml_cont(ctx->ggml_ctx, q), head_dim, num_heads, q->ne[1], q->ne[2]); // [N, n_token, n_head, d_head]
|
auto q = ggml_view_4d(ctx->ggml_ctx, qkv_mlp, head_dim, num_heads, qkv_mlp->ne[1], qkv_mlp->ne[2],
|
||||||
k = ggml_reshape_4d(ctx->ggml_ctx, ggml_cont(ctx->ggml_ctx, k), head_dim, num_heads, k->ne[1], k->ne[2]); // [N, n_token, n_head, d_head]
|
qkv_mlp->nb[0] * head_dim, qkv_mlp->nb[1], qkv_mlp->nb[2], 0);
|
||||||
v = ggml_reshape_4d(ctx->ggml_ctx, ggml_cont(ctx->ggml_ctx, v), head_dim, num_heads, v->ne[1], v->ne[2]); // [N, n_token, n_head, d_head]
|
auto k = ggml_view_4d(ctx->ggml_ctx, qkv_mlp, head_dim, num_heads, qkv_mlp->ne[1], qkv_mlp->ne[2],
|
||||||
|
qkv_mlp->nb[0] * head_dim, qkv_mlp->nb[1], qkv_mlp->nb[2], (qkv_mlp->nb[0]) * hidden_size);
|
||||||
|
auto v = ggml_view_4d(ctx->ggml_ctx, qkv_mlp, head_dim, num_heads, qkv_mlp->ne[1], qkv_mlp->ne[2],
|
||||||
|
qkv_mlp->nb[0] * head_dim, qkv_mlp->nb[1], qkv_mlp->nb[2], (qkv_mlp->nb[0]) * 2 * hidden_size);
|
||||||
|
|
||||||
q = norm->query_norm(ctx, q);
|
q = norm->query_norm(ctx, q);
|
||||||
k = norm->key_norm(ctx, k);
|
k = norm->key_norm(ctx, k);
|
||||||
@ -538,7 +539,7 @@ namespace Flux {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ModulationOut get_distil_mod(GGMLRunnerContext* ctx, struct ggml_tensor* vec) {
|
ModulationOut get_distil_mod(GGMLRunnerContext* ctx, ggml_tensor* vec) {
|
||||||
int64_t offset = vec->ne[2] - 2;
|
int64_t offset = vec->ne[2] - 2;
|
||||||
int64_t stride = vec->nb[1] * vec->ne[1];
|
int64_t stride = vec->nb[1] * vec->ne[1];
|
||||||
auto shift = ggml_view_2d(ctx->ggml_ctx, vec, vec->ne[0], vec->ne[1], vec->nb[1], stride * (offset + 0)); // [N, dim]
|
auto shift = ggml_view_2d(ctx->ggml_ctx, vec, vec->ne[0], vec->ne[1], vec->nb[1], stride * (offset + 0)); // [N, dim]
|
||||||
@ -547,15 +548,15 @@ namespace Flux {
|
|||||||
return {shift, scale, nullptr};
|
return {shift, scale, nullptr};
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* c) {
|
ggml_tensor* c) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
// c: [N, hidden_size]
|
// c: [N, hidden_size]
|
||||||
// return: [N, n_token, patch_size * patch_size * out_channels]
|
// return: [N, n_token, patch_size * patch_size * out_channels]
|
||||||
auto norm_final = std::dynamic_pointer_cast<LayerNorm>(blocks["norm_final"]);
|
auto norm_final = std::dynamic_pointer_cast<LayerNorm>(blocks["norm_final"]);
|
||||||
auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]);
|
auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]);
|
||||||
struct ggml_tensor *shift, *scale;
|
ggml_tensor *shift, *scale;
|
||||||
if (prune_mod) {
|
if (prune_mod) {
|
||||||
auto mod = get_distil_mod(ctx, c);
|
auto mod = get_distil_mod(ctx, c);
|
||||||
shift = mod.shift;
|
shift = mod.shift;
|
||||||
@ -588,7 +589,7 @@ namespace Flux {
|
|||||||
blocks["out_proj"] = std::shared_ptr<GGMLBlock>(new Linear(inner_size, hidden_size, true));
|
blocks["out_proj"] = std::shared_ptr<GGMLBlock>(new Linear(inner_size, hidden_size, true));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto in_proj = std::dynamic_pointer_cast<Linear>(blocks["in_proj"]);
|
auto in_proj = std::dynamic_pointer_cast<Linear>(blocks["in_proj"]);
|
||||||
auto out_proj = std::dynamic_pointer_cast<Linear>(blocks["out_proj"]);
|
auto out_proj = std::dynamic_pointer_cast<Linear>(blocks["out_proj"]);
|
||||||
|
|
||||||
@ -611,9 +612,9 @@ namespace Flux {
|
|||||||
blocks["embedder.0"] = std::make_shared<Linear>(in_channels + max_freqs * max_freqs, hidden_size_input);
|
blocks["embedder.0"] = std::make_shared<Linear>(in_channels + max_freqs * max_freqs, hidden_size_input);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* dct) {
|
ggml_tensor* dct) {
|
||||||
// x: (B, P^2, C)
|
// x: (B, P^2, C)
|
||||||
// dct: (1, P^2, max_freqs^2)
|
// dct: (1, P^2, max_freqs^2)
|
||||||
// return: (B, P^2, hidden_size_input)
|
// return: (B, P^2, hidden_size_input)
|
||||||
@ -638,9 +639,9 @@ namespace Flux {
|
|||||||
blocks["norm"] = std::make_shared<RMSNorm>(hidden_size_x);
|
blocks["norm"] = std::make_shared<RMSNorm>(hidden_size_x);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* s) {
|
ggml_tensor* s) {
|
||||||
// x: (batch_size, n_token, hidden_size_x)
|
// x: (batch_size, n_token, hidden_size_x)
|
||||||
// s: (batch_size, hidden_size_s)
|
// s: (batch_size, hidden_size_s)
|
||||||
// return: (batch_size, n_token, hidden_size_x)
|
// return: (batch_size, n_token, hidden_size_x)
|
||||||
@ -688,8 +689,8 @@ namespace Flux {
|
|||||||
blocks["linear"] = std::make_shared<Linear>(hidden_size, out_channels);
|
blocks["linear"] = std::make_shared<Linear>(hidden_size, out_channels);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x) {
|
ggml_tensor* x) {
|
||||||
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["norm"]);
|
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["norm"]);
|
||||||
auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]);
|
auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]);
|
||||||
|
|
||||||
@ -707,8 +708,8 @@ namespace Flux {
|
|||||||
blocks["conv"] = std::make_shared<Conv2d>(hidden_size, out_channels, std::pair{3, 3}, std::pair{1, 1}, std::pair{1, 1});
|
blocks["conv"] = std::make_shared<Conv2d>(hidden_size, out_channels, std::pair{3, 3}, std::pair{1, 1}, std::pair{1, 1});
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x) {
|
ggml_tensor* x) {
|
||||||
// x: [N, C, H, W]
|
// x: [N, C, H, W]
|
||||||
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["norm"]);
|
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["norm"]);
|
||||||
auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]);
|
auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]);
|
||||||
@ -846,15 +847,15 @@ namespace Flux {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
|
ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* img,
|
ggml_tensor* img,
|
||||||
struct ggml_tensor* txt,
|
ggml_tensor* txt,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* y,
|
ggml_tensor* y,
|
||||||
struct ggml_tensor* guidance,
|
ggml_tensor* guidance,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mod_index_arange = nullptr,
|
ggml_tensor* mod_index_arange = nullptr,
|
||||||
std::vector<int> skip_layers = {}) {
|
std::vector<int> skip_layers = {}) {
|
||||||
auto img_in = std::dynamic_pointer_cast<Linear>(blocks["img_in"]);
|
auto img_in = std::dynamic_pointer_cast<Linear>(blocks["img_in"]);
|
||||||
auto txt_in = std::dynamic_pointer_cast<Linear>(blocks["txt_in"]);
|
auto txt_in = std::dynamic_pointer_cast<Linear>(blocks["txt_in"]);
|
||||||
auto final_layer = std::dynamic_pointer_cast<LastLayer>(blocks["final_layer"]);
|
auto final_layer = std::dynamic_pointer_cast<LastLayer>(blocks["final_layer"]);
|
||||||
@ -863,8 +864,8 @@ namespace Flux {
|
|||||||
img = img_in->forward(ctx, img);
|
img = img_in->forward(ctx, img);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* vec;
|
ggml_tensor* vec;
|
||||||
struct ggml_tensor* txt_img_mask = nullptr;
|
ggml_tensor* txt_img_mask = nullptr;
|
||||||
if (params.is_chroma) {
|
if (params.is_chroma) {
|
||||||
int64_t mod_index_length = 344;
|
int64_t mod_index_length = 344;
|
||||||
auto approx = std::dynamic_pointer_cast<ChromaApproximator>(blocks["distilled_guidance_layer"]);
|
auto approx = std::dynamic_pointer_cast<ChromaApproximator>(blocks["distilled_guidance_layer"]);
|
||||||
@ -966,27 +967,27 @@ namespace Flux {
|
|||||||
return img;
|
return img;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* _apply_x0_residual(GGMLRunnerContext* ctx,
|
ggml_tensor* _apply_x0_residual(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* predicted,
|
ggml_tensor* predicted,
|
||||||
struct ggml_tensor* noisy,
|
ggml_tensor* noisy,
|
||||||
struct ggml_tensor* timesteps) {
|
ggml_tensor* timesteps) {
|
||||||
auto x = ggml_sub(ctx->ggml_ctx, noisy, predicted);
|
auto x = ggml_sub(ctx->ggml_ctx, noisy, predicted);
|
||||||
x = ggml_div(ctx->ggml_ctx, x, timesteps);
|
x = ggml_div(ctx->ggml_ctx, x, timesteps);
|
||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward_chroma_radiance(GGMLRunnerContext* ctx,
|
ggml_tensor* forward_chroma_radiance(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timestep,
|
ggml_tensor* timestep,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* c_concat,
|
ggml_tensor* c_concat,
|
||||||
struct ggml_tensor* y,
|
ggml_tensor* y,
|
||||||
struct ggml_tensor* guidance,
|
ggml_tensor* guidance,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mod_index_arange = nullptr,
|
ggml_tensor* mod_index_arange = nullptr,
|
||||||
struct ggml_tensor* dct = nullptr,
|
ggml_tensor* dct = nullptr,
|
||||||
std::vector<ggml_tensor*> ref_latents = {},
|
std::vector<ggml_tensor*> ref_latents = {},
|
||||||
std::vector<int> skip_layers = {}) {
|
std::vector<int> skip_layers = {}) {
|
||||||
GGML_ASSERT(x->ne[3] == 1);
|
GGML_ASSERT(x->ne[3] == 1);
|
||||||
|
|
||||||
int64_t W = x->ne[0];
|
int64_t W = x->ne[0];
|
||||||
@ -1049,18 +1050,18 @@ namespace Flux {
|
|||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward_flux_chroma(GGMLRunnerContext* ctx,
|
ggml_tensor* forward_flux_chroma(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timestep,
|
ggml_tensor* timestep,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* c_concat,
|
ggml_tensor* c_concat,
|
||||||
struct ggml_tensor* y,
|
ggml_tensor* y,
|
||||||
struct ggml_tensor* guidance,
|
ggml_tensor* guidance,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mod_index_arange = nullptr,
|
ggml_tensor* mod_index_arange = nullptr,
|
||||||
struct ggml_tensor* dct = nullptr,
|
ggml_tensor* dct = nullptr,
|
||||||
std::vector<ggml_tensor*> ref_latents = {},
|
std::vector<ggml_tensor*> ref_latents = {},
|
||||||
std::vector<int> skip_layers = {}) {
|
std::vector<int> skip_layers = {}) {
|
||||||
GGML_ASSERT(x->ne[3] == 1);
|
GGML_ASSERT(x->ne[3] == 1);
|
||||||
|
|
||||||
int64_t W = x->ne[0];
|
int64_t W = x->ne[0];
|
||||||
@ -1118,18 +1119,18 @@ namespace Flux {
|
|||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timestep,
|
ggml_tensor* timestep,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* c_concat,
|
ggml_tensor* c_concat,
|
||||||
struct ggml_tensor* y,
|
ggml_tensor* y,
|
||||||
struct ggml_tensor* guidance,
|
ggml_tensor* guidance,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mod_index_arange = nullptr,
|
ggml_tensor* mod_index_arange = nullptr,
|
||||||
struct ggml_tensor* dct = nullptr,
|
ggml_tensor* dct = nullptr,
|
||||||
std::vector<ggml_tensor*> ref_latents = {},
|
std::vector<ggml_tensor*> ref_latents = {},
|
||||||
std::vector<int> skip_layers = {}) {
|
std::vector<int> skip_layers = {}) {
|
||||||
// Forward pass of DiT.
|
// Forward pass of DiT.
|
||||||
// x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
|
// x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
|
||||||
// timestep: (N,) tensor of diffusion timesteps
|
// timestep: (N,) tensor of diffusion timesteps
|
||||||
@ -1298,7 +1299,7 @@ namespace Flux {
|
|||||||
return "flux";
|
return "flux";
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
flux.get_param_tensors(tensors, prefix);
|
flux.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1352,20 +1353,20 @@ namespace Flux {
|
|||||||
return dct;
|
return dct;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* x,
|
ggml_cgraph* build_graph(ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* c_concat,
|
ggml_tensor* c_concat,
|
||||||
struct ggml_tensor* y,
|
ggml_tensor* y,
|
||||||
struct ggml_tensor* guidance,
|
ggml_tensor* guidance,
|
||||||
std::vector<ggml_tensor*> ref_latents = {},
|
std::vector<ggml_tensor*> ref_latents = {},
|
||||||
bool increase_ref_index = false,
|
bool increase_ref_index = false,
|
||||||
std::vector<int> skip_layers = {}) {
|
std::vector<int> skip_layers = {}) {
|
||||||
GGML_ASSERT(x->ne[3] == 1);
|
GGML_ASSERT(x->ne[3] == 1);
|
||||||
struct ggml_cgraph* gf = new_graph_custom(FLUX_GRAPH_SIZE);
|
ggml_cgraph* gf = new_graph_custom(FLUX_GRAPH_SIZE);
|
||||||
|
|
||||||
struct ggml_tensor* mod_index_arange = nullptr;
|
ggml_tensor* mod_index_arange = nullptr;
|
||||||
struct ggml_tensor* dct = nullptr; // for chroma radiance
|
ggml_tensor* dct = nullptr; // for chroma radiance
|
||||||
|
|
||||||
x = to_backend(x);
|
x = to_backend(x);
|
||||||
context = to_backend(context);
|
context = to_backend(context);
|
||||||
@ -1436,18 +1437,18 @@ namespace Flux {
|
|||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
|
|
||||||
struct ggml_tensor* out = flux.forward(&runner_ctx,
|
ggml_tensor* out = flux.forward(&runner_ctx,
|
||||||
x,
|
x,
|
||||||
timesteps,
|
timesteps,
|
||||||
context,
|
context,
|
||||||
c_concat,
|
c_concat,
|
||||||
y,
|
y,
|
||||||
guidance,
|
guidance,
|
||||||
pe,
|
pe,
|
||||||
mod_index_arange,
|
mod_index_arange,
|
||||||
dct,
|
dct,
|
||||||
ref_latents,
|
ref_latents,
|
||||||
skip_layers);
|
skip_layers);
|
||||||
|
|
||||||
ggml_build_forward_expand(gf, out);
|
ggml_build_forward_expand(gf, out);
|
||||||
|
|
||||||
@ -1455,23 +1456,23 @@ namespace Flux {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* c_concat,
|
ggml_tensor* c_concat,
|
||||||
struct ggml_tensor* y,
|
ggml_tensor* y,
|
||||||
struct ggml_tensor* guidance,
|
ggml_tensor* guidance,
|
||||||
std::vector<ggml_tensor*> ref_latents = {},
|
std::vector<ggml_tensor*> ref_latents = {},
|
||||||
bool increase_ref_index = false,
|
bool increase_ref_index = false,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr,
|
ggml_context* output_ctx = nullptr,
|
||||||
std::vector<int> skip_layers = std::vector<int>()) {
|
std::vector<int> skip_layers = std::vector<int>()) {
|
||||||
// x: [N, in_channels, h, w]
|
// x: [N, in_channels, h, w]
|
||||||
// timesteps: [N, ]
|
// timesteps: [N, ]
|
||||||
// context: [N, max_position, hidden_size]
|
// context: [N, max_position, hidden_size]
|
||||||
// y: [N, adm_in_channels] or [1, adm_in_channels]
|
// y: [N, adm_in_channels] or [1, adm_in_channels]
|
||||||
// guidance: [N, ]
|
// guidance: [N, ]
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(x, timesteps, context, c_concat, y, guidance, ref_latents, increase_ref_index, skip_layers);
|
return build_graph(x, timesteps, context, c_concat, y, guidance, ref_latents, increase_ref_index, skip_layers);
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -1479,12 +1480,12 @@ namespace Flux {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void test() {
|
void test() {
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
|
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
|
|
||||||
struct ggml_context* work_ctx = ggml_init(params);
|
ggml_context* work_ctx = ggml_init(params);
|
||||||
GGML_ASSERT(work_ctx != nullptr);
|
GGML_ASSERT(work_ctx != nullptr);
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -1512,7 +1513,7 @@ namespace Flux {
|
|||||||
auto y = nullptr;
|
auto y = nullptr;
|
||||||
// print_ggml_tensor(y);
|
// print_ggml_tensor(y);
|
||||||
|
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
compute(8, x, timesteps, context, nullptr, y, guidance, {}, false, &out, work_ctx);
|
compute(8, x, timesteps, context, nullptr, y, guidance, {}, false, &out, work_ctx);
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@ -163,7 +163,7 @@ const float sd_latent_rgb_proj[4][3] = {
|
|||||||
{-0.178022f, -0.200862f, -0.678514f}};
|
{-0.178022f, -0.200862f, -0.678514f}};
|
||||||
float sd_latent_rgb_bias[3] = {-0.017478f, -0.055834f, -0.105825f};
|
float sd_latent_rgb_bias[3] = {-0.017478f, -0.055834f, -0.105825f};
|
||||||
|
|
||||||
void preview_latent_video(uint8_t* buffer, struct ggml_tensor* latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) {
|
void preview_latent_video(uint8_t* buffer, ggml_tensor* latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) {
|
||||||
size_t buffer_head = 0;
|
size_t buffer_head = 0;
|
||||||
|
|
||||||
uint32_t latent_width = static_cast<uint32_t>(latents->ne[0]);
|
uint32_t latent_width = static_cast<uint32_t>(latents->ne[0]);
|
||||||
|
|||||||
180
src/llm.hpp
180
src/llm.hpp
@ -522,7 +522,7 @@ namespace LLM {
|
|||||||
blocks["down_proj"] = std::shared_ptr<GGMLBlock>(new Linear(intermediate_size, hidden_size, bias));
|
blocks["down_proj"] = std::shared_ptr<GGMLBlock>(new Linear(intermediate_size, hidden_size, bias));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
auto gate_proj = std::dynamic_pointer_cast<Linear>(blocks["gate_proj"]);
|
auto gate_proj = std::dynamic_pointer_cast<Linear>(blocks["gate_proj"]);
|
||||||
auto up_proj = std::dynamic_pointer_cast<Linear>(blocks["up_proj"]);
|
auto up_proj = std::dynamic_pointer_cast<Linear>(blocks["up_proj"]);
|
||||||
@ -582,7 +582,7 @@ namespace LLM {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [N*grid_t*grid_h*grid_w, in_channels, temporal_patch_size*patch_size*patch_size]
|
// x: [N*grid_t*grid_h*grid_w, in_channels, temporal_patch_size*patch_size*patch_size]
|
||||||
// return: [N*grid_t*grid_h*grid_w, embed_dim]
|
// return: [N*grid_t*grid_h*grid_w, embed_dim]
|
||||||
x = ggml_reshape_4d(ctx->ggml_ctx,
|
x = ggml_reshape_4d(ctx->ggml_ctx,
|
||||||
@ -631,7 +631,7 @@ namespace LLM {
|
|||||||
blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, dim));
|
blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto ln_q = std::dynamic_pointer_cast<RMSNorm>(blocks["ln_q"]);
|
auto ln_q = std::dynamic_pointer_cast<RMSNorm>(blocks["ln_q"]);
|
||||||
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
|
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
|
||||||
auto mlp_2 = std::dynamic_pointer_cast<Linear>(blocks["mlp.2"]);
|
auto mlp_2 = std::dynamic_pointer_cast<Linear>(blocks["mlp.2"]);
|
||||||
@ -668,10 +668,10 @@ namespace LLM {
|
|||||||
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, hidden_size));
|
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, hidden_size));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mask = nullptr) {
|
ggml_tensor* mask = nullptr) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
int64_t n_token = x->ne[1];
|
int64_t n_token = x->ne[1];
|
||||||
int64_t N = x->ne[2];
|
int64_t N = x->ne[2];
|
||||||
@ -718,10 +718,10 @@ namespace LLM {
|
|||||||
blocks["norm2"] = std::shared_ptr<GGMLBlock>(new RMSNorm(hidden_size, eps));
|
blocks["norm2"] = std::shared_ptr<GGMLBlock>(new RMSNorm(hidden_size, eps));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mask = nullptr) {
|
ggml_tensor* mask = nullptr) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
auto attn = std::dynamic_pointer_cast<VisionAttention>(blocks["attn"]);
|
auto attn = std::dynamic_pointer_cast<VisionAttention>(blocks["attn"]);
|
||||||
auto mlp = std::dynamic_pointer_cast<MLP>(blocks["mlp"]);
|
auto mlp = std::dynamic_pointer_cast<MLP>(blocks["mlp"]);
|
||||||
@ -778,12 +778,12 @@ namespace LLM {
|
|||||||
blocks["merger"] = std::shared_ptr<GGMLBlock>(new PatchMerger(out_hidden_size, hidden_size, spatial_merge_size));
|
blocks["merger"] = std::shared_ptr<GGMLBlock>(new PatchMerger(out_hidden_size, hidden_size, spatial_merge_size));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* pixel_values,
|
ggml_tensor* pixel_values,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* window_index,
|
ggml_tensor* window_index,
|
||||||
struct ggml_tensor* window_inverse_index,
|
ggml_tensor* window_inverse_index,
|
||||||
struct ggml_tensor* window_mask) {
|
ggml_tensor* window_mask) {
|
||||||
// pixel_values: [grid_t*(H/mh/ph)*(W/mw/pw)*mh*mw, C*pt*ph*pw]
|
// pixel_values: [grid_t*(H/mh/ph)*(W/mw/pw)*mh*mw, C*pt*ph*pw]
|
||||||
// window_index: [grid_t*(H/mh/ph)*(W/mw/pw)]
|
// window_index: [grid_t*(H/mh/ph)*(W/mw/pw)]
|
||||||
// window_inverse_index: [grid_t*(H/mh/ph)*(W/mw/pw)]
|
// window_inverse_index: [grid_t*(H/mh/ph)*(W/mw/pw)]
|
||||||
@ -836,10 +836,10 @@ namespace LLM {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* input_pos,
|
ggml_tensor* input_pos,
|
||||||
struct ggml_tensor* attention_mask = nullptr) {
|
ggml_tensor* attention_mask = nullptr) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
int64_t n_token = x->ne[1];
|
int64_t n_token = x->ne[1];
|
||||||
int64_t N = x->ne[2];
|
int64_t N = x->ne[2];
|
||||||
@ -898,10 +898,10 @@ namespace LLM {
|
|||||||
blocks["post_attention_layernorm"] = std::make_shared<RMSNorm>(params.hidden_size, params.rms_norm_eps);
|
blocks["post_attention_layernorm"] = std::make_shared<RMSNorm>(params.hidden_size, params.rms_norm_eps);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* input_pos,
|
ggml_tensor* input_pos,
|
||||||
struct ggml_tensor* attention_mask = nullptr) {
|
ggml_tensor* attention_mask = nullptr) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
auto self_attn = std::dynamic_pointer_cast<Attention>(blocks["self_attn"]);
|
auto self_attn = std::dynamic_pointer_cast<Attention>(blocks["self_attn"]);
|
||||||
auto mlp = std::dynamic_pointer_cast<MLP>(blocks["mlp"]);
|
auto mlp = std::dynamic_pointer_cast<MLP>(blocks["mlp"]);
|
||||||
@ -936,12 +936,12 @@ namespace LLM {
|
|||||||
blocks["norm"] = std::shared_ptr<GGMLBlock>(new RMSNorm(params.hidden_size, params.rms_norm_eps));
|
blocks["norm"] = std::shared_ptr<GGMLBlock>(new RMSNorm(params.hidden_size, params.rms_norm_eps));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* input_ids,
|
ggml_tensor* input_ids,
|
||||||
struct ggml_tensor* input_pos,
|
ggml_tensor* input_pos,
|
||||||
struct ggml_tensor* attention_mask,
|
ggml_tensor* attention_mask,
|
||||||
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
|
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
|
||||||
std::set<int> out_layers) {
|
std::set<int> out_layers) {
|
||||||
// input_ids: [N, n_token]
|
// input_ids: [N, n_token]
|
||||||
// return: [N, n_token, hidden_size]
|
// return: [N, n_token, hidden_size]
|
||||||
|
|
||||||
@ -1037,12 +1037,12 @@ namespace LLM {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* input_ids,
|
ggml_tensor* input_ids,
|
||||||
struct ggml_tensor* input_pos,
|
ggml_tensor* input_pos,
|
||||||
struct ggml_tensor* attention_mask,
|
ggml_tensor* attention_mask,
|
||||||
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
|
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
|
||||||
std::set<int> out_layers) {
|
std::set<int> out_layers) {
|
||||||
// input_ids: [N, n_token]
|
// input_ids: [N, n_token]
|
||||||
auto model = std::dynamic_pointer_cast<TextModel>(blocks["model"]);
|
auto model = std::dynamic_pointer_cast<TextModel>(blocks["model"]);
|
||||||
|
|
||||||
@ -1050,12 +1050,12 @@ namespace LLM {
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* vision_forward(GGMLRunnerContext* ctx,
|
ggml_tensor* vision_forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* pixel_values,
|
ggml_tensor* pixel_values,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* window_index,
|
ggml_tensor* window_index,
|
||||||
struct ggml_tensor* window_inverse_index,
|
ggml_tensor* window_inverse_index,
|
||||||
struct ggml_tensor* window_mask) {
|
ggml_tensor* window_mask) {
|
||||||
GGML_ASSERT(enable_vision);
|
GGML_ASSERT(enable_vision);
|
||||||
auto vision_model = std::dynamic_pointer_cast<VisionModel>(blocks["visual"]);
|
auto vision_model = std::dynamic_pointer_cast<VisionModel>(blocks["visual"]);
|
||||||
return vision_model->forward(ctx, pixel_values, pe, window_index, window_inverse_index, window_mask);
|
return vision_model->forward(ctx, pixel_values, pe, window_index, window_inverse_index, window_mask);
|
||||||
@ -1156,35 +1156,35 @@ namespace LLM {
|
|||||||
return llm_arch_to_str[static_cast<int>(params.arch)];
|
return llm_arch_to_str[static_cast<int>(params.arch)];
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
model.get_param_tensors(tensors, prefix);
|
model.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* input_ids,
|
ggml_tensor* input_ids,
|
||||||
struct ggml_tensor* input_pos,
|
ggml_tensor* input_pos,
|
||||||
struct ggml_tensor* attention_mask,
|
ggml_tensor* attention_mask,
|
||||||
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
|
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
|
||||||
std::set<int> out_layers) {
|
std::set<int> out_layers) {
|
||||||
auto hidden_states = model.forward(ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers); // [N, n_token, hidden_size]
|
auto hidden_states = model.forward(ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers); // [N, n_token, hidden_size]
|
||||||
return hidden_states;
|
return hidden_states;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* vision_forward(GGMLRunnerContext* ctx,
|
ggml_tensor* vision_forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* pixel_values,
|
ggml_tensor* pixel_values,
|
||||||
struct ggml_tensor* input_pos,
|
ggml_tensor* input_pos,
|
||||||
struct ggml_tensor* window_index,
|
ggml_tensor* window_index,
|
||||||
struct ggml_tensor* window_inverse_index,
|
ggml_tensor* window_inverse_index,
|
||||||
struct ggml_tensor* window_mask) {
|
ggml_tensor* window_mask) {
|
||||||
auto hidden_states = model.vision_forward(ctx, pixel_values, input_pos, window_index, window_inverse_index, window_mask);
|
auto hidden_states = model.vision_forward(ctx, pixel_values, input_pos, window_index, window_inverse_index, window_mask);
|
||||||
return hidden_states;
|
return hidden_states;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids,
|
ggml_cgraph* build_graph(ggml_tensor* input_ids,
|
||||||
struct ggml_tensor* attention_mask,
|
ggml_tensor* attention_mask,
|
||||||
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
|
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
|
||||||
std::set<int> out_layers) {
|
std::set<int> out_layers) {
|
||||||
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
||||||
|
|
||||||
input_ids = to_backend(input_ids);
|
input_ids = to_backend(input_ids);
|
||||||
|
|
||||||
@ -1232,7 +1232,7 @@ namespace LLM {
|
|||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
|
|
||||||
struct ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers);
|
ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers);
|
||||||
|
|
||||||
ggml_build_forward_expand(gf, hidden_states);
|
ggml_build_forward_expand(gf, hidden_states);
|
||||||
|
|
||||||
@ -1240,13 +1240,13 @@ namespace LLM {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool compute(const int n_threads,
|
bool compute(const int n_threads,
|
||||||
struct ggml_tensor* input_ids,
|
ggml_tensor* input_ids,
|
||||||
struct ggml_tensor* attention_mask,
|
ggml_tensor* attention_mask,
|
||||||
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
|
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
|
||||||
std::set<int> out_layers,
|
std::set<int> out_layers,
|
||||||
ggml_tensor** output,
|
ggml_tensor** output,
|
||||||
ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(input_ids, attention_mask, image_embeds, out_layers);
|
return build_graph(input_ids, attention_mask, image_embeds, out_layers);
|
||||||
};
|
};
|
||||||
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
|
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
|
||||||
@ -1261,7 +1261,7 @@ namespace LLM {
|
|||||||
return grid_t * grid_h * grid_w;
|
return grid_t * grid_h * grid_w;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* process_image(struct ggml_context* ctx, struct ggml_tensor* image) {
|
ggml_tensor* process_image(ggml_context* ctx, ggml_tensor* image) {
|
||||||
// image: [C, H, W]
|
// image: [C, H, W]
|
||||||
// return: [grid_t*(H/mh/ph)*(W/mw/pw)*mh*mw, C*pt*ph*pw], grid_t == 1
|
// return: [grid_t*(H/mh/ph)*(W/mw/pw)*mh*mw, C*pt*ph*pw], grid_t == 1
|
||||||
int64_t C = image->ne[2];
|
int64_t C = image->ne[2];
|
||||||
@ -1288,8 +1288,8 @@ namespace LLM {
|
|||||||
return image;
|
return image;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_encode_image_graph(struct ggml_tensor* image) {
|
ggml_cgraph* build_encode_image_graph(ggml_tensor* image) {
|
||||||
struct ggml_cgraph* gf = new_graph_custom(LLM_GRAPH_SIZE);
|
ggml_cgraph* gf = new_graph_custom(LLM_GRAPH_SIZE);
|
||||||
|
|
||||||
GGML_ASSERT(image->ne[1] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
|
GGML_ASSERT(image->ne[1] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
|
||||||
GGML_ASSERT(image->ne[0] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
|
GGML_ASSERT(image->ne[0] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
|
||||||
@ -1399,23 +1399,23 @@ namespace LLM {
|
|||||||
// pe->data = nullptr;
|
// pe->data = nullptr;
|
||||||
set_backend_tensor_data(pe, pe_vec.data());
|
set_backend_tensor_data(pe, pe_vec.data());
|
||||||
|
|
||||||
auto runnter_ctx = get_context();
|
auto runnter_ctx = get_context();
|
||||||
struct ggml_tensor* hidden_states = vision_forward(&runnter_ctx,
|
ggml_tensor* hidden_states = vision_forward(&runnter_ctx,
|
||||||
pixel_values,
|
pixel_values,
|
||||||
pe,
|
pe,
|
||||||
window_index,
|
window_index,
|
||||||
window_inverse_index,
|
window_inverse_index,
|
||||||
window_mask);
|
window_mask);
|
||||||
ggml_build_forward_expand(gf, hidden_states);
|
ggml_build_forward_expand(gf, hidden_states);
|
||||||
|
|
||||||
return gf;
|
return gf;
|
||||||
}
|
}
|
||||||
|
|
||||||
void encode_image(const int n_threads,
|
void encode_image(const int n_threads,
|
||||||
struct ggml_tensor* image,
|
ggml_tensor* image,
|
||||||
ggml_tensor** output,
|
ggml_tensor** output,
|
||||||
ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_encode_image_graph(image);
|
return build_encode_image_graph(image);
|
||||||
};
|
};
|
||||||
GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
|
GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
|
||||||
@ -1440,7 +1440,7 @@ namespace LLM {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
model.get_param_tensors(tensors, prefix);
|
model.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1492,12 +1492,12 @@ namespace LLM {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void test() {
|
void test() {
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
|
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
|
|
||||||
struct ggml_context* work_ctx = ggml_init(params);
|
ggml_context* work_ctx = ggml_init(params);
|
||||||
GGML_ASSERT(work_ctx != nullptr);
|
GGML_ASSERT(work_ctx != nullptr);
|
||||||
bool test_mistral = false;
|
bool test_mistral = false;
|
||||||
bool test_qwen3 = true;
|
bool test_qwen3 = true;
|
||||||
@ -1509,7 +1509,7 @@ namespace LLM {
|
|||||||
{
|
{
|
||||||
auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin");
|
auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin");
|
||||||
print_ggml_tensor(image, false, "image");
|
print_ggml_tensor(image, false, "image");
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
model.encode_image(8, image, &out, work_ctx);
|
model.encode_image(8, image, &out, work_ctx);
|
||||||
@ -1547,8 +1547,8 @@ namespace LLM {
|
|||||||
printf("%d ", token);
|
printf("%d ", token);
|
||||||
}
|
}
|
||||||
printf("\n");
|
printf("\n");
|
||||||
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
|
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
model.compute(8, input_ids, nullptr, image_embeds, {}, &out, work_ctx);
|
model.compute(8, input_ids, nullptr, image_embeds, {}, &out, work_ctx);
|
||||||
@ -1561,7 +1561,7 @@ namespace LLM {
|
|||||||
// ggml_set_f32(image, 0.f);
|
// ggml_set_f32(image, 0.f);
|
||||||
auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin");
|
auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin");
|
||||||
print_ggml_tensor(image, false, "image");
|
print_ggml_tensor(image, false, "image");
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
model.encode_image(8, image, &out, work_ctx);
|
model.encode_image(8, image, &out, work_ctx);
|
||||||
@ -1587,8 +1587,8 @@ namespace LLM {
|
|||||||
printf("%d ", token);
|
printf("%d ", token);
|
||||||
}
|
}
|
||||||
printf("\n");
|
printf("\n");
|
||||||
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
|
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
model.compute(8, input_ids, nullptr, {}, {10, 20, 30}, &out, work_ctx);
|
model.compute(8, input_ids, nullptr, {}, {10, 20, 30}, &out, work_ctx);
|
||||||
@ -1610,8 +1610,8 @@ namespace LLM {
|
|||||||
printf("%d ", token);
|
printf("%d ", token);
|
||||||
}
|
}
|
||||||
printf("\n");
|
printf("\n");
|
||||||
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
|
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
model.compute(8, input_ids, nullptr, {}, {35}, &out, work_ctx);
|
model.compute(8, input_ids, nullptr, {}, {35}, &out, work_ctx);
|
||||||
@ -1633,8 +1633,8 @@ namespace LLM {
|
|||||||
printf("%d ", token);
|
printf("%d ", token);
|
||||||
}
|
}
|
||||||
printf("\n");
|
printf("\n");
|
||||||
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
|
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
model.compute(8, input_ids, nullptr, {}, {}, &out, work_ctx);
|
model.compute(8, input_ids, nullptr, {}, {}, &out, work_ctx);
|
||||||
|
|||||||
32
src/lora.hpp
32
src/lora.hpp
@ -9,7 +9,7 @@
|
|||||||
struct LoraModel : public GGMLRunner {
|
struct LoraModel : public GGMLRunner {
|
||||||
std::string lora_id;
|
std::string lora_id;
|
||||||
float multiplier = 1.0f;
|
float multiplier = 1.0f;
|
||||||
std::unordered_map<std::string, struct ggml_tensor*> lora_tensors;
|
std::unordered_map<std::string, ggml_tensor*> lora_tensors;
|
||||||
std::map<ggml_tensor*, ggml_tensor*> original_tensor_to_final_tensor;
|
std::map<ggml_tensor*, ggml_tensor*> original_tensor_to_final_tensor;
|
||||||
std::set<std::string> applied_lora_tensors;
|
std::set<std::string> applied_lora_tensors;
|
||||||
std::string file_path;
|
std::string file_path;
|
||||||
@ -76,13 +76,13 @@ struct LoraModel : public GGMLRunner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for (const auto& pair : tensors_to_create) {
|
for (const auto& pair : tensors_to_create) {
|
||||||
const auto& name = pair.first;
|
const auto& name = pair.first;
|
||||||
const auto& ts = pair.second;
|
const auto& ts = pair.second;
|
||||||
struct ggml_tensor* real = ggml_new_tensor(params_ctx,
|
ggml_tensor* real = ggml_new_tensor(params_ctx,
|
||||||
ts.type,
|
ts.type,
|
||||||
ts.n_dims,
|
ts.n_dims,
|
||||||
ts.ne);
|
ts.ne);
|
||||||
lora_tensors[name] = real;
|
lora_tensors[name] = real;
|
||||||
}
|
}
|
||||||
|
|
||||||
alloc_params_buffer();
|
alloc_params_buffer();
|
||||||
@ -337,10 +337,10 @@ struct LoraModel : public GGMLRunner {
|
|||||||
}
|
}
|
||||||
scale_value *= multiplier;
|
scale_value *= multiplier;
|
||||||
|
|
||||||
struct ggml_tensor* updown_1 = ggml_ext_merge_lora(ctx, hada_1_down, hada_1_up, hada_1_mid);
|
ggml_tensor* updown_1 = ggml_ext_merge_lora(ctx, hada_1_down, hada_1_up, hada_1_mid);
|
||||||
struct ggml_tensor* updown_2 = ggml_ext_merge_lora(ctx, hada_2_down, hada_2_up, hada_2_mid);
|
ggml_tensor* updown_2 = ggml_ext_merge_lora(ctx, hada_2_down, hada_2_up, hada_2_mid);
|
||||||
auto curr_updown = ggml_mul_inplace(ctx, updown_1, updown_2);
|
auto curr_updown = ggml_mul_inplace(ctx, updown_1, updown_2);
|
||||||
curr_updown = ggml_ext_scale(ctx, curr_updown, scale_value, true);
|
curr_updown = ggml_ext_scale(ctx, curr_updown, scale_value, true);
|
||||||
if (updown == nullptr) {
|
if (updown == nullptr) {
|
||||||
updown = curr_updown;
|
updown = curr_updown;
|
||||||
} else {
|
} else {
|
||||||
@ -747,9 +747,9 @@ struct LoraModel : public GGMLRunner {
|
|||||||
return out_diff;
|
return out_diff;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_lora_graph(const std::map<std::string, ggml_tensor*>& model_tensors, SDVersion version) {
|
ggml_cgraph* build_lora_graph(const std::map<std::string, ggml_tensor*>& model_tensors, SDVersion version) {
|
||||||
size_t lora_graph_size = LORA_GRAPH_BASE_SIZE + lora_tensors.size() * 10;
|
size_t lora_graph_size = LORA_GRAPH_BASE_SIZE + lora_tensors.size() * 10;
|
||||||
struct ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, lora_graph_size, false);
|
ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, lora_graph_size, false);
|
||||||
|
|
||||||
preprocess_lora_tensors(model_tensors);
|
preprocess_lora_tensors(model_tensors);
|
||||||
|
|
||||||
@ -788,8 +788,8 @@ struct LoraModel : public GGMLRunner {
|
|||||||
return gf;
|
return gf;
|
||||||
}
|
}
|
||||||
|
|
||||||
void apply(std::map<std::string, struct ggml_tensor*> model_tensors, SDVersion version, int n_threads) {
|
void apply(std::map<std::string, ggml_tensor*> model_tensors, SDVersion version, int n_threads) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_lora_graph(model_tensors, version);
|
return build_lora_graph(model_tensors, version);
|
||||||
};
|
};
|
||||||
GGMLRunner::compute(get_graph, n_threads, false);
|
GGMLRunner::compute(get_graph, n_threads, false);
|
||||||
|
|||||||
@ -26,9 +26,9 @@ namespace LTXV {
|
|||||||
bias));
|
bias));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
bool causal = true) {
|
bool causal = true) {
|
||||||
// x: [N*IC, ID, IH, IW]
|
// x: [N*IC, ID, IH, IW]
|
||||||
// result: [N*OC, OD, OH, OW]
|
// result: [N*OC, OD, OH, OW]
|
||||||
auto conv = std::dynamic_pointer_cast<Conv3d>(blocks["conv"]);
|
auto conv = std::dynamic_pointer_cast<Conv3d>(blocks["conv"]);
|
||||||
|
|||||||
174
src/mmdit.hpp
174
src/mmdit.hpp
@ -27,7 +27,7 @@ public:
|
|||||||
blocks["fc2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_features, out_features, bias));
|
blocks["fc2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_features, out_features, bias));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [N, n_token, in_features]
|
// x: [N, n_token, in_features]
|
||||||
auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]);
|
auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]);
|
||||||
auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
|
auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
|
||||||
@ -72,7 +72,7 @@ public:
|
|||||||
bias));
|
bias));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [N, C, H, W]
|
// x: [N, C, H, W]
|
||||||
// return: [N, H*W, embed_dim]
|
// return: [N, H*W, embed_dim]
|
||||||
auto proj = std::dynamic_pointer_cast<Conv2d>(blocks["proj"]);
|
auto proj = std::dynamic_pointer_cast<Conv2d>(blocks["proj"]);
|
||||||
@ -111,7 +111,7 @@ public:
|
|||||||
blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, out_channels, true, true));
|
blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, out_channels, true, true));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* t) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* t) {
|
||||||
// t: [N, ]
|
// t: [N, ]
|
||||||
// return: [N, hidden_size]
|
// return: [N, hidden_size]
|
||||||
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
|
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
|
||||||
@ -135,7 +135,7 @@ public:
|
|||||||
blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, hidden_size, true, true));
|
blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, hidden_size, true, true));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [N, input_dim]
|
// x: [N, input_dim]
|
||||||
// return: [N, hidden_size]
|
// return: [N, hidden_size]
|
||||||
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
|
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
|
||||||
@ -175,7 +175,7 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<struct ggml_tensor*> pre_attention(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
std::vector<ggml_tensor*> pre_attention(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto qkv_proj = std::dynamic_pointer_cast<Linear>(blocks["qkv"]);
|
auto qkv_proj = std::dynamic_pointer_cast<Linear>(blocks["qkv"]);
|
||||||
|
|
||||||
auto qkv = qkv_proj->forward(ctx, x);
|
auto qkv = qkv_proj->forward(ctx, x);
|
||||||
@ -198,7 +198,7 @@ public:
|
|||||||
return {q, k, v};
|
return {q, k, v};
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* post_attention(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* post_attention(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
GGML_ASSERT(!pre_only);
|
GGML_ASSERT(!pre_only);
|
||||||
|
|
||||||
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
|
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
|
||||||
@ -208,8 +208,8 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// x: [N, n_token, dim]
|
// x: [N, n_token, dim]
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x) {
|
ggml_tensor* x) {
|
||||||
auto qkv = pre_attention(ctx, x);
|
auto qkv = pre_attention(ctx, x);
|
||||||
x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, dim]
|
x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, dim]
|
||||||
x = post_attention(ctx, x); // [N, n_token, dim]
|
x = post_attention(ctx, x); // [N, n_token, dim]
|
||||||
@ -217,10 +217,10 @@ public:
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
__STATIC_INLINE__ struct ggml_tensor* modulate(struct ggml_context* ctx,
|
__STATIC_INLINE__ ggml_tensor* modulate(ggml_context* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* shift,
|
ggml_tensor* shift,
|
||||||
struct ggml_tensor* scale) {
|
ggml_tensor* scale) {
|
||||||
// x: [N, L, C]
|
// x: [N, L, C]
|
||||||
// scale: [N, C]
|
// scale: [N, C]
|
||||||
// shift: [N, C]
|
// shift: [N, C]
|
||||||
@ -274,8 +274,8 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
std::tuple<std::vector<ggml_tensor*>, std::vector<ggml_tensor*>, std::vector<ggml_tensor*>> pre_attention_x(GGMLRunnerContext* ctx,
|
std::tuple<std::vector<ggml_tensor*>, std::vector<ggml_tensor*>, std::vector<ggml_tensor*>> pre_attention_x(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* c) {
|
ggml_tensor* c) {
|
||||||
GGML_ASSERT(self_attn);
|
GGML_ASSERT(self_attn);
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
// c: [N, hidden_size]
|
// c: [N, hidden_size]
|
||||||
@ -309,9 +309,9 @@ public:
|
|||||||
return {qkv, qkv2, {x, gate_msa, shift_mlp, scale_mlp, gate_mlp, gate_msa2}};
|
return {qkv, qkv2, {x, gate_msa, shift_mlp, scale_mlp, gate_mlp, gate_msa2}};
|
||||||
}
|
}
|
||||||
|
|
||||||
std::pair<std::vector<struct ggml_tensor*>, std::vector<struct ggml_tensor*>> pre_attention(GGMLRunnerContext* ctx,
|
std::pair<std::vector<ggml_tensor*>, std::vector<ggml_tensor*>> pre_attention(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* c) {
|
ggml_tensor* c) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
// c: [N, hidden_size]
|
// c: [N, hidden_size]
|
||||||
auto norm1 = std::dynamic_pointer_cast<LayerNorm>(blocks["norm1"]);
|
auto norm1 = std::dynamic_pointer_cast<LayerNorm>(blocks["norm1"]);
|
||||||
@ -346,15 +346,15 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* post_attention_x(GGMLRunnerContext* ctx,
|
ggml_tensor* post_attention_x(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* attn_out,
|
ggml_tensor* attn_out,
|
||||||
struct ggml_tensor* attn2_out,
|
ggml_tensor* attn2_out,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* gate_msa,
|
ggml_tensor* gate_msa,
|
||||||
struct ggml_tensor* shift_mlp,
|
ggml_tensor* shift_mlp,
|
||||||
struct ggml_tensor* scale_mlp,
|
ggml_tensor* scale_mlp,
|
||||||
struct ggml_tensor* gate_mlp,
|
ggml_tensor* gate_mlp,
|
||||||
struct ggml_tensor* gate_msa2) {
|
ggml_tensor* gate_msa2) {
|
||||||
// attn_out: [N, n_token, hidden_size]
|
// attn_out: [N, n_token, hidden_size]
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
// gate_msa: [N, hidden_size]
|
// gate_msa: [N, hidden_size]
|
||||||
@ -384,13 +384,13 @@ public:
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* post_attention(GGMLRunnerContext* ctx,
|
ggml_tensor* post_attention(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* attn_out,
|
ggml_tensor* attn_out,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* gate_msa,
|
ggml_tensor* gate_msa,
|
||||||
struct ggml_tensor* shift_mlp,
|
ggml_tensor* shift_mlp,
|
||||||
struct ggml_tensor* scale_mlp,
|
ggml_tensor* scale_mlp,
|
||||||
struct ggml_tensor* gate_mlp) {
|
ggml_tensor* gate_mlp) {
|
||||||
// attn_out: [N, n_token, hidden_size]
|
// attn_out: [N, n_token, hidden_size]
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
// gate_msa: [N, hidden_size]
|
// gate_msa: [N, hidden_size]
|
||||||
@ -416,9 +416,9 @@ public:
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* c) {
|
ggml_tensor* c) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
// c: [N, hidden_size]
|
// c: [N, hidden_size]
|
||||||
// return: [N, n_token, hidden_size]
|
// return: [N, n_token, hidden_size]
|
||||||
@ -463,11 +463,11 @@ public:
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
__STATIC_INLINE__ std::pair<struct ggml_tensor*, struct ggml_tensor*>
|
__STATIC_INLINE__ std::pair<ggml_tensor*, ggml_tensor*>
|
||||||
block_mixing(GGMLRunnerContext* ctx,
|
block_mixing(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* c,
|
ggml_tensor* c,
|
||||||
std::shared_ptr<DismantledBlock> context_block,
|
std::shared_ptr<DismantledBlock> context_block,
|
||||||
std::shared_ptr<DismantledBlock> x_block) {
|
std::shared_ptr<DismantledBlock> x_block) {
|
||||||
// context: [N, n_context, hidden_size]
|
// context: [N, n_context, hidden_size]
|
||||||
@ -489,7 +489,7 @@ block_mixing(GGMLRunnerContext* ctx,
|
|||||||
x_qkv = x_qkv_intermediates.first;
|
x_qkv = x_qkv_intermediates.first;
|
||||||
x_intermediates = x_qkv_intermediates.second;
|
x_intermediates = x_qkv_intermediates.second;
|
||||||
}
|
}
|
||||||
std::vector<struct ggml_tensor*> qkv;
|
std::vector<ggml_tensor*> qkv;
|
||||||
for (int i = 0; i < 3; i++) {
|
for (int i = 0; i < 3; i++) {
|
||||||
qkv.push_back(ggml_concat(ctx->ggml_ctx, context_qkv[i], x_qkv[i], 1));
|
qkv.push_back(ggml_concat(ctx->ggml_ctx, context_qkv[i], x_qkv[i], 1));
|
||||||
}
|
}
|
||||||
@ -563,10 +563,10 @@ public:
|
|||||||
blocks["x_block"] = std::shared_ptr<GGMLBlock>(new DismantledBlock(hidden_size, num_heads, mlp_ratio, qk_norm, qkv_bias, false, self_attn_x));
|
blocks["x_block"] = std::shared_ptr<GGMLBlock>(new DismantledBlock(hidden_size, num_heads, mlp_ratio, qk_norm, qkv_bias, false, self_attn_x));
|
||||||
}
|
}
|
||||||
|
|
||||||
std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* c) {
|
ggml_tensor* c) {
|
||||||
auto context_block = std::dynamic_pointer_cast<DismantledBlock>(blocks["context_block"]);
|
auto context_block = std::dynamic_pointer_cast<DismantledBlock>(blocks["context_block"]);
|
||||||
auto x_block = std::dynamic_pointer_cast<DismantledBlock>(blocks["x_block"]);
|
auto x_block = std::dynamic_pointer_cast<DismantledBlock>(blocks["x_block"]);
|
||||||
|
|
||||||
@ -586,9 +586,9 @@ public:
|
|||||||
blocks["adaLN_modulation.1"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, 2 * hidden_size));
|
blocks["adaLN_modulation.1"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, 2 * hidden_size));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* c) {
|
ggml_tensor* c) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
// c: [N, hidden_size]
|
// c: [N, hidden_size]
|
||||||
// return: [N, n_token, patch_size * patch_size * out_channels]
|
// return: [N, n_token, patch_size * patch_size * out_channels]
|
||||||
@ -626,7 +626,7 @@ protected:
|
|||||||
int64_t hidden_size;
|
int64_t hidden_size;
|
||||||
std::string qk_norm;
|
std::string qk_norm;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, std::string prefix = "") override {
|
||||||
enum ggml_type wtype = GGML_TYPE_F32;
|
enum ggml_type wtype = GGML_TYPE_F32;
|
||||||
params["pos_embed"] = ggml_new_tensor_3d(ctx, wtype, hidden_size, num_patchs, 1);
|
params["pos_embed"] = ggml_new_tensor_3d(ctx, wtype, hidden_size, num_patchs, 1);
|
||||||
}
|
}
|
||||||
@ -705,8 +705,8 @@ public:
|
|||||||
blocks["final_layer"] = std::shared_ptr<GGMLBlock>(new FinalLayer(hidden_size, patch_size, out_channels));
|
blocks["final_layer"] = std::shared_ptr<GGMLBlock>(new FinalLayer(hidden_size, patch_size, out_channels));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor*
|
ggml_tensor*
|
||||||
cropped_pos_embed(struct ggml_context* ctx,
|
cropped_pos_embed(ggml_context* ctx,
|
||||||
int64_t h,
|
int64_t h,
|
||||||
int64_t w) {
|
int64_t w) {
|
||||||
auto pos_embed = params["pos_embed"];
|
auto pos_embed = params["pos_embed"];
|
||||||
@ -745,11 +745,11 @@ public:
|
|||||||
return spatial_pos_embed;
|
return spatial_pos_embed;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward_core_with_concat(GGMLRunnerContext* ctx,
|
ggml_tensor* forward_core_with_concat(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* c_mod,
|
ggml_tensor* c_mod,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
std::vector<int> skip_layers = std::vector<int>()) {
|
std::vector<int> skip_layers = std::vector<int>()) {
|
||||||
// x: [N, H*W, hidden_size]
|
// x: [N, H*W, hidden_size]
|
||||||
// context: [N, n_context, d_context]
|
// context: [N, n_context, d_context]
|
||||||
// c: [N, hidden_size]
|
// c: [N, hidden_size]
|
||||||
@ -774,12 +774,12 @@ public:
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* t,
|
ggml_tensor* t,
|
||||||
struct ggml_tensor* y = nullptr,
|
ggml_tensor* y = nullptr,
|
||||||
struct ggml_tensor* context = nullptr,
|
ggml_tensor* context = nullptr,
|
||||||
std::vector<int> skip_layers = std::vector<int>()) {
|
std::vector<int> skip_layers = std::vector<int>()) {
|
||||||
// Forward pass of DiT.
|
// Forward pass of DiT.
|
||||||
// x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
|
// x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
|
||||||
// t: (N,) tensor of diffusion timesteps
|
// t: (N,) tensor of diffusion timesteps
|
||||||
@ -832,29 +832,29 @@ struct MMDiTRunner : public GGMLRunner {
|
|||||||
return "mmdit";
|
return "mmdit";
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
mmdit.get_param_tensors(tensors, prefix);
|
mmdit.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* x,
|
ggml_cgraph* build_graph(ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* y,
|
ggml_tensor* y,
|
||||||
std::vector<int> skip_layers = std::vector<int>()) {
|
std::vector<int> skip_layers = std::vector<int>()) {
|
||||||
struct ggml_cgraph* gf = new_graph_custom(MMDIT_GRAPH_SIZE);
|
ggml_cgraph* gf = new_graph_custom(MMDIT_GRAPH_SIZE);
|
||||||
|
|
||||||
x = to_backend(x);
|
x = to_backend(x);
|
||||||
context = to_backend(context);
|
context = to_backend(context);
|
||||||
y = to_backend(y);
|
y = to_backend(y);
|
||||||
timesteps = to_backend(timesteps);
|
timesteps = to_backend(timesteps);
|
||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
struct ggml_tensor* out = mmdit.forward(&runner_ctx,
|
ggml_tensor* out = mmdit.forward(&runner_ctx,
|
||||||
x,
|
x,
|
||||||
timesteps,
|
timesteps,
|
||||||
y,
|
y,
|
||||||
context,
|
context,
|
||||||
skip_layers);
|
skip_layers);
|
||||||
|
|
||||||
ggml_build_forward_expand(gf, out);
|
ggml_build_forward_expand(gf, out);
|
||||||
|
|
||||||
@ -862,18 +862,18 @@ struct MMDiTRunner : public GGMLRunner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* y,
|
ggml_tensor* y,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr,
|
ggml_context* output_ctx = nullptr,
|
||||||
std::vector<int> skip_layers = std::vector<int>()) {
|
std::vector<int> skip_layers = std::vector<int>()) {
|
||||||
// x: [N, in_channels, h, w]
|
// x: [N, in_channels, h, w]
|
||||||
// timesteps: [N, ]
|
// timesteps: [N, ]
|
||||||
// context: [N, max_position, hidden_size]([N, 154, 4096]) or [1, max_position, hidden_size]
|
// context: [N, max_position, hidden_size]([N, 154, 4096]) or [1, max_position, hidden_size]
|
||||||
// y: [N, adm_in_channels] or [1, adm_in_channels]
|
// y: [N, adm_in_channels] or [1, adm_in_channels]
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(x, timesteps, context, y, skip_layers);
|
return build_graph(x, timesteps, context, y, skip_layers);
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -881,12 +881,12 @@ struct MMDiTRunner : public GGMLRunner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void test() {
|
void test() {
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
|
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
|
|
||||||
struct ggml_context* work_ctx = ggml_init(params);
|
ggml_context* work_ctx = ggml_init(params);
|
||||||
GGML_ASSERT(work_ctx != nullptr);
|
GGML_ASSERT(work_ctx != nullptr);
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -908,7 +908,7 @@ struct MMDiTRunner : public GGMLRunner {
|
|||||||
ggml_set_f32(y, 0.01f);
|
ggml_set_f32(y, 0.01f);
|
||||||
// print_ggml_tensor(y);
|
// print_ggml_tensor(y);
|
||||||
|
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
compute(8, x, timesteps, context, y, &out, work_ctx);
|
compute(8, x, timesteps, context, y, &out, work_ctx);
|
||||||
|
|||||||
@ -287,7 +287,7 @@ void ModelLoader::add_tensor_storage(const TensorStorage& tensor_storage) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool is_zip_file(const std::string& file_path) {
|
bool is_zip_file(const std::string& file_path) {
|
||||||
struct zip_t* zip = zip_open(file_path.c_str(), 0, 'r');
|
zip_t* zip = zip_open(file_path.c_str(), 0, 'r');
|
||||||
if (zip == nullptr) {
|
if (zip == nullptr) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -453,9 +453,9 @@ bool ModelLoader::init_from_gguf_file(const std::string& file_path, const std::s
|
|||||||
size_t total_size = 0;
|
size_t total_size = 0;
|
||||||
size_t data_offset = gguf_get_data_offset(ctx_gguf_);
|
size_t data_offset = gguf_get_data_offset(ctx_gguf_);
|
||||||
for (int i = 0; i < n_tensors; i++) {
|
for (int i = 0; i < n_tensors; i++) {
|
||||||
std::string name = gguf_get_tensor_name(ctx_gguf_, i);
|
std::string name = gguf_get_tensor_name(ctx_gguf_, i);
|
||||||
struct ggml_tensor* dummy = ggml_get_tensor(ctx_meta_, name.c_str());
|
ggml_tensor* dummy = ggml_get_tensor(ctx_meta_, name.c_str());
|
||||||
size_t offset = data_offset + gguf_get_tensor_offset(ctx_gguf_, i);
|
size_t offset = data_offset + gguf_get_tensor_offset(ctx_gguf_, i);
|
||||||
|
|
||||||
// LOG_DEBUG("%s", name.c_str());
|
// LOG_DEBUG("%s", name.c_str());
|
||||||
|
|
||||||
@ -812,7 +812,7 @@ struct PickleTensorReader {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void read_string(const std::string& str, struct zip_t* zip, std::string dir) {
|
void read_string(const std::string& str, zip_t* zip, std::string dir) {
|
||||||
if (str == "storage") {
|
if (str == "storage") {
|
||||||
read_global_type = true;
|
read_global_type = true;
|
||||||
} else if (str != "state_dict") {
|
} else if (str != "state_dict") {
|
||||||
@ -995,7 +995,7 @@ bool ModelLoader::init_from_ckpt_file(const std::string& file_path, const std::s
|
|||||||
file_paths_.push_back(file_path);
|
file_paths_.push_back(file_path);
|
||||||
size_t file_index = file_paths_.size() - 1;
|
size_t file_index = file_paths_.size() - 1;
|
||||||
|
|
||||||
struct zip_t* zip = zip_open(file_path.c_str(), 0, 'r');
|
zip_t* zip = zip_open(file_path.c_str(), 0, 'r');
|
||||||
if (zip == nullptr) {
|
if (zip == nullptr) {
|
||||||
LOG_ERROR("failed to open '%s'", file_path.c_str());
|
LOG_ERROR("failed to open '%s'", file_path.c_str());
|
||||||
return false;
|
return false;
|
||||||
@ -1104,10 +1104,12 @@ SDVersion ModelLoader::get_sd_version() {
|
|||||||
tensor_storage.name.find("unet.mid_block.resnets.1.") != std::string::npos) {
|
tensor_storage.name.find("unet.mid_block.resnets.1.") != std::string::npos) {
|
||||||
has_middle_block_1 = true;
|
has_middle_block_1 = true;
|
||||||
}
|
}
|
||||||
if (tensor_storage.name.find("model.diffusion_model.output_blocks.3.1.transformer_blocks.1") != std::string::npos) {
|
if (tensor_storage.name.find("model.diffusion_model.output_blocks.3.1.transformer_blocks.1") != std::string::npos ||
|
||||||
|
tensor_storage.name.find("unet.up_blocks.1.attentions.0.transformer_blocks.1") != std::string::npos) {
|
||||||
has_output_block_311 = true;
|
has_output_block_311 = true;
|
||||||
}
|
}
|
||||||
if (tensor_storage.name.find("model.diffusion_model.output_blocks.7.1") != std::string::npos) {
|
if (tensor_storage.name.find("model.diffusion_model.output_blocks.7.1") != std::string::npos ||
|
||||||
|
tensor_storage.name.find("unet.up_blocks.2.attentions.1") != std::string::npos) {
|
||||||
has_output_block_71 = true;
|
has_output_block_71 = true;
|
||||||
}
|
}
|
||||||
if (tensor_storage.name == "cond_stage_model.transformer.text_model.embeddings.token_embedding.weight" ||
|
if (tensor_storage.name == "cond_stage_model.transformer.text_model.embeddings.token_embedding.weight" ||
|
||||||
@ -1411,7 +1413,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
|
|||||||
for (int i = 0; i < n_threads; ++i) {
|
for (int i = 0; i < n_threads; ++i) {
|
||||||
workers.emplace_back([&, file_path, is_zip]() {
|
workers.emplace_back([&, file_path, is_zip]() {
|
||||||
std::ifstream file;
|
std::ifstream file;
|
||||||
struct zip_t* zip = nullptr;
|
zip_t* zip = nullptr;
|
||||||
if (is_zip) {
|
if (is_zip) {
|
||||||
zip = zip_open(file_path.c_str(), 0, 'r');
|
zip = zip_open(file_path.c_str(), 0, 'r');
|
||||||
if (zip == nullptr) {
|
if (zip == nullptr) {
|
||||||
@ -1599,7 +1601,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
|
|||||||
return success;
|
return success;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool ModelLoader::load_tensors(std::map<std::string, struct ggml_tensor*>& tensors,
|
bool ModelLoader::load_tensors(std::map<std::string, ggml_tensor*>& tensors,
|
||||||
std::set<std::string> ignore_tensors,
|
std::set<std::string> ignore_tensors,
|
||||||
int n_threads,
|
int n_threads,
|
||||||
bool enable_mmap) {
|
bool enable_mmap) {
|
||||||
@ -1613,7 +1615,7 @@ bool ModelLoader::load_tensors(std::map<std::string, struct ggml_tensor*>& tenso
|
|||||||
tensor_names_in_file.insert(name);
|
tensor_names_in_file.insert(name);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* real;
|
ggml_tensor* real;
|
||||||
if (tensors.find(name) != tensors.end()) {
|
if (tensors.find(name) != tensors.end()) {
|
||||||
real = tensors[name];
|
real = tensors[name];
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@ -323,7 +323,7 @@ public:
|
|||||||
String2TensorStorage& get_tensor_storage_map() { return tensor_storage_map; }
|
String2TensorStorage& get_tensor_storage_map() { return tensor_storage_map; }
|
||||||
void set_wtype_override(ggml_type wtype, std::string tensor_type_rules = "");
|
void set_wtype_override(ggml_type wtype, std::string tensor_type_rules = "");
|
||||||
bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads = 0, bool use_mmap = false);
|
bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads = 0, bool use_mmap = false);
|
||||||
bool load_tensors(std::map<std::string, struct ggml_tensor*>& tensors,
|
bool load_tensors(std::map<std::string, ggml_tensor*>& tensors,
|
||||||
std::set<std::string> ignore_tensors = {},
|
std::set<std::string> ignore_tensors = {},
|
||||||
int n_threads = 0,
|
int n_threads = 0,
|
||||||
bool use_mmap = false);
|
bool use_mmap = false);
|
||||||
|
|||||||
@ -1120,7 +1120,11 @@ std::string convert_tensor_name(std::string name, SDVersion version) {
|
|||||||
for (const auto& prefix : first_stage_model_prefix_vec) {
|
for (const auto& prefix : first_stage_model_prefix_vec) {
|
||||||
if (starts_with(name, prefix)) {
|
if (starts_with(name, prefix)) {
|
||||||
name = convert_first_stage_model_name(name.substr(prefix.size()), prefix);
|
name = convert_first_stage_model_name(name.substr(prefix.size()), prefix);
|
||||||
name = prefix + name;
|
if (version == VERSION_SDXS) {
|
||||||
|
name = "tae." + name;
|
||||||
|
} else {
|
||||||
|
name = prefix + name;
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
204
src/pmid.hpp
204
src/pmid.hpp
@ -21,14 +21,14 @@ public:
|
|||||||
blocks["layernorm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(in_dim));
|
blocks["layernorm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(in_dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
// x: [N, channels, h, w]
|
// x: [N, channels, h, w]
|
||||||
|
|
||||||
auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]);
|
auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]);
|
||||||
auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
|
auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
|
||||||
auto layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["layernorm"]);
|
auto layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["layernorm"]);
|
||||||
|
|
||||||
struct ggml_tensor* r = x;
|
ggml_tensor* r = x;
|
||||||
// x = ggml_ext_layer_norm(ctx, x, ln_w, ln_b);
|
// x = ggml_ext_layer_norm(ctx, x, ln_w, ln_b);
|
||||||
x = layer_norm->forward(ctx, x);
|
x = layer_norm->forward(ctx, x);
|
||||||
// x = ggml_add(ctx, ggml_mul_mat(ctx, fc1_w, x), fc1_b);
|
// x = ggml_add(ctx, ggml_mul_mat(ctx, fc1_w, x), fc1_b);
|
||||||
@ -54,8 +54,8 @@ public:
|
|||||||
blocks["1"] = std::shared_ptr<GGMLBlock>(new Mlp(dim, inner_dim, dim, false));
|
blocks["1"] = std::shared_ptr<GGMLBlock>(new Mlp(dim, inner_dim, dim, false));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x) {
|
ggml_tensor* x) {
|
||||||
auto norm = std::dynamic_pointer_cast<LayerNorm>(blocks["0"]);
|
auto norm = std::dynamic_pointer_cast<LayerNorm>(blocks["0"]);
|
||||||
auto ff = std::dynamic_pointer_cast<Mlp>(blocks["1"]);
|
auto ff = std::dynamic_pointer_cast<Mlp>(blocks["1"]);
|
||||||
|
|
||||||
@ -81,9 +81,9 @@ public:
|
|||||||
blocks["to_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim, false));
|
blocks["to_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim, false));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* reshape_tensor(struct ggml_context* ctx,
|
ggml_tensor* reshape_tensor(ggml_context* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int heads) {
|
int heads) {
|
||||||
int64_t ne[4];
|
int64_t ne[4];
|
||||||
for (int i = 0; i < 4; ++i)
|
for (int i = 0; i < 4; ++i)
|
||||||
ne[i] = x->ne[i];
|
ne[i] = x->ne[i];
|
||||||
@ -92,17 +92,17 @@ public:
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<struct ggml_tensor*> chunk_half(struct ggml_context* ctx,
|
std::vector<ggml_tensor*> chunk_half(ggml_context* ctx,
|
||||||
struct ggml_tensor* x) {
|
ggml_tensor* x) {
|
||||||
auto tlo = ggml_view_4d(ctx, x, x->ne[0] / 2, x->ne[1], x->ne[2], x->ne[3], x->nb[1], x->nb[2], x->nb[3], 0);
|
auto tlo = ggml_view_4d(ctx, x, x->ne[0] / 2, x->ne[1], x->ne[2], x->ne[3], x->nb[1], x->nb[2], x->nb[3], 0);
|
||||||
auto tli = ggml_view_4d(ctx, x, x->ne[0] / 2, x->ne[1], x->ne[2], x->ne[3], x->nb[1], x->nb[2], x->nb[3], x->nb[0] * x->ne[0] / 2);
|
auto tli = ggml_view_4d(ctx, x, x->ne[0] / 2, x->ne[1], x->ne[2], x->ne[3], x->nb[1], x->nb[2], x->nb[3], x->nb[0] * x->ne[0] / 2);
|
||||||
return {ggml_cont(ctx, tlo),
|
return {ggml_cont(ctx, tlo),
|
||||||
ggml_cont(ctx, tli)};
|
ggml_cont(ctx, tli)};
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* latents) {
|
ggml_tensor* latents) {
|
||||||
// x (torch.Tensor): image features
|
// x (torch.Tensor): image features
|
||||||
// shape (b, n1, D)
|
// shape (b, n1, D)
|
||||||
// latent (torch.Tensor): latent features
|
// latent (torch.Tensor): latent features
|
||||||
@ -176,9 +176,9 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* latents,
|
ggml_tensor* latents,
|
||||||
struct ggml_tensor* x) {
|
ggml_tensor* x) {
|
||||||
// x: [N, channels, h, w]
|
// x: [N, channels, h, w]
|
||||||
auto proj_in = std::dynamic_pointer_cast<Linear>(blocks["proj_in"]);
|
auto proj_in = std::dynamic_pointer_cast<Linear>(blocks["proj_in"]);
|
||||||
auto proj_out = std::dynamic_pointer_cast<Linear>(blocks["proj_out"]);
|
auto proj_out = std::dynamic_pointer_cast<Linear>(blocks["proj_out"]);
|
||||||
@ -225,19 +225,19 @@ public:
|
|||||||
4));
|
4));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* last_hidden_state) {
|
ggml_tensor* last_hidden_state) {
|
||||||
// x: [N, channels, h, w]
|
// x: [N, channels, h, w]
|
||||||
auto token_proj = std::dynamic_pointer_cast<Mlp>(blocks["token_proj"]);
|
auto token_proj = std::dynamic_pointer_cast<Mlp>(blocks["token_proj"]);
|
||||||
auto token_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["token_norm"]);
|
auto token_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["token_norm"]);
|
||||||
auto perceiver_resampler = std::dynamic_pointer_cast<FacePerceiverResampler>(blocks["perceiver_resampler"]);
|
auto perceiver_resampler = std::dynamic_pointer_cast<FacePerceiverResampler>(blocks["perceiver_resampler"]);
|
||||||
|
|
||||||
x = token_proj->forward(ctx, x);
|
x = token_proj->forward(ctx, x);
|
||||||
int64_t nel = ggml_nelements(x);
|
int64_t nel = ggml_nelements(x);
|
||||||
x = ggml_reshape_3d(ctx->ggml_ctx, x, cross_attention_dim, num_tokens, nel / (cross_attention_dim * num_tokens));
|
x = ggml_reshape_3d(ctx->ggml_ctx, x, cross_attention_dim, num_tokens, nel / (cross_attention_dim * num_tokens));
|
||||||
x = token_norm->forward(ctx, x);
|
x = token_norm->forward(ctx, x);
|
||||||
struct ggml_tensor* out = perceiver_resampler->forward(ctx, x, last_hidden_state);
|
ggml_tensor* out = perceiver_resampler->forward(ctx, x, last_hidden_state);
|
||||||
if (use_residul)
|
if (use_residul)
|
||||||
out = ggml_add(ctx->ggml_ctx, x, out);
|
out = ggml_add(ctx->ggml_ctx, x, out);
|
||||||
return out;
|
return out;
|
||||||
@ -256,9 +256,9 @@ public:
|
|||||||
blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(embed_dim));
|
blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(embed_dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* fuse_fn(GGMLRunnerContext* ctx,
|
ggml_tensor* fuse_fn(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* prompt_embeds,
|
ggml_tensor* prompt_embeds,
|
||||||
struct ggml_tensor* id_embeds) {
|
ggml_tensor* id_embeds) {
|
||||||
auto mlp1 = std::dynamic_pointer_cast<FuseBlock>(blocks["mlp1"]);
|
auto mlp1 = std::dynamic_pointer_cast<FuseBlock>(blocks["mlp1"]);
|
||||||
auto mlp2 = std::dynamic_pointer_cast<FuseBlock>(blocks["mlp2"]);
|
auto mlp2 = std::dynamic_pointer_cast<FuseBlock>(blocks["mlp2"]);
|
||||||
auto layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["layer_norm"]);
|
auto layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["layer_norm"]);
|
||||||
@ -273,24 +273,24 @@ public:
|
|||||||
return stacked_id_embeds;
|
return stacked_id_embeds;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* prompt_embeds,
|
ggml_tensor* prompt_embeds,
|
||||||
struct ggml_tensor* id_embeds,
|
ggml_tensor* id_embeds,
|
||||||
struct ggml_tensor* class_tokens_mask,
|
ggml_tensor* class_tokens_mask,
|
||||||
struct ggml_tensor* class_tokens_mask_pos,
|
ggml_tensor* class_tokens_mask_pos,
|
||||||
struct ggml_tensor* left,
|
ggml_tensor* left,
|
||||||
struct ggml_tensor* right) {
|
ggml_tensor* right) {
|
||||||
// x: [N, channels, h, w]
|
// x: [N, channels, h, w]
|
||||||
|
|
||||||
struct ggml_tensor* valid_id_embeds = id_embeds;
|
ggml_tensor* valid_id_embeds = id_embeds;
|
||||||
// # slice out the image token embeddings
|
// # slice out the image token embeddings
|
||||||
ggml_set_name(class_tokens_mask_pos, "class_tokens_mask_pos");
|
ggml_set_name(class_tokens_mask_pos, "class_tokens_mask_pos");
|
||||||
ggml_set_name(prompt_embeds, "prompt_embeds");
|
ggml_set_name(prompt_embeds, "prompt_embeds");
|
||||||
struct ggml_tensor* image_token_embeds = ggml_get_rows(ctx->ggml_ctx, prompt_embeds, class_tokens_mask_pos);
|
ggml_tensor* image_token_embeds = ggml_get_rows(ctx->ggml_ctx, prompt_embeds, class_tokens_mask_pos);
|
||||||
ggml_set_name(image_token_embeds, "image_token_embeds");
|
ggml_set_name(image_token_embeds, "image_token_embeds");
|
||||||
valid_id_embeds = ggml_reshape_2d(ctx->ggml_ctx, valid_id_embeds, valid_id_embeds->ne[0],
|
valid_id_embeds = ggml_reshape_2d(ctx->ggml_ctx, valid_id_embeds, valid_id_embeds->ne[0],
|
||||||
ggml_nelements(valid_id_embeds) / valid_id_embeds->ne[0]);
|
ggml_nelements(valid_id_embeds) / valid_id_embeds->ne[0]);
|
||||||
struct ggml_tensor* stacked_id_embeds = fuse_fn(ctx, image_token_embeds, valid_id_embeds);
|
ggml_tensor* stacked_id_embeds = fuse_fn(ctx, image_token_embeds, valid_id_embeds);
|
||||||
|
|
||||||
if (left && right) {
|
if (left && right) {
|
||||||
stacked_id_embeds = ggml_concat(ctx->ggml_ctx, left, stacked_id_embeds, 1);
|
stacked_id_embeds = ggml_concat(ctx->ggml_ctx, left, stacked_id_embeds, 1);
|
||||||
@ -301,10 +301,10 @@ public:
|
|||||||
stacked_id_embeds = ggml_concat(ctx->ggml_ctx, stacked_id_embeds, right, 1);
|
stacked_id_embeds = ggml_concat(ctx->ggml_ctx, stacked_id_embeds, right, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
class_tokens_mask = ggml_cont(ctx->ggml_ctx, ggml_transpose(ctx->ggml_ctx, class_tokens_mask));
|
class_tokens_mask = ggml_cont(ctx->ggml_ctx, ggml_transpose(ctx->ggml_ctx, class_tokens_mask));
|
||||||
class_tokens_mask = ggml_repeat(ctx->ggml_ctx, class_tokens_mask, prompt_embeds);
|
class_tokens_mask = ggml_repeat(ctx->ggml_ctx, class_tokens_mask, prompt_embeds);
|
||||||
prompt_embeds = ggml_mul(ctx->ggml_ctx, prompt_embeds, class_tokens_mask);
|
prompt_embeds = ggml_mul(ctx->ggml_ctx, prompt_embeds, class_tokens_mask);
|
||||||
struct ggml_tensor* updated_prompt_embeds = ggml_add(ctx->ggml_ctx, prompt_embeds, stacked_id_embeds);
|
ggml_tensor* updated_prompt_embeds = ggml_add(ctx->ggml_ctx, prompt_embeds, stacked_id_embeds);
|
||||||
ggml_set_name(updated_prompt_embeds, "updated_prompt_embeds");
|
ggml_set_name(updated_prompt_embeds, "updated_prompt_embeds");
|
||||||
return updated_prompt_embeds;
|
return updated_prompt_embeds;
|
||||||
}
|
}
|
||||||
@ -317,22 +317,22 @@ struct PhotoMakerIDEncoderBlock : public CLIPVisionModelProjection {
|
|||||||
blocks["fuse_module"] = std::shared_ptr<GGMLBlock>(new FuseModule(2048));
|
blocks["fuse_module"] = std::shared_ptr<GGMLBlock>(new FuseModule(2048));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* id_pixel_values,
|
ggml_tensor* id_pixel_values,
|
||||||
struct ggml_tensor* prompt_embeds,
|
ggml_tensor* prompt_embeds,
|
||||||
struct ggml_tensor* class_tokens_mask,
|
ggml_tensor* class_tokens_mask,
|
||||||
struct ggml_tensor* class_tokens_mask_pos,
|
ggml_tensor* class_tokens_mask_pos,
|
||||||
struct ggml_tensor* left,
|
ggml_tensor* left,
|
||||||
struct ggml_tensor* right) {
|
ggml_tensor* right) {
|
||||||
// x: [N, channels, h, w]
|
// x: [N, channels, h, w]
|
||||||
auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]);
|
auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]);
|
||||||
auto visual_projection = std::dynamic_pointer_cast<CLIPProjection>(blocks["visual_projection"]);
|
auto visual_projection = std::dynamic_pointer_cast<CLIPProjection>(blocks["visual_projection"]);
|
||||||
auto visual_projection_2 = std::dynamic_pointer_cast<Linear>(blocks["visual_projection_2"]);
|
auto visual_projection_2 = std::dynamic_pointer_cast<Linear>(blocks["visual_projection_2"]);
|
||||||
auto fuse_module = std::dynamic_pointer_cast<FuseModule>(blocks["fuse_module"]);
|
auto fuse_module = std::dynamic_pointer_cast<FuseModule>(blocks["fuse_module"]);
|
||||||
|
|
||||||
struct ggml_tensor* shared_id_embeds = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size]
|
ggml_tensor* shared_id_embeds = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size]
|
||||||
struct ggml_tensor* id_embeds = visual_projection->forward(ctx, shared_id_embeds); // [N, proj_dim(768)]
|
ggml_tensor* id_embeds = visual_projection->forward(ctx, shared_id_embeds); // [N, proj_dim(768)]
|
||||||
struct ggml_tensor* id_embeds_2 = visual_projection_2->forward(ctx, shared_id_embeds); // [N, 1280]
|
ggml_tensor* id_embeds_2 = visual_projection_2->forward(ctx, shared_id_embeds); // [N, 1280]
|
||||||
|
|
||||||
id_embeds = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds, 2, 0, 1, 3));
|
id_embeds = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds, 2, 0, 1, 3));
|
||||||
id_embeds_2 = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds_2, 2, 0, 1, 3));
|
id_embeds_2 = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds_2, 2, 0, 1, 3));
|
||||||
@ -340,12 +340,12 @@ struct PhotoMakerIDEncoderBlock : public CLIPVisionModelProjection {
|
|||||||
id_embeds = ggml_concat(ctx->ggml_ctx, id_embeds, id_embeds_2, 2); // [batch_size, seq_length, 1, 2048] check whether concat at dim 2 is right
|
id_embeds = ggml_concat(ctx->ggml_ctx, id_embeds, id_embeds_2, 2); // [batch_size, seq_length, 1, 2048] check whether concat at dim 2 is right
|
||||||
id_embeds = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds, 1, 2, 0, 3));
|
id_embeds = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds, 1, 2, 0, 3));
|
||||||
|
|
||||||
struct ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx,
|
ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx,
|
||||||
prompt_embeds,
|
prompt_embeds,
|
||||||
id_embeds,
|
id_embeds,
|
||||||
class_tokens_mask,
|
class_tokens_mask,
|
||||||
class_tokens_mask_pos,
|
class_tokens_mask_pos,
|
||||||
left, right);
|
left, right);
|
||||||
return updated_prompt_embeds;
|
return updated_prompt_embeds;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -365,29 +365,29 @@ struct PhotoMakerIDEncoder_CLIPInsightfaceExtendtokenBlock : public CLIPVisionMo
|
|||||||
num_tokens));
|
num_tokens));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* id_pixel_values,
|
ggml_tensor* id_pixel_values,
|
||||||
struct ggml_tensor* prompt_embeds,
|
ggml_tensor* prompt_embeds,
|
||||||
struct ggml_tensor* class_tokens_mask,
|
ggml_tensor* class_tokens_mask,
|
||||||
struct ggml_tensor* class_tokens_mask_pos,
|
ggml_tensor* class_tokens_mask_pos,
|
||||||
struct ggml_tensor* id_embeds,
|
ggml_tensor* id_embeds,
|
||||||
struct ggml_tensor* left,
|
ggml_tensor* left,
|
||||||
struct ggml_tensor* right) {
|
ggml_tensor* right) {
|
||||||
// x: [N, channels, h, w]
|
// x: [N, channels, h, w]
|
||||||
auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]);
|
auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]);
|
||||||
auto fuse_module = std::dynamic_pointer_cast<FuseModule>(blocks["fuse_module"]);
|
auto fuse_module = std::dynamic_pointer_cast<FuseModule>(blocks["fuse_module"]);
|
||||||
auto qformer_perceiver = std::dynamic_pointer_cast<QFormerPerceiver>(blocks["qformer_perceiver"]);
|
auto qformer_perceiver = std::dynamic_pointer_cast<QFormerPerceiver>(blocks["qformer_perceiver"]);
|
||||||
|
|
||||||
// struct ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size]
|
// ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size]
|
||||||
struct ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values, false); // [N, hidden_size]
|
ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values, false); // [N, hidden_size]
|
||||||
id_embeds = qformer_perceiver->forward(ctx, id_embeds, last_hidden_state);
|
id_embeds = qformer_perceiver->forward(ctx, id_embeds, last_hidden_state);
|
||||||
|
|
||||||
struct ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx,
|
ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx,
|
||||||
prompt_embeds,
|
prompt_embeds,
|
||||||
id_embeds,
|
id_embeds,
|
||||||
class_tokens_mask,
|
class_tokens_mask,
|
||||||
class_tokens_mask_pos,
|
class_tokens_mask_pos,
|
||||||
left, right);
|
left, right);
|
||||||
return updated_prompt_embeds;
|
return updated_prompt_embeds;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -436,18 +436,18 @@ public:
|
|||||||
return pm_version;
|
return pm_version;
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
if (pm_version == PM_VERSION_1)
|
if (pm_version == PM_VERSION_1)
|
||||||
id_encoder.get_param_tensors(tensors, prefix);
|
id_encoder.get_param_tensors(tensors, prefix);
|
||||||
else if (pm_version == PM_VERSION_2)
|
else if (pm_version == PM_VERSION_2)
|
||||||
id_encoder2.get_param_tensors(tensors, prefix);
|
id_encoder2.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph( // struct ggml_allocr* allocr,
|
ggml_cgraph* build_graph( // ggml_allocr* allocr,
|
||||||
struct ggml_tensor* id_pixel_values,
|
ggml_tensor* id_pixel_values,
|
||||||
struct ggml_tensor* prompt_embeds,
|
ggml_tensor* prompt_embeds,
|
||||||
std::vector<bool>& class_tokens_mask,
|
std::vector<bool>& class_tokens_mask,
|
||||||
struct ggml_tensor* id_embeds) {
|
ggml_tensor* id_embeds) {
|
||||||
ctm.clear();
|
ctm.clear();
|
||||||
ctmf16.clear();
|
ctmf16.clear();
|
||||||
ctmpos.clear();
|
ctmpos.clear();
|
||||||
@ -458,20 +458,20 @@ public:
|
|||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
|
|
||||||
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
||||||
|
|
||||||
int64_t hidden_size = prompt_embeds->ne[0];
|
int64_t hidden_size = prompt_embeds->ne[0];
|
||||||
int64_t seq_length = prompt_embeds->ne[1];
|
int64_t seq_length = prompt_embeds->ne[1];
|
||||||
ggml_type type = GGML_TYPE_F32;
|
ggml_type type = GGML_TYPE_F32;
|
||||||
|
|
||||||
struct ggml_tensor* class_tokens_mask_d = ggml_new_tensor_1d(runner_ctx.ggml_ctx, type, class_tokens_mask.size());
|
ggml_tensor* class_tokens_mask_d = ggml_new_tensor_1d(runner_ctx.ggml_ctx, type, class_tokens_mask.size());
|
||||||
|
|
||||||
struct ggml_tensor* id_pixel_values_d = to_backend(id_pixel_values);
|
ggml_tensor* id_pixel_values_d = to_backend(id_pixel_values);
|
||||||
struct ggml_tensor* prompt_embeds_d = to_backend(prompt_embeds);
|
ggml_tensor* prompt_embeds_d = to_backend(prompt_embeds);
|
||||||
struct ggml_tensor* id_embeds_d = to_backend(id_embeds);
|
ggml_tensor* id_embeds_d = to_backend(id_embeds);
|
||||||
|
|
||||||
struct ggml_tensor* left = nullptr;
|
ggml_tensor* left = nullptr;
|
||||||
struct ggml_tensor* right = nullptr;
|
ggml_tensor* right = nullptr;
|
||||||
for (int i = 0; i < class_tokens_mask.size(); i++) {
|
for (int i = 0; i < class_tokens_mask.size(); i++) {
|
||||||
if (class_tokens_mask[i]) {
|
if (class_tokens_mask[i]) {
|
||||||
// printf(" 1,");
|
// printf(" 1,");
|
||||||
@ -495,7 +495,7 @@ public:
|
|||||||
right = ggml_new_tensor_3d(runner_ctx.ggml_ctx, type,
|
right = ggml_new_tensor_3d(runner_ctx.ggml_ctx, type,
|
||||||
hidden_size, seq_length - ctmpos[ctmpos.size() - 1] - 1, 1);
|
hidden_size, seq_length - ctmpos[ctmpos.size() - 1] - 1, 1);
|
||||||
}
|
}
|
||||||
struct ggml_tensor* class_tokens_mask_pos = ggml_new_tensor_1d(runner_ctx.ggml_ctx, GGML_TYPE_I32, ctmpos.size());
|
ggml_tensor* class_tokens_mask_pos = ggml_new_tensor_1d(runner_ctx.ggml_ctx, GGML_TYPE_I32, ctmpos.size());
|
||||||
|
|
||||||
{
|
{
|
||||||
if (type == GGML_TYPE_F16)
|
if (type == GGML_TYPE_F16)
|
||||||
@ -526,7 +526,7 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
struct ggml_tensor* updated_prompt_embeds = nullptr;
|
ggml_tensor* updated_prompt_embeds = nullptr;
|
||||||
if (pm_version == PM_VERSION_1)
|
if (pm_version == PM_VERSION_1)
|
||||||
updated_prompt_embeds = id_encoder.forward(&runner_ctx,
|
updated_prompt_embeds = id_encoder.forward(&runner_ctx,
|
||||||
id_pixel_values_d,
|
id_pixel_values_d,
|
||||||
@ -549,13 +549,13 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool compute(const int n_threads,
|
bool compute(const int n_threads,
|
||||||
struct ggml_tensor* id_pixel_values,
|
ggml_tensor* id_pixel_values,
|
||||||
struct ggml_tensor* prompt_embeds,
|
ggml_tensor* prompt_embeds,
|
||||||
struct ggml_tensor* id_embeds,
|
ggml_tensor* id_embeds,
|
||||||
std::vector<bool>& class_tokens_mask,
|
std::vector<bool>& class_tokens_mask,
|
||||||
struct ggml_tensor** updated_prompt_embeds,
|
ggml_tensor** updated_prompt_embeds,
|
||||||
ggml_context* output_ctx) {
|
ggml_context* output_ctx) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
// return build_graph(compute_allocr, id_pixel_values, prompt_embeds, class_tokens_mask);
|
// return build_graph(compute_allocr, id_pixel_values, prompt_embeds, class_tokens_mask);
|
||||||
return build_graph(id_pixel_values, prompt_embeds, class_tokens_mask, id_embeds);
|
return build_graph(id_pixel_values, prompt_embeds, class_tokens_mask, id_embeds);
|
||||||
};
|
};
|
||||||
@ -566,7 +566,7 @@ public:
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct PhotoMakerIDEmbed : public GGMLRunner {
|
struct PhotoMakerIDEmbed : public GGMLRunner {
|
||||||
std::map<std::string, struct ggml_tensor*> tensors;
|
std::map<std::string, ggml_tensor*> tensors;
|
||||||
std::string file_path;
|
std::string file_path;
|
||||||
ModelLoader* model_loader;
|
ModelLoader* model_loader;
|
||||||
bool load_failed = false;
|
bool load_failed = false;
|
||||||
@ -606,11 +606,11 @@ struct PhotoMakerIDEmbed : public GGMLRunner {
|
|||||||
}
|
}
|
||||||
if (dry_run) {
|
if (dry_run) {
|
||||||
std::lock_guard<std::mutex> lock(tensor_mutex);
|
std::lock_guard<std::mutex> lock(tensor_mutex);
|
||||||
struct ggml_tensor* real = ggml_new_tensor(params_ctx,
|
ggml_tensor* real = ggml_new_tensor(params_ctx,
|
||||||
tensor_storage.type,
|
tensor_storage.type,
|
||||||
tensor_storage.n_dims,
|
tensor_storage.n_dims,
|
||||||
tensor_storage.ne);
|
tensor_storage.ne);
|
||||||
tensors[name] = real;
|
tensors[name] = real;
|
||||||
} else {
|
} else {
|
||||||
auto real = tensors[name];
|
auto real = tensors[name];
|
||||||
*dst_tensor = real;
|
*dst_tensor = real;
|
||||||
@ -629,8 +629,8 @@ struct PhotoMakerIDEmbed : public GGMLRunner {
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* get() {
|
ggml_tensor* get() {
|
||||||
std::map<std::string, struct ggml_tensor*>::iterator pos;
|
std::map<std::string, ggml_tensor*>::iterator pos;
|
||||||
pos = tensors.find("pmid.id_embeds");
|
pos = tensors.find("pmid.id_embeds");
|
||||||
if (pos != tensors.end())
|
if (pos != tensors.end())
|
||||||
return pos->second;
|
return pos->second;
|
||||||
|
|||||||
@ -4,13 +4,13 @@
|
|||||||
#include "ggml_extend.hpp"
|
#include "ggml_extend.hpp"
|
||||||
#define M_PI_ 3.14159265358979323846f
|
#define M_PI_ 3.14159265358979323846f
|
||||||
|
|
||||||
void convolve(struct ggml_tensor* input, struct ggml_tensor* output, struct ggml_tensor* kernel, int padding) {
|
void convolve(ggml_tensor* input, ggml_tensor* output, ggml_tensor* kernel, int padding) {
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = 80 * input->ne[0] * input->ne[1]; // 20M for 512x512
|
params.mem_size = 80 * input->ne[0] * input->ne[1]; // 20M for 512x512
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
struct ggml_context* ctx0 = ggml_init(params);
|
ggml_context* ctx0 = ggml_init(params);
|
||||||
struct ggml_tensor* kernel_fp16 = ggml_new_tensor_4d(ctx0, GGML_TYPE_F16, kernel->ne[0], kernel->ne[1], 1, 1);
|
ggml_tensor* kernel_fp16 = ggml_new_tensor_4d(ctx0, GGML_TYPE_F16, kernel->ne[0], kernel->ne[1], 1, 1);
|
||||||
ggml_fp32_to_fp16_row((float*)kernel->data, (ggml_fp16_t*)kernel_fp16->data, ggml_nelements(kernel));
|
ggml_fp32_to_fp16_row((float*)kernel->data, (ggml_fp16_t*)kernel_fp16->data, ggml_nelements(kernel));
|
||||||
ggml_tensor* h = ggml_conv_2d(ctx0, kernel_fp16, input, 1, 1, padding, padding, 1, 1);
|
ggml_tensor* h = ggml_conv_2d(ctx0, kernel_fp16, input, 1, 1, padding, padding, 1, 1);
|
||||||
ggml_cgraph* gf = ggml_new_graph(ctx0);
|
ggml_cgraph* gf = ggml_new_graph(ctx0);
|
||||||
@ -19,7 +19,7 @@ void convolve(struct ggml_tensor* input, struct ggml_tensor* output, struct ggml
|
|||||||
ggml_free(ctx0);
|
ggml_free(ctx0);
|
||||||
}
|
}
|
||||||
|
|
||||||
void gaussian_kernel(struct ggml_tensor* kernel) {
|
void gaussian_kernel(ggml_tensor* kernel) {
|
||||||
int ks_mid = static_cast<int>(kernel->ne[0] / 2);
|
int ks_mid = static_cast<int>(kernel->ne[0] / 2);
|
||||||
float sigma = 1.4f;
|
float sigma = 1.4f;
|
||||||
float normal = 1.f / (2.0f * M_PI_ * powf(sigma, 2.0f));
|
float normal = 1.f / (2.0f * M_PI_ * powf(sigma, 2.0f));
|
||||||
@ -33,7 +33,7 @@ void gaussian_kernel(struct ggml_tensor* kernel) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void grayscale(struct ggml_tensor* rgb_img, struct ggml_tensor* grayscale) {
|
void grayscale(ggml_tensor* rgb_img, ggml_tensor* grayscale) {
|
||||||
for (int iy = 0; iy < rgb_img->ne[1]; iy++) {
|
for (int iy = 0; iy < rgb_img->ne[1]; iy++) {
|
||||||
for (int ix = 0; ix < rgb_img->ne[0]; ix++) {
|
for (int ix = 0; ix < rgb_img->ne[0]; ix++) {
|
||||||
float r = ggml_ext_tensor_get_f32(rgb_img, ix, iy);
|
float r = ggml_ext_tensor_get_f32(rgb_img, ix, iy);
|
||||||
@ -45,7 +45,7 @@ void grayscale(struct ggml_tensor* rgb_img, struct ggml_tensor* grayscale) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void prop_hypot(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor* h) {
|
void prop_hypot(ggml_tensor* x, ggml_tensor* y, ggml_tensor* h) {
|
||||||
int n_elements = static_cast<int>(ggml_nelements(h));
|
int n_elements = static_cast<int>(ggml_nelements(h));
|
||||||
float* dx = (float*)x->data;
|
float* dx = (float*)x->data;
|
||||||
float* dy = (float*)y->data;
|
float* dy = (float*)y->data;
|
||||||
@ -55,7 +55,7 @@ void prop_hypot(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void prop_arctan2(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor* h) {
|
void prop_arctan2(ggml_tensor* x, ggml_tensor* y, ggml_tensor* h) {
|
||||||
int n_elements = static_cast<int>(ggml_nelements(h));
|
int n_elements = static_cast<int>(ggml_nelements(h));
|
||||||
float* dx = (float*)x->data;
|
float* dx = (float*)x->data;
|
||||||
float* dy = (float*)y->data;
|
float* dy = (float*)y->data;
|
||||||
@ -65,7 +65,7 @@ void prop_arctan2(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tens
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void normalize_tensor(struct ggml_tensor* g) {
|
void normalize_tensor(ggml_tensor* g) {
|
||||||
int n_elements = static_cast<int>(ggml_nelements(g));
|
int n_elements = static_cast<int>(ggml_nelements(g));
|
||||||
float* dg = (float*)g->data;
|
float* dg = (float*)g->data;
|
||||||
float max = -INFINITY;
|
float max = -INFINITY;
|
||||||
@ -78,7 +78,7 @@ void normalize_tensor(struct ggml_tensor* g) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void non_max_supression(struct ggml_tensor* result, struct ggml_tensor* G, struct ggml_tensor* D) {
|
void non_max_supression(ggml_tensor* result, ggml_tensor* G, ggml_tensor* D) {
|
||||||
for (int iy = 1; iy < result->ne[1] - 1; iy++) {
|
for (int iy = 1; iy < result->ne[1] - 1; iy++) {
|
||||||
for (int ix = 1; ix < result->ne[0] - 1; ix++) {
|
for (int ix = 1; ix < result->ne[0] - 1; ix++) {
|
||||||
float angle = ggml_ext_tensor_get_f32(D, ix, iy) * 180.0f / M_PI_;
|
float angle = ggml_ext_tensor_get_f32(D, ix, iy) * 180.0f / M_PI_;
|
||||||
@ -117,7 +117,7 @@ void non_max_supression(struct ggml_tensor* result, struct ggml_tensor* G, struc
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void threshold_hystersis(struct ggml_tensor* img, float high_threshold, float low_threshold, float weak, float strong) {
|
void threshold_hystersis(ggml_tensor* img, float high_threshold, float low_threshold, float weak, float strong) {
|
||||||
int n_elements = static_cast<int>(ggml_nelements(img));
|
int n_elements = static_cast<int>(ggml_nelements(img));
|
||||||
float* imd = (float*)img->data;
|
float* imd = (float*)img->data;
|
||||||
float max = -INFINITY;
|
float max = -INFINITY;
|
||||||
@ -163,11 +163,11 @@ void threshold_hystersis(struct ggml_tensor* img, float high_threshold, float lo
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold, float weak, float strong, bool inverse) {
|
bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold, float weak, float strong, bool inverse) {
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = static_cast<size_t>(40 * img.width * img.height); // 10MB for 512x512
|
params.mem_size = static_cast<size_t>(40 * img.width * img.height); // 10MB for 512x512
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
struct ggml_context* work_ctx = ggml_init(params);
|
ggml_context* work_ctx = ggml_init(params);
|
||||||
|
|
||||||
if (!work_ctx) {
|
if (!work_ctx) {
|
||||||
LOG_ERROR("ggml_init() failed");
|
LOG_ERROR("ggml_init() failed");
|
||||||
@ -185,19 +185,19 @@ bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold,
|
|||||||
-1, -2, -1};
|
-1, -2, -1};
|
||||||
|
|
||||||
// generate kernel
|
// generate kernel
|
||||||
int kernel_size = 5;
|
int kernel_size = 5;
|
||||||
struct ggml_tensor* gkernel = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, kernel_size, kernel_size, 1, 1);
|
ggml_tensor* gkernel = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, kernel_size, kernel_size, 1, 1);
|
||||||
struct ggml_tensor* sf_kx = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
|
ggml_tensor* sf_kx = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
|
||||||
memcpy(sf_kx->data, kX, ggml_nbytes(sf_kx));
|
memcpy(sf_kx->data, kX, ggml_nbytes(sf_kx));
|
||||||
struct ggml_tensor* sf_ky = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
|
ggml_tensor* sf_ky = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
|
||||||
memcpy(sf_ky->data, kY, ggml_nbytes(sf_ky));
|
memcpy(sf_ky->data, kY, ggml_nbytes(sf_ky));
|
||||||
gaussian_kernel(gkernel);
|
gaussian_kernel(gkernel);
|
||||||
struct ggml_tensor* image = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 3, 1);
|
ggml_tensor* image = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 3, 1);
|
||||||
struct ggml_tensor* image_gray = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 1, 1);
|
ggml_tensor* image_gray = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 1, 1);
|
||||||
struct ggml_tensor* iX = ggml_dup_tensor(work_ctx, image_gray);
|
ggml_tensor* iX = ggml_dup_tensor(work_ctx, image_gray);
|
||||||
struct ggml_tensor* iY = ggml_dup_tensor(work_ctx, image_gray);
|
ggml_tensor* iY = ggml_dup_tensor(work_ctx, image_gray);
|
||||||
struct ggml_tensor* G = ggml_dup_tensor(work_ctx, image_gray);
|
ggml_tensor* G = ggml_dup_tensor(work_ctx, image_gray);
|
||||||
struct ggml_tensor* tetha = ggml_dup_tensor(work_ctx, image_gray);
|
ggml_tensor* tetha = ggml_dup_tensor(work_ctx, image_gray);
|
||||||
sd_image_to_ggml_tensor(img, image);
|
sd_image_to_ggml_tensor(img, image);
|
||||||
grayscale(image, image_gray);
|
grayscale(image, image_gray);
|
||||||
convolve(image_gray, image_gray, gkernel, 2);
|
convolve(image_gray, image_gray, gkernel, 2);
|
||||||
|
|||||||
@ -26,9 +26,9 @@ namespace Qwen {
|
|||||||
blocks["linear_2"] = std::shared_ptr<GGMLBlock>(new Linear(time_embed_dim, out_dim, sample_proj_bias));
|
blocks["linear_2"] = std::shared_ptr<GGMLBlock>(new Linear(time_embed_dim, out_dim, sample_proj_bias));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* sample,
|
ggml_tensor* sample,
|
||||||
struct ggml_tensor* condition = nullptr) {
|
ggml_tensor* condition = nullptr) {
|
||||||
if (condition != nullptr) {
|
if (condition != nullptr) {
|
||||||
auto cond_proj = std::dynamic_pointer_cast<Linear>(blocks["cond_proj"]);
|
auto cond_proj = std::dynamic_pointer_cast<Linear>(blocks["cond_proj"]);
|
||||||
sample = ggml_add(ctx->ggml_ctx, sample, cond_proj->forward(ctx, condition));
|
sample = ggml_add(ctx->ggml_ctx, sample, cond_proj->forward(ctx, condition));
|
||||||
@ -49,8 +49,8 @@ namespace Qwen {
|
|||||||
blocks["timestep_embedder"] = std::shared_ptr<GGMLBlock>(new TimestepEmbedding(256, embedding_dim));
|
blocks["timestep_embedder"] = std::shared_ptr<GGMLBlock>(new TimestepEmbedding(256, embedding_dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* timesteps) {
|
ggml_tensor* timesteps) {
|
||||||
// timesteps: [N,]
|
// timesteps: [N,]
|
||||||
// return: [N, embedding_dim]
|
// return: [N, embedding_dim]
|
||||||
auto timestep_embedder = std::dynamic_pointer_cast<TimestepEmbedding>(blocks["timestep_embedder"]);
|
auto timestep_embedder = std::dynamic_pointer_cast<TimestepEmbedding>(blocks["timestep_embedder"]);
|
||||||
@ -107,10 +107,10 @@ namespace Qwen {
|
|||||||
}
|
}
|
||||||
|
|
||||||
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* img,
|
ggml_tensor* img,
|
||||||
struct ggml_tensor* txt,
|
ggml_tensor* txt,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mask = nullptr) {
|
ggml_tensor* mask = nullptr) {
|
||||||
// img: [N, n_img_token, hidden_size]
|
// img: [N, n_img_token, hidden_size]
|
||||||
// txt: [N, n_txt_token, hidden_size]
|
// txt: [N, n_txt_token, hidden_size]
|
||||||
// pe: [n_img_token + n_txt_token, d_head/2, 2, 2]
|
// pe: [n_img_token + n_txt_token, d_head/2, 2, 2]
|
||||||
@ -249,11 +249,11 @@ namespace Qwen {
|
|||||||
}
|
}
|
||||||
|
|
||||||
virtual std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
virtual std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* img,
|
ggml_tensor* img,
|
||||||
struct ggml_tensor* txt,
|
ggml_tensor* txt,
|
||||||
struct ggml_tensor* t_emb,
|
ggml_tensor* t_emb,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* modulate_index = nullptr) {
|
ggml_tensor* modulate_index = nullptr) {
|
||||||
// img: [N, n_img_token, hidden_size]
|
// img: [N, n_img_token, hidden_size]
|
||||||
// txt: [N, n_txt_token, hidden_size]
|
// txt: [N, n_txt_token, hidden_size]
|
||||||
// pe: [n_img_token + n_txt_token, d_head/2, 2, 2]
|
// pe: [n_img_token + n_txt_token, d_head/2, 2, 2]
|
||||||
@ -325,9 +325,9 @@ namespace Qwen {
|
|||||||
blocks["linear"] = std::shared_ptr<GGMLBlock>(new Linear(conditioning_embedding_dim, embedding_dim * 2, bias));
|
blocks["linear"] = std::shared_ptr<GGMLBlock>(new Linear(conditioning_embedding_dim, embedding_dim * 2, bias));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* c) {
|
ggml_tensor* c) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
// c: [N, hidden_size]
|
// c: [N, hidden_size]
|
||||||
// return: [N, n_token, patch_size * patch_size * out_channels]
|
// return: [N, n_token, patch_size * patch_size * out_channels]
|
||||||
@ -389,12 +389,12 @@ namespace Qwen {
|
|||||||
blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, params.patch_size * params.patch_size * params.out_channels));
|
blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, params.patch_size * params.patch_size * params.out_channels));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
|
ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timestep,
|
ggml_tensor* timestep,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* modulate_index = nullptr) {
|
ggml_tensor* modulate_index = nullptr) {
|
||||||
auto time_text_embed = std::dynamic_pointer_cast<QwenTimestepProjEmbeddings>(blocks["time_text_embed"]);
|
auto time_text_embed = std::dynamic_pointer_cast<QwenTimestepProjEmbeddings>(blocks["time_text_embed"]);
|
||||||
auto txt_norm = std::dynamic_pointer_cast<RMSNorm>(blocks["txt_norm"]);
|
auto txt_norm = std::dynamic_pointer_cast<RMSNorm>(blocks["txt_norm"]);
|
||||||
auto img_in = std::dynamic_pointer_cast<Linear>(blocks["img_in"]);
|
auto img_in = std::dynamic_pointer_cast<Linear>(blocks["img_in"]);
|
||||||
@ -404,7 +404,7 @@ namespace Qwen {
|
|||||||
|
|
||||||
auto t_emb = time_text_embed->forward(ctx, timestep);
|
auto t_emb = time_text_embed->forward(ctx, timestep);
|
||||||
if (params.zero_cond_t) {
|
if (params.zero_cond_t) {
|
||||||
auto t_emb_0 = time_text_embed->forward(ctx, ggml_ext_zeros(ctx->ggml_ctx, timestep->ne[0], timestep->ne[1], timestep->ne[2], timestep->ne[3]));
|
auto t_emb_0 = time_text_embed->forward(ctx, ggml_ext_zeros_like(ctx->ggml_ctx, timestep));
|
||||||
t_emb = ggml_concat(ctx->ggml_ctx, t_emb, t_emb_0, 1);
|
t_emb = ggml_concat(ctx->ggml_ctx, t_emb, t_emb_0, 1);
|
||||||
}
|
}
|
||||||
auto img = img_in->forward(ctx, x);
|
auto img = img_in->forward(ctx, x);
|
||||||
@ -429,13 +429,13 @@ namespace Qwen {
|
|||||||
return img;
|
return img;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timestep,
|
ggml_tensor* timestep,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
std::vector<ggml_tensor*> ref_latents = {},
|
std::vector<ggml_tensor*> ref_latents = {},
|
||||||
struct ggml_tensor* modulate_index = nullptr) {
|
ggml_tensor* modulate_index = nullptr) {
|
||||||
// Forward pass of DiT.
|
// Forward pass of DiT.
|
||||||
// x: [N, C, H, W]
|
// x: [N, C, H, W]
|
||||||
// timestep: [N,]
|
// timestep: [N,]
|
||||||
@ -521,17 +521,17 @@ namespace Qwen {
|
|||||||
return "qwen_image";
|
return "qwen_image";
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
qwen_image.get_param_tensors(tensors, prefix);
|
qwen_image.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* x,
|
ggml_cgraph* build_graph(ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
std::vector<ggml_tensor*> ref_latents = {},
|
std::vector<ggml_tensor*> ref_latents = {},
|
||||||
bool increase_ref_index = false) {
|
bool increase_ref_index = false) {
|
||||||
GGML_ASSERT(x->ne[3] == 1);
|
GGML_ASSERT(x->ne[3] == 1);
|
||||||
struct ggml_cgraph* gf = new_graph_custom(QWEN_IMAGE_GRAPH_SIZE);
|
ggml_cgraph* gf = new_graph_custom(QWEN_IMAGE_GRAPH_SIZE);
|
||||||
|
|
||||||
x = to_backend(x);
|
x = to_backend(x);
|
||||||
context = to_backend(context);
|
context = to_backend(context);
|
||||||
@ -587,13 +587,13 @@ namespace Qwen {
|
|||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
|
|
||||||
struct ggml_tensor* out = qwen_image.forward(&runner_ctx,
|
ggml_tensor* out = qwen_image.forward(&runner_ctx,
|
||||||
x,
|
x,
|
||||||
timesteps,
|
timesteps,
|
||||||
context,
|
context,
|
||||||
pe,
|
pe,
|
||||||
ref_latents,
|
ref_latents,
|
||||||
modulate_index);
|
modulate_index);
|
||||||
|
|
||||||
ggml_build_forward_expand(gf, out);
|
ggml_build_forward_expand(gf, out);
|
||||||
|
|
||||||
@ -601,17 +601,17 @@ namespace Qwen {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
std::vector<ggml_tensor*> ref_latents = {},
|
std::vector<ggml_tensor*> ref_latents = {},
|
||||||
bool increase_ref_index = false,
|
bool increase_ref_index = false,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
// x: [N, in_channels, h, w]
|
// x: [N, in_channels, h, w]
|
||||||
// timesteps: [N, ]
|
// timesteps: [N, ]
|
||||||
// context: [N, max_position, hidden_size]
|
// context: [N, max_position, hidden_size]
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(x, timesteps, context, ref_latents, increase_ref_index);
|
return build_graph(x, timesteps, context, ref_latents, increase_ref_index);
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -619,12 +619,12 @@ namespace Qwen {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void test() {
|
void test() {
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
|
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
|
|
||||||
struct ggml_context* work_ctx = ggml_init(params);
|
ggml_context* work_ctx = ggml_init(params);
|
||||||
GGML_ASSERT(work_ctx != nullptr);
|
GGML_ASSERT(work_ctx != nullptr);
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -641,7 +641,7 @@ namespace Qwen {
|
|||||||
auto context = load_tensor_from_file(work_ctx, "./qwen_image_context.bin");
|
auto context = load_tensor_from_file(work_ctx, "./qwen_image_context.bin");
|
||||||
print_ggml_tensor(context);
|
print_ggml_tensor(context);
|
||||||
|
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
compute(8, x, timesteps, context, {}, false, &out, work_ctx);
|
compute(8, x, timesteps, context, {}, false, &out, work_ctx);
|
||||||
|
|||||||
24
src/rope.hpp
24
src/rope.hpp
@ -600,10 +600,10 @@ namespace Rope {
|
|||||||
return embed_nd(ids, bs, static_cast<float>(theta), axes_dim, wrap_dims);
|
return embed_nd(ids, bs, static_cast<float>(theta), axes_dim, wrap_dims);
|
||||||
}
|
}
|
||||||
|
|
||||||
__STATIC_INLINE__ struct ggml_tensor* apply_rope(struct ggml_context* ctx,
|
__STATIC_INLINE__ ggml_tensor* apply_rope(ggml_context* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
bool rope_interleaved = true) {
|
bool rope_interleaved = true) {
|
||||||
// x: [N, L, n_head, d_head]
|
// x: [N, L, n_head, d_head]
|
||||||
// pe: [L, d_head/2, 2, 2], [[cos, -sin], [sin, cos]]
|
// pe: [L, d_head/2, 2, 2], [[cos, -sin], [sin, cos]]
|
||||||
int64_t d_head = x->ne[0];
|
int64_t d_head = x->ne[0];
|
||||||
@ -641,14 +641,14 @@ namespace Rope {
|
|||||||
return x_out;
|
return x_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
__STATIC_INLINE__ struct ggml_tensor* attention(GGMLRunnerContext* ctx,
|
__STATIC_INLINE__ ggml_tensor* attention(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* q,
|
ggml_tensor* q,
|
||||||
struct ggml_tensor* k,
|
ggml_tensor* k,
|
||||||
struct ggml_tensor* v,
|
ggml_tensor* v,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mask,
|
ggml_tensor* mask,
|
||||||
float kv_scale = 1.0f,
|
float kv_scale = 1.0f,
|
||||||
bool rope_interleaved = true) {
|
bool rope_interleaved = true) {
|
||||||
// q,k,v: [N, L, n_head, d_head]
|
// q,k,v: [N, L, n_head, d_head]
|
||||||
// pe: [L, d_head/2, 2, 2]
|
// pe: [L, d_head/2, 2, 2]
|
||||||
// return: [N, L, n_head*d_head]
|
// return: [N, L, n_head*d_head]
|
||||||
|
|||||||
195
src/spectrum.hpp
Normal file
195
src/spectrum.hpp
Normal file
@ -0,0 +1,195 @@
|
|||||||
|
#ifndef __SPECTRUM_HPP__
|
||||||
|
#define __SPECTRUM_HPP__
|
||||||
|
|
||||||
|
#include <cmath>
|
||||||
|
#include <cstring>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#include "ggml_extend.hpp"
|
||||||
|
|
||||||
|
struct SpectrumConfig {
|
||||||
|
float w = 0.40f;
|
||||||
|
int m = 3;
|
||||||
|
float lam = 1.0f;
|
||||||
|
int window_size = 2;
|
||||||
|
float flex_window = 0.50f;
|
||||||
|
int warmup_steps = 4;
|
||||||
|
float stop_percent = 0.9f;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct SpectrumState {
|
||||||
|
SpectrumConfig config;
|
||||||
|
int cnt = 0;
|
||||||
|
int num_cached = 0;
|
||||||
|
float curr_ws = 2.0f;
|
||||||
|
int K = 6;
|
||||||
|
int stop_step = 0;
|
||||||
|
int total_steps_skipped = 0;
|
||||||
|
|
||||||
|
std::vector<std::vector<float>> H_buf;
|
||||||
|
std::vector<float> T_buf;
|
||||||
|
|
||||||
|
void init(const SpectrumConfig& cfg, size_t total_steps) {
|
||||||
|
config = cfg;
|
||||||
|
cnt = 0;
|
||||||
|
num_cached = 0;
|
||||||
|
curr_ws = (float)cfg.window_size;
|
||||||
|
K = std::max(cfg.m + 1, 6);
|
||||||
|
stop_step = (int)(cfg.stop_percent * (float)total_steps);
|
||||||
|
total_steps_skipped = 0;
|
||||||
|
H_buf.clear();
|
||||||
|
T_buf.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
float taus(int step_cnt) const {
|
||||||
|
return (step_cnt / 50.0f) * 2.0f - 1.0f;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool should_predict() {
|
||||||
|
if (cnt < config.warmup_steps)
|
||||||
|
return false;
|
||||||
|
if (stop_step > 0 && cnt >= stop_step)
|
||||||
|
return false;
|
||||||
|
if ((int)H_buf.size() < 2)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
int ws = std::max(1, (int)std::floor(curr_ws));
|
||||||
|
return (num_cached + 1) % ws != 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void update(const ggml_tensor* denoised) {
|
||||||
|
int64_t ne = ggml_nelements(denoised);
|
||||||
|
const float* data = (const float*)denoised->data;
|
||||||
|
|
||||||
|
H_buf.emplace_back(data, data + ne);
|
||||||
|
T_buf.push_back(taus(cnt));
|
||||||
|
|
||||||
|
while ((int)H_buf.size() > K) {
|
||||||
|
H_buf.erase(H_buf.begin());
|
||||||
|
T_buf.erase(T_buf.begin());
|
||||||
|
}
|
||||||
|
|
||||||
|
if (cnt >= config.warmup_steps)
|
||||||
|
curr_ws += config.flex_window;
|
||||||
|
|
||||||
|
num_cached = 0;
|
||||||
|
cnt++;
|
||||||
|
}
|
||||||
|
|
||||||
|
void predict(ggml_tensor* denoised) {
|
||||||
|
int64_t F = (int64_t)H_buf[0].size();
|
||||||
|
int K_curr = (int)H_buf.size();
|
||||||
|
int M1 = config.m + 1;
|
||||||
|
float tau_at = taus(cnt);
|
||||||
|
|
||||||
|
// Design matrix X: K_curr x M1 (Chebyshev basis)
|
||||||
|
std::vector<float> X(K_curr * M1);
|
||||||
|
for (int i = 0; i < K_curr; i++) {
|
||||||
|
X[i * M1] = 1.0f;
|
||||||
|
if (M1 > 1)
|
||||||
|
X[i * M1 + 1] = T_buf[i];
|
||||||
|
for (int j = 2; j < M1; j++)
|
||||||
|
X[i * M1 + j] = 2.0f * T_buf[i] * X[i * M1 + j - 1] - X[i * M1 + j - 2];
|
||||||
|
}
|
||||||
|
|
||||||
|
// x_star: Chebyshev basis at current tau
|
||||||
|
std::vector<float> x_star(M1);
|
||||||
|
x_star[0] = 1.0f;
|
||||||
|
if (M1 > 1)
|
||||||
|
x_star[1] = tau_at;
|
||||||
|
for (int j = 2; j < M1; j++)
|
||||||
|
x_star[j] = 2.0f * tau_at * x_star[j - 1] - x_star[j - 2];
|
||||||
|
|
||||||
|
// XtX = X^T X + lambda I
|
||||||
|
std::vector<float> XtX(M1 * M1, 0.0f);
|
||||||
|
for (int i = 0; i < M1; i++) {
|
||||||
|
for (int j = 0; j < M1; j++) {
|
||||||
|
float sum = 0.0f;
|
||||||
|
for (int k = 0; k < K_curr; k++)
|
||||||
|
sum += X[k * M1 + i] * X[k * M1 + j];
|
||||||
|
XtX[i * M1 + j] = sum + (i == j ? config.lam : 0.0f);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cholesky decomposition
|
||||||
|
std::vector<float> L(M1 * M1, 0.0f);
|
||||||
|
if (!cholesky_decompose(XtX.data(), L.data(), M1)) {
|
||||||
|
float trace = 0.0f;
|
||||||
|
for (int i = 0; i < M1; i++)
|
||||||
|
trace += XtX[i * M1 + i];
|
||||||
|
for (int i = 0; i < M1; i++)
|
||||||
|
XtX[i * M1 + i] += 1e-4f * trace / M1;
|
||||||
|
cholesky_decompose(XtX.data(), L.data(), M1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Solve XtX v = x_star
|
||||||
|
std::vector<float> v(M1);
|
||||||
|
cholesky_solve(L.data(), x_star.data(), v.data(), M1);
|
||||||
|
|
||||||
|
// Prediction weights per history entry
|
||||||
|
std::vector<float> weights(K_curr, 0.0f);
|
||||||
|
for (int k = 0; k < K_curr; k++)
|
||||||
|
for (int j = 0; j < M1; j++)
|
||||||
|
weights[k] += X[k * M1 + j] * v[j];
|
||||||
|
|
||||||
|
// Blend Chebyshev and Taylor predictions
|
||||||
|
float* out = (float*)denoised->data;
|
||||||
|
float w_cheb = config.w;
|
||||||
|
float w_taylor = 1.0f - w_cheb;
|
||||||
|
const float* h_last = H_buf.back().data();
|
||||||
|
const float* h_prev = H_buf[H_buf.size() - 2].data();
|
||||||
|
|
||||||
|
for (int64_t f = 0; f < F; f++) {
|
||||||
|
float pred_cheb = 0.0f;
|
||||||
|
for (int k = 0; k < K_curr; k++)
|
||||||
|
pred_cheb += weights[k] * H_buf[k][f];
|
||||||
|
|
||||||
|
float pred_taylor = h_last[f] + 0.5f * (h_last[f] - h_prev[f]);
|
||||||
|
|
||||||
|
out[f] = w_taylor * pred_taylor + w_cheb * pred_cheb;
|
||||||
|
}
|
||||||
|
|
||||||
|
num_cached++;
|
||||||
|
total_steps_skipped++;
|
||||||
|
cnt++;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
static bool cholesky_decompose(const float* A, float* L, int n) {
|
||||||
|
std::memset(L, 0, n * n * sizeof(float));
|
||||||
|
for (int i = 0; i < n; i++) {
|
||||||
|
for (int j = 0; j <= i; j++) {
|
||||||
|
float sum = 0.0f;
|
||||||
|
for (int k = 0; k < j; k++)
|
||||||
|
sum += L[i * n + k] * L[j * n + k];
|
||||||
|
if (i == j) {
|
||||||
|
float diag = A[i * n + i] - sum;
|
||||||
|
if (diag <= 0.0f)
|
||||||
|
return false;
|
||||||
|
L[i * n + j] = std::sqrt(diag);
|
||||||
|
} else {
|
||||||
|
L[i * n + j] = (A[i * n + j] - sum) / L[j * n + j];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void cholesky_solve(const float* L, const float* b, float* x, int n) {
|
||||||
|
std::vector<float> y(n);
|
||||||
|
for (int i = 0; i < n; i++) {
|
||||||
|
float sum = 0.0f;
|
||||||
|
for (int j = 0; j < i; j++)
|
||||||
|
sum += L[i * n + j] * y[j];
|
||||||
|
y[i] = (b[i] - sum) / L[i * n + i];
|
||||||
|
}
|
||||||
|
for (int i = n - 1; i >= 0; i--) {
|
||||||
|
float sum = 0.0f;
|
||||||
|
for (int j = i + 1; j < n; j++)
|
||||||
|
sum += L[j * n + i] * x[j];
|
||||||
|
x[i] = (y[i] - sum) / L[i * n + i];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
#endif // __SPECTRUM_HPP__
|
||||||
File diff suppressed because it is too large
Load Diff
114
src/t5.hpp
114
src/t5.hpp
@ -211,9 +211,9 @@ protected:
|
|||||||
// implementation. It's based on the following three ideas:
|
// implementation. It's based on the following three ideas:
|
||||||
//
|
//
|
||||||
// 1. Because it uses the *unigram* model:
|
// 1. Because it uses the *unigram* model:
|
||||||
// best_score(x1, x2, …, xt) = best_score(x1, x2, …, x{t-1}) + score(xt)
|
// best_score(x1, x2, <EFBFBD>? xt) = best_score(x1, x2, <20>? x{t-1}) + score(xt)
|
||||||
// Deciding the best path (and score) can be decoupled into two isolated
|
// Deciding the best path (and score) can be decoupled into two isolated
|
||||||
// terms: (a) the best path ended before the last token `best_score(x1, x2, …,
|
// terms: (a) the best path ended before the last token `best_score(x1, x2, <EFBFBD>?
|
||||||
// x{t-1})`, and (b) the last token and its `score(xt)`. The two terms are
|
// x{t-1})`, and (b) the last token and its `score(xt)`. The two terms are
|
||||||
// not related to each other at all.
|
// not related to each other at all.
|
||||||
//
|
//
|
||||||
@ -462,7 +462,7 @@ protected:
|
|||||||
int64_t hidden_size;
|
int64_t hidden_size;
|
||||||
float eps;
|
float eps;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
enum ggml_type wtype = GGML_TYPE_F32;
|
enum ggml_type wtype = GGML_TYPE_F32;
|
||||||
params["weight"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
|
params["weight"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
|
||||||
}
|
}
|
||||||
@ -473,10 +473,10 @@ public:
|
|||||||
: hidden_size(hidden_size),
|
: hidden_size(hidden_size),
|
||||||
eps(eps) {}
|
eps(eps) {}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
struct ggml_tensor* w = params["weight"];
|
ggml_tensor* w = params["weight"];
|
||||||
x = ggml_rms_norm(ctx->ggml_ctx, x, eps);
|
x = ggml_rms_norm(ctx->ggml_ctx, x, eps);
|
||||||
x = ggml_mul(ctx->ggml_ctx, x, w);
|
x = ggml_mul(ctx->ggml_ctx, x, w);
|
||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -488,7 +488,7 @@ public:
|
|||||||
blocks["wo"] = std::shared_ptr<GGMLBlock>(new Linear(ff_dim, model_dim, false));
|
blocks["wo"] = std::shared_ptr<GGMLBlock>(new Linear(ff_dim, model_dim, false));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
// x: [N, n_token, model_dim]
|
// x: [N, n_token, model_dim]
|
||||||
auto wi = std::dynamic_pointer_cast<Linear>(blocks["wi"]);
|
auto wi = std::dynamic_pointer_cast<Linear>(blocks["wi"]);
|
||||||
auto wo = std::dynamic_pointer_cast<Linear>(blocks["wo"]);
|
auto wo = std::dynamic_pointer_cast<Linear>(blocks["wo"]);
|
||||||
@ -510,7 +510,7 @@ public:
|
|||||||
blocks["wo"] = std::shared_ptr<GGMLBlock>(new Linear(ff_dim, model_dim, false, false, false, scale));
|
blocks["wo"] = std::shared_ptr<GGMLBlock>(new Linear(ff_dim, model_dim, false, false, false, scale));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
// x: [N, n_token, model_dim]
|
// x: [N, n_token, model_dim]
|
||||||
auto wi_0 = std::dynamic_pointer_cast<Linear>(blocks["wi_0"]);
|
auto wi_0 = std::dynamic_pointer_cast<Linear>(blocks["wi_0"]);
|
||||||
auto wi_1 = std::dynamic_pointer_cast<Linear>(blocks["wi_1"]);
|
auto wi_1 = std::dynamic_pointer_cast<Linear>(blocks["wi_1"]);
|
||||||
@ -531,7 +531,7 @@ public:
|
|||||||
blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new T5LayerNorm(model_dim));
|
blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new T5LayerNorm(model_dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
// x: [N, n_token, model_dim]
|
// x: [N, n_token, model_dim]
|
||||||
auto DenseReluDense = std::dynamic_pointer_cast<T5DenseGatedActDense>(blocks["DenseReluDense"]);
|
auto DenseReluDense = std::dynamic_pointer_cast<T5DenseGatedActDense>(blocks["DenseReluDense"]);
|
||||||
auto layer_norm = std::dynamic_pointer_cast<T5LayerNorm>(blocks["layer_norm"]);
|
auto layer_norm = std::dynamic_pointer_cast<T5LayerNorm>(blocks["layer_norm"]);
|
||||||
@ -570,8 +570,8 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* compute_bias(GGMLRunnerContext* ctx,
|
ggml_tensor* compute_bias(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* relative_position_bucket) {
|
ggml_tensor* relative_position_bucket) {
|
||||||
auto relative_attention_bias = std::dynamic_pointer_cast<Embedding>(blocks["relative_attention_bias"]);
|
auto relative_attention_bias = std::dynamic_pointer_cast<Embedding>(blocks["relative_attention_bias"]);
|
||||||
|
|
||||||
auto values = relative_attention_bias->forward(ctx, relative_position_bucket); // shape (query_length, key_length, num_heads)
|
auto values = relative_attention_bias->forward(ctx, relative_position_bucket); // shape (query_length, key_length, num_heads)
|
||||||
@ -580,11 +580,11 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// x: [N, n_token, model_dim]
|
// x: [N, n_token, model_dim]
|
||||||
std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* past_bias = nullptr,
|
ggml_tensor* past_bias = nullptr,
|
||||||
struct ggml_tensor* mask = nullptr,
|
ggml_tensor* mask = nullptr,
|
||||||
struct ggml_tensor* relative_position_bucket = nullptr) {
|
ggml_tensor* relative_position_bucket = nullptr) {
|
||||||
auto q_proj = std::dynamic_pointer_cast<Linear>(blocks["q"]);
|
auto q_proj = std::dynamic_pointer_cast<Linear>(blocks["q"]);
|
||||||
auto k_proj = std::dynamic_pointer_cast<Linear>(blocks["k"]);
|
auto k_proj = std::dynamic_pointer_cast<Linear>(blocks["k"]);
|
||||||
auto v_proj = std::dynamic_pointer_cast<Linear>(blocks["v"]);
|
auto v_proj = std::dynamic_pointer_cast<Linear>(blocks["v"]);
|
||||||
@ -629,11 +629,11 @@ public:
|
|||||||
blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new T5LayerNorm(model_dim));
|
blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new T5LayerNorm(model_dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* past_bias = nullptr,
|
ggml_tensor* past_bias = nullptr,
|
||||||
struct ggml_tensor* mask = nullptr,
|
ggml_tensor* mask = nullptr,
|
||||||
struct ggml_tensor* relative_position_bucket = nullptr) {
|
ggml_tensor* relative_position_bucket = nullptr) {
|
||||||
// x: [N, n_token, model_dim]
|
// x: [N, n_token, model_dim]
|
||||||
auto SelfAttention = std::dynamic_pointer_cast<T5Attention>(blocks["SelfAttention"]);
|
auto SelfAttention = std::dynamic_pointer_cast<T5Attention>(blocks["SelfAttention"]);
|
||||||
auto layer_norm = std::dynamic_pointer_cast<T5LayerNorm>(blocks["layer_norm"]);
|
auto layer_norm = std::dynamic_pointer_cast<T5LayerNorm>(blocks["layer_norm"]);
|
||||||
@ -655,11 +655,11 @@ public:
|
|||||||
blocks["layer.1"] = std::shared_ptr<GGMLBlock>(new T5LayerFF(model_dim, ff_dim));
|
blocks["layer.1"] = std::shared_ptr<GGMLBlock>(new T5LayerFF(model_dim, ff_dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* past_bias = nullptr,
|
ggml_tensor* past_bias = nullptr,
|
||||||
struct ggml_tensor* mask = nullptr,
|
ggml_tensor* mask = nullptr,
|
||||||
struct ggml_tensor* relative_position_bucket = nullptr) {
|
ggml_tensor* relative_position_bucket = nullptr) {
|
||||||
// x: [N, n_token, model_dim]
|
// x: [N, n_token, model_dim]
|
||||||
auto layer_0 = std::dynamic_pointer_cast<T5LayerSelfAttention>(blocks["layer.0"]);
|
auto layer_0 = std::dynamic_pointer_cast<T5LayerSelfAttention>(blocks["layer.0"]);
|
||||||
auto layer_1 = std::dynamic_pointer_cast<T5LayerFF>(blocks["layer.1"]);
|
auto layer_1 = std::dynamic_pointer_cast<T5LayerFF>(blocks["layer.1"]);
|
||||||
@ -690,11 +690,11 @@ public:
|
|||||||
blocks["final_layer_norm"] = std::shared_ptr<GGMLBlock>(new T5LayerNorm(model_dim));
|
blocks["final_layer_norm"] = std::shared_ptr<GGMLBlock>(new T5LayerNorm(model_dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* past_bias = nullptr,
|
ggml_tensor* past_bias = nullptr,
|
||||||
struct ggml_tensor* attention_mask = nullptr,
|
ggml_tensor* attention_mask = nullptr,
|
||||||
struct ggml_tensor* relative_position_bucket = nullptr) {
|
ggml_tensor* relative_position_bucket = nullptr) {
|
||||||
// x: [N, n_token, model_dim]
|
// x: [N, n_token, model_dim]
|
||||||
for (int i = 0; i < num_layers; i++) {
|
for (int i = 0; i < num_layers; i++) {
|
||||||
auto block = std::dynamic_pointer_cast<T5Block>(blocks["block." + std::to_string(i)]);
|
auto block = std::dynamic_pointer_cast<T5Block>(blocks["block." + std::to_string(i)]);
|
||||||
@ -737,11 +737,11 @@ public:
|
|||||||
params.model_dim));
|
params.model_dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* input_ids,
|
ggml_tensor* input_ids,
|
||||||
struct ggml_tensor* past_bias = nullptr,
|
ggml_tensor* past_bias = nullptr,
|
||||||
struct ggml_tensor* attention_mask = nullptr,
|
ggml_tensor* attention_mask = nullptr,
|
||||||
struct ggml_tensor* relative_position_bucket = nullptr) {
|
ggml_tensor* relative_position_bucket = nullptr) {
|
||||||
// input_ids: [N, n_token]
|
// input_ids: [N, n_token]
|
||||||
|
|
||||||
auto shared = std::dynamic_pointer_cast<Embedding>(blocks["shared"]);
|
auto shared = std::dynamic_pointer_cast<Embedding>(blocks["shared"]);
|
||||||
@ -776,14 +776,14 @@ struct T5Runner : public GGMLRunner {
|
|||||||
return "t5";
|
return "t5";
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
model.get_param_tensors(tensors, prefix);
|
model.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* input_ids,
|
ggml_tensor* input_ids,
|
||||||
struct ggml_tensor* relative_position_bucket,
|
ggml_tensor* relative_position_bucket,
|
||||||
struct ggml_tensor* attention_mask = nullptr) {
|
ggml_tensor* attention_mask = nullptr) {
|
||||||
size_t N = input_ids->ne[1];
|
size_t N = input_ids->ne[1];
|
||||||
size_t n_token = input_ids->ne[0];
|
size_t n_token = input_ids->ne[0];
|
||||||
|
|
||||||
@ -791,9 +791,9 @@ struct T5Runner : public GGMLRunner {
|
|||||||
return hidden_states;
|
return hidden_states;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids,
|
ggml_cgraph* build_graph(ggml_tensor* input_ids,
|
||||||
struct ggml_tensor* attention_mask = nullptr) {
|
ggml_tensor* attention_mask = nullptr) {
|
||||||
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
||||||
|
|
||||||
input_ids = to_backend(input_ids);
|
input_ids = to_backend(input_ids);
|
||||||
attention_mask = to_backend(attention_mask);
|
attention_mask = to_backend(attention_mask);
|
||||||
@ -813,8 +813,8 @@ struct T5Runner : public GGMLRunner {
|
|||||||
input_ids->ne[0]);
|
input_ids->ne[0]);
|
||||||
set_backend_tensor_data(relative_position_bucket, relative_position_bucket_vec.data());
|
set_backend_tensor_data(relative_position_bucket, relative_position_bucket_vec.data());
|
||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
struct ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, relative_position_bucket, attention_mask);
|
ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, relative_position_bucket, attention_mask);
|
||||||
|
|
||||||
ggml_build_forward_expand(gf, hidden_states);
|
ggml_build_forward_expand(gf, hidden_states);
|
||||||
|
|
||||||
@ -822,11 +822,11 @@ struct T5Runner : public GGMLRunner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool compute(const int n_threads,
|
bool compute(const int n_threads,
|
||||||
struct ggml_tensor* input_ids,
|
ggml_tensor* input_ids,
|
||||||
struct ggml_tensor* attention_mask,
|
ggml_tensor* attention_mask,
|
||||||
ggml_tensor** output,
|
ggml_tensor** output,
|
||||||
ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(input_ids, attention_mask);
|
return build_graph(input_ids, attention_mask);
|
||||||
};
|
};
|
||||||
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
|
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
|
||||||
@ -912,7 +912,7 @@ struct T5Embedder {
|
|||||||
: model(backend, offload_params_to_cpu, tensor_storage_map, prefix, is_umt5), tokenizer(is_umt5) {
|
: model(backend, offload_params_to_cpu, tensor_storage_map, prefix, is_umt5), tokenizer(is_umt5) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
model.get_param_tensors(tensors, prefix);
|
model.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -962,17 +962,17 @@ struct T5Embedder {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void test() {
|
void test() {
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
|
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
|
|
||||||
struct ggml_context* work_ctx = ggml_init(params);
|
ggml_context* work_ctx = ggml_init(params);
|
||||||
GGML_ASSERT(work_ctx != nullptr);
|
GGML_ASSERT(work_ctx != nullptr);
|
||||||
|
|
||||||
{
|
{
|
||||||
std::string text("a lovely cat");
|
std::string text("a lovely cat");
|
||||||
// std::string text("一只可爱的猫"); // umt5 chinease test
|
// std::string text("一只可爱的<EFBFBD>?); // umt5 chinease test
|
||||||
auto tokens_and_weights = tokenize(text, 512, true);
|
auto tokens_and_weights = tokenize(text, 512, true);
|
||||||
std::vector<int>& tokens = std::get<0>(tokens_and_weights);
|
std::vector<int>& tokens = std::get<0>(tokens_and_weights);
|
||||||
std::vector<float>& weights = std::get<1>(tokens_and_weights);
|
std::vector<float>& weights = std::get<1>(tokens_and_weights);
|
||||||
@ -981,9 +981,9 @@ struct T5Embedder {
|
|||||||
printf("%d ", token);
|
printf("%d ", token);
|
||||||
}
|
}
|
||||||
printf("\n");
|
printf("\n");
|
||||||
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
|
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
|
||||||
auto attention_mask = vector_to_ggml_tensor(work_ctx, masks);
|
auto attention_mask = vector_to_ggml_tensor(work_ctx, masks);
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
model.compute(8, input_ids, attention_mask, &out, work_ctx);
|
model.compute(8, input_ids, attention_mask, &out, work_ctx);
|
||||||
|
|||||||
207
src/tae.hpp
207
src/tae.hpp
@ -37,7 +37,7 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
// x: [n, n_in, h, w]
|
// x: [n, n_in, h, w]
|
||||||
// return: [n, n_out, h, w]
|
// return: [n, n_out, h, w]
|
||||||
|
|
||||||
@ -107,7 +107,7 @@ public:
|
|||||||
blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, z_channels, {3, 3}, {1, 1}, {1, 1}));
|
blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, z_channels, {3, 3}, {1, 1}, {1, 1}));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
// x: [n, in_channels, h, w]
|
// x: [n, in_channels, h, w]
|
||||||
// return: [n, z_channels, h/8, w/8]
|
// return: [n, z_channels, h/8, w/8]
|
||||||
|
|
||||||
@ -157,7 +157,7 @@ public:
|
|||||||
blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
|
blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* z) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* z) override {
|
||||||
// z: [n, z_channels, h, w]
|
// z: [n, z_channels, h, w]
|
||||||
// return: [n, out_channels, h*8, w*8]
|
// return: [n, out_channels, h*8, w*8]
|
||||||
|
|
||||||
@ -192,7 +192,7 @@ public:
|
|||||||
blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels * stride, channels, {1, 1}, {1, 1}, {0, 0}, {1, 1}, false));
|
blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels * stride, channels, {1, 1}, {1, 1}, {0, 0}, {1, 1}, false));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
auto conv = std::dynamic_pointer_cast<UnaryBlock>(blocks["conv"]);
|
auto conv = std::dynamic_pointer_cast<UnaryBlock>(blocks["conv"]);
|
||||||
auto h = x;
|
auto h = x;
|
||||||
if (stride != 1) {
|
if (stride != 1) {
|
||||||
@ -212,7 +212,7 @@ public:
|
|||||||
blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, channels * stride, {1, 1}, {1, 1}, {0, 0}, {1, 1}, false));
|
blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, channels * stride, {1, 1}, {1, 1}, {0, 0}, {1, 1}, false));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
auto conv = std::dynamic_pointer_cast<UnaryBlock>(blocks["conv"]);
|
auto conv = std::dynamic_pointer_cast<UnaryBlock>(blocks["conv"]);
|
||||||
auto h = conv->forward(ctx, x);
|
auto h = conv->forward(ctx, x);
|
||||||
if (stride != 1) {
|
if (stride != 1) {
|
||||||
@ -236,7 +236,7 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* past) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, ggml_tensor* past) {
|
||||||
// x: [n, channels, h, w]
|
// x: [n, channels, h, w]
|
||||||
auto conv0 = std::dynamic_pointer_cast<Conv2d>(blocks["conv.0"]);
|
auto conv0 = std::dynamic_pointer_cast<Conv2d>(blocks["conv.0"]);
|
||||||
auto conv1 = std::dynamic_pointer_cast<Conv2d>(blocks["conv.2"]);
|
auto conv1 = std::dynamic_pointer_cast<Conv2d>(blocks["conv.2"]);
|
||||||
@ -260,10 +260,10 @@ public:
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
struct ggml_tensor* patchify(struct ggml_context* ctx,
|
ggml_tensor* patchify(ggml_context* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t patch_size,
|
int64_t patch_size,
|
||||||
int64_t b = 1) {
|
int64_t b = 1) {
|
||||||
// x: [f, b*c, h*q, w*r]
|
// x: [f, b*c, h*q, w*r]
|
||||||
// return: [f, b*c*r*q, h, w]
|
// return: [f, b*c*r*q, h, w]
|
||||||
if (patch_size == 1) {
|
if (patch_size == 1) {
|
||||||
@ -289,10 +289,10 @@ struct ggml_tensor* patchify(struct ggml_context* ctx,
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* unpatchify(struct ggml_context* ctx,
|
ggml_tensor* unpatchify(ggml_context* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t patch_size,
|
int64_t patch_size,
|
||||||
int64_t b = 1) {
|
int64_t b = 1) {
|
||||||
// x: [f, b*c*r*q, h, w]
|
// x: [f, b*c*r*q, h, w]
|
||||||
// return: [f, b*c, h*q, w*r]
|
// return: [f, b*c, h*q, w*r]
|
||||||
if (patch_size == 1) {
|
if (patch_size == 1) {
|
||||||
@ -339,7 +339,7 @@ public:
|
|||||||
blocks[std::to_string(index)] = std::shared_ptr<GGMLBlock>(new Conv2d(hidden, z_channels, {3, 3}, {1, 1}, {1, 1}));
|
blocks[std::to_string(index)] = std::shared_ptr<GGMLBlock>(new Conv2d(hidden, z_channels, {3, 3}, {1, 1}, {1, 1}));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* z) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* z) override {
|
||||||
auto first_conv = std::dynamic_pointer_cast<Conv2d>(blocks["0"]);
|
auto first_conv = std::dynamic_pointer_cast<Conv2d>(blocks["0"]);
|
||||||
|
|
||||||
if (patch_size > 1) {
|
if (patch_size > 1) {
|
||||||
@ -396,7 +396,7 @@ public:
|
|||||||
blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels[num_layers], out_channels * patch_size * patch_size, {3, 3}, {1, 1}, {1, 1}));
|
blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels[num_layers], out_channels * patch_size * patch_size, {3, 3}, {1, 1}, {1, 1}));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* z) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* z) override {
|
||||||
auto first_conv = std::dynamic_pointer_cast<Conv2d>(blocks["1"]);
|
auto first_conv = std::dynamic_pointer_cast<Conv2d>(blocks["1"]);
|
||||||
|
|
||||||
// Clamp()
|
// Clamp()
|
||||||
@ -442,11 +442,13 @@ protected:
|
|||||||
bool decode_only;
|
bool decode_only;
|
||||||
SDVersion version;
|
SDVersion version;
|
||||||
|
|
||||||
|
public:
|
||||||
|
int z_channels = 16;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
TAEHV(bool decode_only = true, SDVersion version = VERSION_WAN2)
|
TAEHV(bool decode_only = true, SDVersion version = VERSION_WAN2)
|
||||||
: decode_only(decode_only), version(version) {
|
: decode_only(decode_only), version(version) {
|
||||||
int z_channels = 16;
|
int patch = 1;
|
||||||
int patch = 1;
|
|
||||||
if (version == VERSION_WAN2_2_TI2V) {
|
if (version == VERSION_WAN2_2_TI2V) {
|
||||||
z_channels = 48;
|
z_channels = 48;
|
||||||
patch = 2;
|
patch = 2;
|
||||||
@ -457,7 +459,7 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* decode(GGMLRunnerContext* ctx, struct ggml_tensor* z) {
|
ggml_tensor* decode(GGMLRunnerContext* ctx, ggml_tensor* z) {
|
||||||
auto decoder = std::dynamic_pointer_cast<TinyVideoDecoder>(blocks["decoder"]);
|
auto decoder = std::dynamic_pointer_cast<TinyVideoDecoder>(blocks["decoder"]);
|
||||||
if (sd_version_is_wan(version)) {
|
if (sd_version_is_wan(version)) {
|
||||||
// (W, H, C, T) -> (W, H, T, C)
|
// (W, H, C, T) -> (W, H, T, C)
|
||||||
@ -471,7 +473,7 @@ public:
|
|||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* encode(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* encode(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto encoder = std::dynamic_pointer_cast<TinyVideoEncoder>(blocks["encoder"]);
|
auto encoder = std::dynamic_pointer_cast<TinyVideoEncoder>(blocks["encoder"]);
|
||||||
// (W, H, T, C) -> (W, H, C, T)
|
// (W, H, T, C) -> (W, H, C, T)
|
||||||
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 1, 3, 2));
|
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 1, 3, 2));
|
||||||
@ -494,10 +496,12 @@ protected:
|
|||||||
bool decode_only;
|
bool decode_only;
|
||||||
bool taef2 = false;
|
bool taef2 = false;
|
||||||
|
|
||||||
|
public:
|
||||||
|
int z_channels = 4;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
TAESD(bool decode_only = true, SDVersion version = VERSION_SD1)
|
TAESD(bool decode_only = true, SDVersion version = VERSION_SD1)
|
||||||
: decode_only(decode_only) {
|
: decode_only(decode_only) {
|
||||||
int z_channels = 4;
|
|
||||||
bool use_midblock_gn = false;
|
bool use_midblock_gn = false;
|
||||||
taef2 = sd_version_is_flux2(version);
|
taef2 = sd_version_is_flux2(version);
|
||||||
|
|
||||||
@ -515,7 +519,7 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* decode(GGMLRunnerContext* ctx, struct ggml_tensor* z) {
|
ggml_tensor* decode(GGMLRunnerContext* ctx, ggml_tensor* z) {
|
||||||
auto decoder = std::dynamic_pointer_cast<TinyDecoder>(blocks["decoder.layers"]);
|
auto decoder = std::dynamic_pointer_cast<TinyDecoder>(blocks["decoder.layers"]);
|
||||||
if (taef2) {
|
if (taef2) {
|
||||||
z = unpatchify(ctx->ggml_ctx, z, 2);
|
z = unpatchify(ctx->ggml_ctx, z, 2);
|
||||||
@ -523,7 +527,7 @@ public:
|
|||||||
return decoder->forward(ctx, z);
|
return decoder->forward(ctx, z);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* encode(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* encode(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto encoder = std::dynamic_pointer_cast<TinyEncoder>(blocks["encoder.layers"]);
|
auto encoder = std::dynamic_pointer_cast<TinyEncoder>(blocks["encoder.layers"]);
|
||||||
auto z = encoder->forward(ctx, x);
|
auto z = encoder->forward(ctx, x);
|
||||||
if (taef2) {
|
if (taef2) {
|
||||||
@ -533,20 +537,7 @@ public:
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
struct TinyAutoEncoder : public GGMLRunner {
|
struct TinyImageAutoEncoder : public VAE {
|
||||||
TinyAutoEncoder(ggml_backend_t backend, bool offload_params_to_cpu)
|
|
||||||
: GGMLRunner(backend, offload_params_to_cpu) {}
|
|
||||||
virtual bool compute(const int n_threads,
|
|
||||||
struct ggml_tensor* z,
|
|
||||||
bool decode_graph,
|
|
||||||
struct ggml_tensor** output,
|
|
||||||
struct ggml_context* output_ctx = nullptr) = 0;
|
|
||||||
|
|
||||||
virtual bool load_from_file(const std::string& file_path, int n_threads) = 0;
|
|
||||||
virtual void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) = 0;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct TinyImageAutoEncoder : public TinyAutoEncoder {
|
|
||||||
TAESD taesd;
|
TAESD taesd;
|
||||||
bool decode_only = false;
|
bool decode_only = false;
|
||||||
|
|
||||||
@ -558,7 +549,8 @@ struct TinyImageAutoEncoder : public TinyAutoEncoder {
|
|||||||
SDVersion version = VERSION_SD1)
|
SDVersion version = VERSION_SD1)
|
||||||
: decode_only(decoder_only),
|
: decode_only(decoder_only),
|
||||||
taesd(decoder_only, version),
|
taesd(decoder_only, version),
|
||||||
TinyAutoEncoder(backend, offload_params_to_cpu) {
|
VAE(version, backend, offload_params_to_cpu) {
|
||||||
|
scale_input = false;
|
||||||
taesd.init(params_ctx, tensor_storage_map, prefix);
|
taesd.init(params_ctx, tensor_storage_map, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -566,52 +558,41 @@ struct TinyImageAutoEncoder : public TinyAutoEncoder {
|
|||||||
return "taesd";
|
return "taesd";
|
||||||
}
|
}
|
||||||
|
|
||||||
bool load_from_file(const std::string& file_path, int n_threads) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
LOG_INFO("loading taesd from '%s', decode_only = %s", file_path.c_str(), decode_only ? "true" : "false");
|
|
||||||
alloc_params_buffer();
|
|
||||||
std::map<std::string, ggml_tensor*> taesd_tensors;
|
|
||||||
taesd.get_param_tensors(taesd_tensors);
|
|
||||||
std::set<std::string> ignore_tensors;
|
|
||||||
if (decode_only) {
|
|
||||||
ignore_tensors.insert("encoder.");
|
|
||||||
}
|
|
||||||
|
|
||||||
ModelLoader model_loader;
|
|
||||||
if (!model_loader.init_from_file_and_convert_name(file_path)) {
|
|
||||||
LOG_ERROR("init taesd model loader from file failed: '%s'", file_path.c_str());
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool success = model_loader.load_tensors(taesd_tensors, ignore_tensors, n_threads);
|
|
||||||
|
|
||||||
if (!success) {
|
|
||||||
LOG_ERROR("load tae tensors from model loader failed");
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
LOG_INFO("taesd model loaded");
|
|
||||||
return success;
|
|
||||||
}
|
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
|
||||||
taesd.get_param_tensors(tensors, prefix);
|
taesd.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) {
|
ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
|
||||||
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
return vae_output;
|
||||||
z = to_backend(z);
|
}
|
||||||
auto runner_ctx = get_context();
|
|
||||||
struct ggml_tensor* out = decode_graph ? taesd.decode(&runner_ctx, z) : taesd.encode(&runner_ctx, z);
|
ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
|
||||||
|
return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
|
||||||
|
return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
|
||||||
|
}
|
||||||
|
|
||||||
|
int get_encoder_output_channels(int input_channels) {
|
||||||
|
return taesd.z_channels;
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_cgraph* build_graph(ggml_tensor* z, bool decode_graph) {
|
||||||
|
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
||||||
|
z = to_backend(z);
|
||||||
|
auto runner_ctx = get_context();
|
||||||
|
ggml_tensor* out = decode_graph ? taesd.decode(&runner_ctx, z) : taesd.encode(&runner_ctx, z);
|
||||||
ggml_build_forward_expand(gf, out);
|
ggml_build_forward_expand(gf, out);
|
||||||
return gf;
|
return gf;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool compute(const int n_threads,
|
bool _compute(const int n_threads,
|
||||||
struct ggml_tensor* z,
|
ggml_tensor* z,
|
||||||
bool decode_graph,
|
bool decode_graph,
|
||||||
struct ggml_tensor** output,
|
ggml_tensor** output,
|
||||||
struct ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(z, decode_graph);
|
return build_graph(z, decode_graph);
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -619,7 +600,7 @@ struct TinyImageAutoEncoder : public TinyAutoEncoder {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
struct TinyVideoAutoEncoder : public TinyAutoEncoder {
|
struct TinyVideoAutoEncoder : public VAE {
|
||||||
TAEHV taehv;
|
TAEHV taehv;
|
||||||
bool decode_only = false;
|
bool decode_only = false;
|
||||||
|
|
||||||
@ -631,7 +612,8 @@ struct TinyVideoAutoEncoder : public TinyAutoEncoder {
|
|||||||
SDVersion version = VERSION_WAN2)
|
SDVersion version = VERSION_WAN2)
|
||||||
: decode_only(decoder_only),
|
: decode_only(decoder_only),
|
||||||
taehv(decoder_only, version),
|
taehv(decoder_only, version),
|
||||||
TinyAutoEncoder(backend, offload_params_to_cpu) {
|
VAE(version, backend, offload_params_to_cpu) {
|
||||||
|
scale_input = false;
|
||||||
taehv.init(params_ctx, tensor_storage_map, prefix);
|
taehv.init(params_ctx, tensor_storage_map, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -639,52 +621,41 @@ struct TinyVideoAutoEncoder : public TinyAutoEncoder {
|
|||||||
return "taehv";
|
return "taehv";
|
||||||
}
|
}
|
||||||
|
|
||||||
bool load_from_file(const std::string& file_path, int n_threads) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
LOG_INFO("loading taehv from '%s', decode_only = %s", file_path.c_str(), decode_only ? "true" : "false");
|
|
||||||
alloc_params_buffer();
|
|
||||||
std::map<std::string, ggml_tensor*> taehv_tensors;
|
|
||||||
taehv.get_param_tensors(taehv_tensors);
|
|
||||||
std::set<std::string> ignore_tensors;
|
|
||||||
if (decode_only) {
|
|
||||||
ignore_tensors.insert("encoder.");
|
|
||||||
}
|
|
||||||
|
|
||||||
ModelLoader model_loader;
|
|
||||||
if (!model_loader.init_from_file(file_path)) {
|
|
||||||
LOG_ERROR("init taehv model loader from file failed: '%s'", file_path.c_str());
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool success = model_loader.load_tensors(taehv_tensors, ignore_tensors, n_threads);
|
|
||||||
|
|
||||||
if (!success) {
|
|
||||||
LOG_ERROR("load tae tensors from model loader failed");
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
LOG_INFO("taehv model loaded");
|
|
||||||
return success;
|
|
||||||
}
|
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
|
||||||
taehv.get_param_tensors(tensors, prefix);
|
taehv.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) {
|
ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
|
||||||
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
return vae_output;
|
||||||
z = to_backend(z);
|
}
|
||||||
auto runner_ctx = get_context();
|
|
||||||
struct ggml_tensor* out = decode_graph ? taehv.decode(&runner_ctx, z) : taehv.encode(&runner_ctx, z);
|
ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
|
||||||
|
return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
|
||||||
|
return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
|
||||||
|
}
|
||||||
|
|
||||||
|
int get_encoder_output_channels(int input_channels) {
|
||||||
|
return taehv.z_channels;
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_cgraph* build_graph(ggml_tensor* z, bool decode_graph) {
|
||||||
|
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
||||||
|
z = to_backend(z);
|
||||||
|
auto runner_ctx = get_context();
|
||||||
|
ggml_tensor* out = decode_graph ? taehv.decode(&runner_ctx, z) : taehv.encode(&runner_ctx, z);
|
||||||
ggml_build_forward_expand(gf, out);
|
ggml_build_forward_expand(gf, out);
|
||||||
return gf;
|
return gf;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool compute(const int n_threads,
|
bool _compute(const int n_threads,
|
||||||
struct ggml_tensor* z,
|
ggml_tensor* z,
|
||||||
bool decode_graph,
|
bool decode_graph,
|
||||||
struct ggml_tensor** output,
|
ggml_tensor** output,
|
||||||
struct ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(z, decode_graph);
|
return build_graph(z, decode_graph);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@ -19,6 +19,7 @@ struct UCacheConfig {
|
|||||||
bool adaptive_threshold = true;
|
bool adaptive_threshold = true;
|
||||||
float early_step_multiplier = 0.5f;
|
float early_step_multiplier = 0.5f;
|
||||||
float late_step_multiplier = 1.5f;
|
float late_step_multiplier = 1.5f;
|
||||||
|
float relative_norm_gain = 1.6f;
|
||||||
bool reset_error_on_compute = true;
|
bool reset_error_on_compute = true;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -45,14 +46,16 @@ struct UCacheState {
|
|||||||
bool has_output_prev_norm = false;
|
bool has_output_prev_norm = false;
|
||||||
bool has_relative_transformation_rate = false;
|
bool has_relative_transformation_rate = false;
|
||||||
float relative_transformation_rate = 0.0f;
|
float relative_transformation_rate = 0.0f;
|
||||||
float cumulative_change_rate = 0.0f;
|
|
||||||
float last_input_change = 0.0f;
|
float last_input_change = 0.0f;
|
||||||
bool has_last_input_change = false;
|
bool has_last_input_change = false;
|
||||||
|
float output_change_ema = 0.0f;
|
||||||
|
bool has_output_change_ema = false;
|
||||||
int total_steps_skipped = 0;
|
int total_steps_skipped = 0;
|
||||||
int current_step_index = -1;
|
int current_step_index = -1;
|
||||||
int steps_computed_since_active = 0;
|
int steps_computed_since_active = 0;
|
||||||
|
int expected_total_steps = 0;
|
||||||
|
int consecutive_skipped_steps = 0;
|
||||||
float accumulated_error = 0.0f;
|
float accumulated_error = 0.0f;
|
||||||
float reference_output_norm = 0.0f;
|
|
||||||
|
|
||||||
struct BlockMetrics {
|
struct BlockMetrics {
|
||||||
float sum_transformation_rate = 0.0f;
|
float sum_transformation_rate = 0.0f;
|
||||||
@ -106,14 +109,16 @@ struct UCacheState {
|
|||||||
has_output_prev_norm = false;
|
has_output_prev_norm = false;
|
||||||
has_relative_transformation_rate = false;
|
has_relative_transformation_rate = false;
|
||||||
relative_transformation_rate = 0.0f;
|
relative_transformation_rate = 0.0f;
|
||||||
cumulative_change_rate = 0.0f;
|
|
||||||
last_input_change = 0.0f;
|
last_input_change = 0.0f;
|
||||||
has_last_input_change = false;
|
has_last_input_change = false;
|
||||||
|
output_change_ema = 0.0f;
|
||||||
|
has_output_change_ema = false;
|
||||||
total_steps_skipped = 0;
|
total_steps_skipped = 0;
|
||||||
current_step_index = -1;
|
current_step_index = -1;
|
||||||
steps_computed_since_active = 0;
|
steps_computed_since_active = 0;
|
||||||
|
expected_total_steps = 0;
|
||||||
|
consecutive_skipped_steps = 0;
|
||||||
accumulated_error = 0.0f;
|
accumulated_error = 0.0f;
|
||||||
reference_output_norm = 0.0f;
|
|
||||||
block_metrics.reset();
|
block_metrics.reset();
|
||||||
total_active_steps = 0;
|
total_active_steps = 0;
|
||||||
}
|
}
|
||||||
@ -133,7 +138,8 @@ struct UCacheState {
|
|||||||
if (!initialized || sigmas.size() < 2) {
|
if (!initialized || sigmas.size() < 2) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
size_t n_steps = sigmas.size() - 1;
|
size_t n_steps = sigmas.size() - 1;
|
||||||
|
expected_total_steps = static_cast<int>(n_steps);
|
||||||
|
|
||||||
size_t start_step = static_cast<size_t>(config.start_percent * n_steps);
|
size_t start_step = static_cast<size_t>(config.start_percent * n_steps);
|
||||||
size_t end_step = static_cast<size_t>(config.end_percent * n_steps);
|
size_t end_step = static_cast<size_t>(config.end_percent * n_steps);
|
||||||
@ -207,11 +213,15 @@ struct UCacheState {
|
|||||||
}
|
}
|
||||||
|
|
||||||
int effective_total = estimated_total_steps;
|
int effective_total = estimated_total_steps;
|
||||||
|
if (effective_total <= 0) {
|
||||||
|
effective_total = expected_total_steps;
|
||||||
|
}
|
||||||
if (effective_total <= 0) {
|
if (effective_total <= 0) {
|
||||||
effective_total = std::max(20, steps_computed_since_active * 2);
|
effective_total = std::max(20, steps_computed_since_active * 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
float progress = (effective_total > 0) ? (static_cast<float>(steps_computed_since_active) / effective_total) : 0.0f;
|
float progress = (effective_total > 0) ? (static_cast<float>(steps_computed_since_active) / effective_total) : 0.0f;
|
||||||
|
progress = std::max(0.0f, std::min(1.0f, progress));
|
||||||
|
|
||||||
float multiplier = 1.0f;
|
float multiplier = 1.0f;
|
||||||
if (progress < 0.2f) {
|
if (progress < 0.2f) {
|
||||||
@ -309,17 +319,31 @@ struct UCacheState {
|
|||||||
|
|
||||||
if (has_output_prev_norm && has_relative_transformation_rate &&
|
if (has_output_prev_norm && has_relative_transformation_rate &&
|
||||||
last_input_change > 0.0f && output_prev_norm > 0.0f) {
|
last_input_change > 0.0f && output_prev_norm > 0.0f) {
|
||||||
float approx_output_change_rate = (relative_transformation_rate * last_input_change) / output_prev_norm;
|
float approx_output_change = relative_transformation_rate * last_input_change;
|
||||||
accumulated_error = accumulated_error * config.error_decay_rate + approx_output_change_rate;
|
float approx_output_change_rate;
|
||||||
|
if (config.use_relative_threshold) {
|
||||||
|
float base_scale = std::max(output_prev_norm, 1e-6f);
|
||||||
|
float dyn_scale = has_output_change_ema
|
||||||
|
? std::max(output_change_ema * std::max(1.0f, config.relative_norm_gain), 1e-6f)
|
||||||
|
: base_scale;
|
||||||
|
float scale = std::sqrt(base_scale * dyn_scale);
|
||||||
|
approx_output_change_rate = approx_output_change / scale;
|
||||||
|
} else {
|
||||||
|
approx_output_change_rate = approx_output_change;
|
||||||
|
}
|
||||||
|
// Increase estimated error with skip horizon to avoid long extrapolation streaks
|
||||||
|
approx_output_change_rate *= (1.0f + 0.50f * consecutive_skipped_steps);
|
||||||
|
accumulated_error = accumulated_error * config.error_decay_rate + approx_output_change_rate;
|
||||||
|
|
||||||
float effective_threshold = get_adaptive_threshold();
|
float effective_threshold = get_adaptive_threshold();
|
||||||
if (config.use_relative_threshold && reference_output_norm > 0.0f) {
|
if (!config.use_relative_threshold && output_prev_norm > 0.0f) {
|
||||||
effective_threshold = effective_threshold * reference_output_norm;
|
effective_threshold = effective_threshold * output_prev_norm;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (accumulated_error < effective_threshold) {
|
if (accumulated_error < effective_threshold) {
|
||||||
skip_current_step = true;
|
skip_current_step = true;
|
||||||
total_steps_skipped++;
|
total_steps_skipped++;
|
||||||
|
consecutive_skipped_steps++;
|
||||||
apply_cache(cond, input, output);
|
apply_cache(cond, input, output);
|
||||||
return true;
|
return true;
|
||||||
} else if (config.reset_error_on_compute) {
|
} else if (config.reset_error_on_compute) {
|
||||||
@ -340,6 +364,8 @@ struct UCacheState {
|
|||||||
if (cond != anchor_condition) {
|
if (cond != anchor_condition) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
steps_computed_since_active++;
|
||||||
|
consecutive_skipped_steps = 0;
|
||||||
|
|
||||||
size_t ne = static_cast<size_t>(ggml_nelements(input));
|
size_t ne = static_cast<size_t>(ggml_nelements(input));
|
||||||
float* in_data = (float*)input->data;
|
float* in_data = (float*)input->data;
|
||||||
@ -359,6 +385,14 @@ struct UCacheState {
|
|||||||
output_change /= static_cast<float>(ne);
|
output_change /= static_cast<float>(ne);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (std::isfinite(output_change) && output_change > 0.0f) {
|
||||||
|
if (!has_output_change_ema) {
|
||||||
|
output_change_ema = output_change;
|
||||||
|
has_output_change_ema = true;
|
||||||
|
} else {
|
||||||
|
output_change_ema = 0.8f * output_change_ema + 0.2f * output_change;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
prev_output.resize(ne);
|
prev_output.resize(ne);
|
||||||
for (size_t i = 0; i < ne; ++i) {
|
for (size_t i = 0; i < ne; ++i) {
|
||||||
@ -373,10 +407,6 @@ struct UCacheState {
|
|||||||
output_prev_norm = (ne > 0) ? (mean_abs / static_cast<float>(ne)) : 0.0f;
|
output_prev_norm = (ne > 0) ? (mean_abs / static_cast<float>(ne)) : 0.0f;
|
||||||
has_output_prev_norm = output_prev_norm > 0.0f;
|
has_output_prev_norm = output_prev_norm > 0.0f;
|
||||||
|
|
||||||
if (reference_output_norm == 0.0f) {
|
|
||||||
reference_output_norm = output_prev_norm;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (has_last_input_change && last_input_change > 0.0f && output_change > 0.0f) {
|
if (has_last_input_change && last_input_change > 0.0f && output_change > 0.0f) {
|
||||||
float rate = output_change / last_input_change;
|
float rate = output_change / last_input_change;
|
||||||
if (std::isfinite(rate)) {
|
if (std::isfinite(rate)) {
|
||||||
|
|||||||
114
src/unet.hpp
114
src/unet.hpp
@ -60,10 +60,10 @@ public:
|
|||||||
blocks["time_mixer"] = std::shared_ptr<GGMLBlock>(new AlphaBlender());
|
blocks["time_mixer"] = std::shared_ptr<GGMLBlock>(new AlphaBlender());
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
int timesteps) {
|
int timesteps) {
|
||||||
// x: [N, in_channels, h, w] aka [b*t, in_channels, h, w], t == timesteps
|
// x: [N, in_channels, h, w] aka [b*t, in_channels, h, w], t == timesteps
|
||||||
// context: [N, max_position(aka n_context), hidden_size(aka context_dim)] aka [b*t, n_context, context_dim], t == timesteps
|
// context: [N, max_position(aka n_context), hidden_size(aka context_dim)] aka [b*t, n_context, context_dim], t == timesteps
|
||||||
// t_emb: [N, in_channels] aka [b*t, in_channels]
|
// t_emb: [N, in_channels] aka [b*t, in_channels]
|
||||||
@ -388,11 +388,11 @@ public:
|
|||||||
blocks["out.2"] = std::shared_ptr<GGMLBlock>(new Conv2d(model_channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
|
blocks["out.2"] = std::shared_ptr<GGMLBlock>(new Conv2d(model_channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* resblock_forward(std::string name,
|
ggml_tensor* resblock_forward(std::string name,
|
||||||
GGMLRunnerContext* ctx,
|
GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* emb,
|
ggml_tensor* emb,
|
||||||
int num_video_frames) {
|
int num_video_frames) {
|
||||||
if (version == VERSION_SVD) {
|
if (version == VERSION_SVD) {
|
||||||
auto block = std::dynamic_pointer_cast<VideoResBlock>(blocks[name]);
|
auto block = std::dynamic_pointer_cast<VideoResBlock>(blocks[name]);
|
||||||
|
|
||||||
@ -404,11 +404,11 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* attention_layer_forward(std::string name,
|
ggml_tensor* attention_layer_forward(std::string name,
|
||||||
GGMLRunnerContext* ctx,
|
GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
int timesteps) {
|
int timesteps) {
|
||||||
if (version == VERSION_SVD) {
|
if (version == VERSION_SVD) {
|
||||||
auto block = std::dynamic_pointer_cast<SpatialVideoTransformer>(blocks[name]);
|
auto block = std::dynamic_pointer_cast<SpatialVideoTransformer>(blocks[name]);
|
||||||
|
|
||||||
@ -420,15 +420,15 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* c_concat = nullptr,
|
ggml_tensor* c_concat = nullptr,
|
||||||
struct ggml_tensor* y = nullptr,
|
ggml_tensor* y = nullptr,
|
||||||
int num_video_frames = -1,
|
int num_video_frames = -1,
|
||||||
std::vector<struct ggml_tensor*> controls = {},
|
std::vector<ggml_tensor*> controls = {},
|
||||||
float control_strength = 0.f) {
|
float control_strength = 0.f) {
|
||||||
// x: [N, in_channels, h, w] or [N, in_channels/2, h, w]
|
// x: [N, in_channels, h, w] or [N, in_channels/2, h, w]
|
||||||
// timesteps: [N,]
|
// timesteps: [N,]
|
||||||
// context: [N, max_position, hidden_size] or [1, max_position, hidden_size]. for example, [N, 77, 768]
|
// context: [N, max_position, hidden_size] or [1, max_position, hidden_size]. for example, [N, 77, 768]
|
||||||
@ -480,7 +480,7 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// input_blocks
|
// input_blocks
|
||||||
std::vector<struct ggml_tensor*> hs;
|
std::vector<ggml_tensor*> hs;
|
||||||
|
|
||||||
// input block 0
|
// input block 0
|
||||||
auto h = input_blocks_0_0->forward(ctx, x);
|
auto h = input_blocks_0_0->forward(ctx, x);
|
||||||
@ -605,19 +605,19 @@ struct UNetModelRunner : public GGMLRunner {
|
|||||||
return "unet";
|
return "unet";
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
unet.get_param_tensors(tensors, prefix);
|
unet.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* x,
|
ggml_cgraph* build_graph(ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* c_concat = nullptr,
|
ggml_tensor* c_concat = nullptr,
|
||||||
struct ggml_tensor* y = nullptr,
|
ggml_tensor* y = nullptr,
|
||||||
int num_video_frames = -1,
|
int num_video_frames = -1,
|
||||||
std::vector<struct ggml_tensor*> controls = {},
|
std::vector<ggml_tensor*> controls = {},
|
||||||
float control_strength = 0.f) {
|
float control_strength = 0.f) {
|
||||||
struct ggml_cgraph* gf = new_graph_custom(UNET_GRAPH_SIZE);
|
ggml_cgraph* gf = new_graph_custom(UNET_GRAPH_SIZE);
|
||||||
|
|
||||||
if (num_video_frames == -1) {
|
if (num_video_frames == -1) {
|
||||||
num_video_frames = static_cast<int>(x->ne[3]);
|
num_video_frames = static_cast<int>(x->ne[3]);
|
||||||
@ -635,15 +635,15 @@ struct UNetModelRunner : public GGMLRunner {
|
|||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
|
|
||||||
struct ggml_tensor* out = unet.forward(&runner_ctx,
|
ggml_tensor* out = unet.forward(&runner_ctx,
|
||||||
x,
|
x,
|
||||||
timesteps,
|
timesteps,
|
||||||
context,
|
context,
|
||||||
c_concat,
|
c_concat,
|
||||||
y,
|
y,
|
||||||
num_video_frames,
|
num_video_frames,
|
||||||
controls,
|
controls,
|
||||||
control_strength);
|
control_strength);
|
||||||
|
|
||||||
ggml_build_forward_expand(gf, out);
|
ggml_build_forward_expand(gf, out);
|
||||||
|
|
||||||
@ -651,22 +651,22 @@ struct UNetModelRunner : public GGMLRunner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* c_concat,
|
ggml_tensor* c_concat,
|
||||||
struct ggml_tensor* y,
|
ggml_tensor* y,
|
||||||
int num_video_frames = -1,
|
int num_video_frames = -1,
|
||||||
std::vector<struct ggml_tensor*> controls = {},
|
std::vector<ggml_tensor*> controls = {},
|
||||||
float control_strength = 0.f,
|
float control_strength = 0.f,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
// x: [N, in_channels, h, w]
|
// x: [N, in_channels, h, w]
|
||||||
// timesteps: [N, ]
|
// timesteps: [N, ]
|
||||||
// context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
|
// context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
|
||||||
// c_concat: [N, in_channels, h, w] or [1, in_channels, h, w]
|
// c_concat: [N, in_channels, h, w] or [1, in_channels, h, w]
|
||||||
// y: [N, adm_in_channels] or [1, adm_in_channels]
|
// y: [N, adm_in_channels] or [1, adm_in_channels]
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(x, timesteps, context, c_concat, y, num_video_frames, controls, control_strength);
|
return build_graph(x, timesteps, context, c_concat, y, num_video_frames, controls, control_strength);
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -674,12 +674,12 @@ struct UNetModelRunner : public GGMLRunner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void test() {
|
void test() {
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
|
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
|
|
||||||
struct ggml_context* work_ctx = ggml_init(params);
|
ggml_context* work_ctx = ggml_init(params);
|
||||||
GGML_ASSERT(work_ctx != nullptr);
|
GGML_ASSERT(work_ctx != nullptr);
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -703,7 +703,7 @@ struct UNetModelRunner : public GGMLRunner {
|
|||||||
ggml_set_f32(y, 0.5f);
|
ggml_set_f32(y, 0.5f);
|
||||||
// print_ggml_tensor(y);
|
// print_ggml_tensor(y);
|
||||||
|
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
compute(8, x, timesteps, context, nullptr, y, num_video_frames, {}, 0.f, &out, work_ctx);
|
compute(8, x, timesteps, context, nullptr, y, num_video_frames, {}, 0.f, &out, work_ctx);
|
||||||
|
|||||||
@ -72,13 +72,13 @@ struct UpscalerGGML {
|
|||||||
LOG_INFO("upscaling from (%i x %i) to (%i x %i)",
|
LOG_INFO("upscaling from (%i x %i) to (%i x %i)",
|
||||||
input_image.width, input_image.height, output_width, output_height);
|
input_image.width, input_image.height, output_width, output_height);
|
||||||
|
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
|
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
|
|
||||||
// draft context
|
// draft context
|
||||||
struct ggml_context* upscale_ctx = ggml_init(params);
|
ggml_context* upscale_ctx = ggml_init(params);
|
||||||
if (!upscale_ctx) {
|
if (!upscale_ctx) {
|
||||||
LOG_ERROR("ggml_init() failed");
|
LOG_ERROR("ggml_init() failed");
|
||||||
return upscaled_image;
|
return upscaled_image;
|
||||||
@ -92,7 +92,8 @@ struct UpscalerGGML {
|
|||||||
return esrgan_upscaler->compute(n_threads, in, &out);
|
return esrgan_upscaler->compute(n_threads, in, &out);
|
||||||
};
|
};
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
sd_tiling(input_image_tensor, upscaled, esrgan_upscaler->scale, esrgan_upscaler->tile_size, 0.25f, on_tiling);
|
// TODO: circular upscaling?
|
||||||
|
sd_tiling(input_image_tensor, upscaled, esrgan_upscaler->scale, esrgan_upscaler->tile_size, 0.25f, false, false, on_tiling);
|
||||||
esrgan_upscaler->free_compute_buffer();
|
esrgan_upscaler->free_compute_buffer();
|
||||||
ggml_ext_tensor_clamp_inplace(upscaled, 0.f, 1.f);
|
ggml_ext_tensor_clamp_inplace(upscaled, 0.f, 1.f);
|
||||||
uint8_t* upscaled_data = ggml_tensor_to_sd_image(upscaled);
|
uint8_t* upscaled_data = ggml_tensor_to_sd_image(upscaled);
|
||||||
|
|||||||
933
src/vae.hpp
933
src/vae.hpp
@ -3,635 +3,206 @@
|
|||||||
|
|
||||||
#include "common_block.hpp"
|
#include "common_block.hpp"
|
||||||
|
|
||||||
/*================================================== AutoEncoderKL ===================================================*/
|
struct VAE : public GGMLRunner {
|
||||||
|
|
||||||
#define VAE_GRAPH_SIZE 20480
|
|
||||||
|
|
||||||
class ResnetBlock : public UnaryBlock {
|
|
||||||
protected:
|
|
||||||
int64_t in_channels;
|
|
||||||
int64_t out_channels;
|
|
||||||
|
|
||||||
public:
|
|
||||||
ResnetBlock(int64_t in_channels,
|
|
||||||
int64_t out_channels)
|
|
||||||
: in_channels(in_channels),
|
|
||||||
out_channels(out_channels) {
|
|
||||||
// temb_channels is always 0
|
|
||||||
blocks["norm1"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(in_channels));
|
|
||||||
blocks["conv1"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
|
|
||||||
|
|
||||||
blocks["norm2"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(out_channels));
|
|
||||||
blocks["conv2"] = std::shared_ptr<GGMLBlock>(new Conv2d(out_channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
|
|
||||||
|
|
||||||
if (out_channels != in_channels) {
|
|
||||||
blocks["nin_shortcut"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, out_channels, {1, 1}));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
|
||||||
// x: [N, in_channels, h, w]
|
|
||||||
// t_emb is always None
|
|
||||||
auto norm1 = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm1"]);
|
|
||||||
auto conv1 = std::dynamic_pointer_cast<Conv2d>(blocks["conv1"]);
|
|
||||||
auto norm2 = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm2"]);
|
|
||||||
auto conv2 = std::dynamic_pointer_cast<Conv2d>(blocks["conv2"]);
|
|
||||||
|
|
||||||
auto h = x;
|
|
||||||
h = norm1->forward(ctx, h);
|
|
||||||
h = ggml_silu_inplace(ctx->ggml_ctx, h); // swish
|
|
||||||
h = conv1->forward(ctx, h);
|
|
||||||
// return h;
|
|
||||||
|
|
||||||
h = norm2->forward(ctx, h);
|
|
||||||
h = ggml_silu_inplace(ctx->ggml_ctx, h); // swish
|
|
||||||
// dropout, skip for inference
|
|
||||||
h = conv2->forward(ctx, h);
|
|
||||||
|
|
||||||
// skip connection
|
|
||||||
if (out_channels != in_channels) {
|
|
||||||
auto nin_shortcut = std::dynamic_pointer_cast<Conv2d>(blocks["nin_shortcut"]);
|
|
||||||
|
|
||||||
x = nin_shortcut->forward(ctx, x); // [N, out_channels, h, w]
|
|
||||||
}
|
|
||||||
|
|
||||||
h = ggml_add(ctx->ggml_ctx, h, x);
|
|
||||||
return h; // [N, out_channels, h, w]
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
class AttnBlock : public UnaryBlock {
|
|
||||||
protected:
|
|
||||||
int64_t in_channels;
|
|
||||||
bool use_linear;
|
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") {
|
|
||||||
auto iter = tensor_storage_map.find(prefix + "proj_out.weight");
|
|
||||||
if (iter != tensor_storage_map.end()) {
|
|
||||||
if (iter->second.n_dims == 4 && use_linear) {
|
|
||||||
use_linear = false;
|
|
||||||
blocks["q"] = std::make_shared<Conv2d>(in_channels, in_channels, std::pair{1, 1});
|
|
||||||
blocks["k"] = std::make_shared<Conv2d>(in_channels, in_channels, std::pair{1, 1});
|
|
||||||
blocks["v"] = std::make_shared<Conv2d>(in_channels, in_channels, std::pair{1, 1});
|
|
||||||
blocks["proj_out"] = std::make_shared<Conv2d>(in_channels, in_channels, std::pair{1, 1});
|
|
||||||
} else if (iter->second.n_dims == 2 && !use_linear) {
|
|
||||||
use_linear = true;
|
|
||||||
blocks["q"] = std::make_shared<Linear>(in_channels, in_channels);
|
|
||||||
blocks["k"] = std::make_shared<Linear>(in_channels, in_channels);
|
|
||||||
blocks["v"] = std::make_shared<Linear>(in_channels, in_channels);
|
|
||||||
blocks["proj_out"] = std::make_shared<Linear>(in_channels, in_channels);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public:
|
|
||||||
AttnBlock(int64_t in_channels, bool use_linear)
|
|
||||||
: in_channels(in_channels), use_linear(use_linear) {
|
|
||||||
blocks["norm"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(in_channels));
|
|
||||||
if (use_linear) {
|
|
||||||
blocks["q"] = std::shared_ptr<GGMLBlock>(new Linear(in_channels, in_channels));
|
|
||||||
blocks["k"] = std::shared_ptr<GGMLBlock>(new Linear(in_channels, in_channels));
|
|
||||||
blocks["v"] = std::shared_ptr<GGMLBlock>(new Linear(in_channels, in_channels));
|
|
||||||
blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Linear(in_channels, in_channels));
|
|
||||||
} else {
|
|
||||||
blocks["q"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
|
|
||||||
blocks["k"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
|
|
||||||
blocks["v"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
|
|
||||||
blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
|
||||||
// x: [N, in_channels, h, w]
|
|
||||||
auto norm = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm"]);
|
|
||||||
auto q_proj = std::dynamic_pointer_cast<UnaryBlock>(blocks["q"]);
|
|
||||||
auto k_proj = std::dynamic_pointer_cast<UnaryBlock>(blocks["k"]);
|
|
||||||
auto v_proj = std::dynamic_pointer_cast<UnaryBlock>(blocks["v"]);
|
|
||||||
auto proj_out = std::dynamic_pointer_cast<UnaryBlock>(blocks["proj_out"]);
|
|
||||||
|
|
||||||
auto h_ = norm->forward(ctx, x);
|
|
||||||
|
|
||||||
const int64_t n = h_->ne[3];
|
|
||||||
const int64_t c = h_->ne[2];
|
|
||||||
const int64_t h = h_->ne[1];
|
|
||||||
const int64_t w = h_->ne[0];
|
|
||||||
|
|
||||||
ggml_tensor* q;
|
|
||||||
ggml_tensor* k;
|
|
||||||
ggml_tensor* v;
|
|
||||||
if (use_linear) {
|
|
||||||
h_ = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, h_, 1, 2, 0, 3)); // [N, h, w, in_channels]
|
|
||||||
h_ = ggml_reshape_3d(ctx->ggml_ctx, h_, c, h * w, n); // [N, h * w, in_channels]
|
|
||||||
|
|
||||||
q = q_proj->forward(ctx, h_); // [N, h * w, in_channels]
|
|
||||||
k = k_proj->forward(ctx, h_); // [N, h * w, in_channels]
|
|
||||||
v = v_proj->forward(ctx, h_); // [N, h * w, in_channels]
|
|
||||||
} else {
|
|
||||||
q = q_proj->forward(ctx, h_); // [N, in_channels, h, w]
|
|
||||||
q = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, q, 1, 2, 0, 3)); // [N, h, w, in_channels]
|
|
||||||
q = ggml_reshape_3d(ctx->ggml_ctx, q, c, h * w, n); // [N, h * w, in_channels]
|
|
||||||
|
|
||||||
k = k_proj->forward(ctx, h_); // [N, in_channels, h, w]
|
|
||||||
k = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, k, 1, 2, 0, 3)); // [N, h, w, in_channels]
|
|
||||||
k = ggml_reshape_3d(ctx->ggml_ctx, k, c, h * w, n); // [N, h * w, in_channels]
|
|
||||||
|
|
||||||
v = v_proj->forward(ctx, h_); // [N, in_channels, h, w]
|
|
||||||
v = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, v, 1, 2, 0, 3)); // [N, h, w, in_channels]
|
|
||||||
v = ggml_reshape_3d(ctx->ggml_ctx, v, c, h * w, n); // [N, h * w, in_channels]
|
|
||||||
}
|
|
||||||
|
|
||||||
h_ = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, 1, nullptr, false, ctx->flash_attn_enabled);
|
|
||||||
|
|
||||||
if (use_linear) {
|
|
||||||
h_ = proj_out->forward(ctx, h_); // [N, h * w, in_channels]
|
|
||||||
|
|
||||||
h_ = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, h_, 1, 0, 2, 3)); // [N, in_channels, h * w]
|
|
||||||
h_ = ggml_reshape_4d(ctx->ggml_ctx, h_, w, h, c, n); // [N, in_channels, h, w]
|
|
||||||
} else {
|
|
||||||
h_ = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, h_, 1, 0, 2, 3)); // [N, in_channels, h * w]
|
|
||||||
h_ = ggml_reshape_4d(ctx->ggml_ctx, h_, w, h, c, n); // [N, in_channels, h, w]
|
|
||||||
|
|
||||||
h_ = proj_out->forward(ctx, h_); // [N, in_channels, h, w]
|
|
||||||
}
|
|
||||||
|
|
||||||
h_ = ggml_add(ctx->ggml_ctx, h_, x);
|
|
||||||
return h_;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
class AE3DConv : public Conv2d {
|
|
||||||
public:
|
|
||||||
AE3DConv(int64_t in_channels,
|
|
||||||
int64_t out_channels,
|
|
||||||
std::pair<int, int> kernel_size,
|
|
||||||
int video_kernel_size = 3,
|
|
||||||
std::pair<int, int> stride = {1, 1},
|
|
||||||
std::pair<int, int> padding = {0, 0},
|
|
||||||
std::pair<int, int> dilation = {1, 1},
|
|
||||||
bool bias = true)
|
|
||||||
: Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias) {
|
|
||||||
int kernel_padding = video_kernel_size / 2;
|
|
||||||
blocks["time_mix_conv"] = std::shared_ptr<GGMLBlock>(new Conv3d(out_channels,
|
|
||||||
out_channels,
|
|
||||||
{video_kernel_size, 1, 1},
|
|
||||||
{1, 1, 1},
|
|
||||||
{kernel_padding, 0, 0}));
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
|
||||||
struct ggml_tensor* x) override {
|
|
||||||
// timesteps always None
|
|
||||||
// skip_video always False
|
|
||||||
// x: [N, IC, IH, IW]
|
|
||||||
// result: [N, OC, OH, OW]
|
|
||||||
auto time_mix_conv = std::dynamic_pointer_cast<Conv3d>(blocks["time_mix_conv"]);
|
|
||||||
|
|
||||||
x = Conv2d::forward(ctx, x);
|
|
||||||
// timesteps = x.shape[0]
|
|
||||||
// x = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps)
|
|
||||||
// x = conv3d(x)
|
|
||||||
// return rearrange(x, "b c t h w -> (b t) c h w")
|
|
||||||
int64_t T = x->ne[3];
|
|
||||||
int64_t B = x->ne[3] / T;
|
|
||||||
int64_t C = x->ne[2];
|
|
||||||
int64_t H = x->ne[1];
|
|
||||||
int64_t W = x->ne[0];
|
|
||||||
|
|
||||||
x = ggml_reshape_4d(ctx->ggml_ctx, x, W * H, C, T, B); // (b t) c h w -> b t c (h w)
|
|
||||||
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b t c (h w) -> b c t (h w)
|
|
||||||
x = time_mix_conv->forward(ctx, x); // [B, OC, T, OH * OW]
|
|
||||||
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b c t (h w) -> b t c (h w)
|
|
||||||
x = ggml_reshape_4d(ctx->ggml_ctx, x, W, H, C, T * B); // b t c (h w) -> (b t) c h w
|
|
||||||
return x; // [B*T, OC, OH, OW]
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
class VideoResnetBlock : public ResnetBlock {
|
|
||||||
protected:
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
|
||||||
enum ggml_type wtype = get_type(prefix + "mix_factor", tensor_storage_map, GGML_TYPE_F32);
|
|
||||||
params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
float get_alpha() {
|
|
||||||
float alpha = ggml_ext_backend_tensor_get_f32(params["mix_factor"]);
|
|
||||||
return sigmoid(alpha);
|
|
||||||
}
|
|
||||||
|
|
||||||
public:
|
|
||||||
VideoResnetBlock(int64_t in_channels,
|
|
||||||
int64_t out_channels,
|
|
||||||
int video_kernel_size = 3)
|
|
||||||
: ResnetBlock(in_channels, out_channels) {
|
|
||||||
// merge_strategy is always learned
|
|
||||||
blocks["time_stack"] = std::shared_ptr<GGMLBlock>(new ResBlock(out_channels, 0, out_channels, {video_kernel_size, 1}, 3, false, true));
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
|
||||||
// x: [N, in_channels, h, w] aka [b*t, in_channels, h, w]
|
|
||||||
// return: [N, out_channels, h, w] aka [b*t, out_channels, h, w]
|
|
||||||
// t_emb is always None
|
|
||||||
// skip_video is always False
|
|
||||||
// timesteps is always None
|
|
||||||
auto time_stack = std::dynamic_pointer_cast<ResBlock>(blocks["time_stack"]);
|
|
||||||
|
|
||||||
x = ResnetBlock::forward(ctx, x); // [N, out_channels, h, w]
|
|
||||||
// return x;
|
|
||||||
|
|
||||||
int64_t T = x->ne[3];
|
|
||||||
int64_t B = x->ne[3] / T;
|
|
||||||
int64_t C = x->ne[2];
|
|
||||||
int64_t H = x->ne[1];
|
|
||||||
int64_t W = x->ne[0];
|
|
||||||
|
|
||||||
x = ggml_reshape_4d(ctx->ggml_ctx, x, W * H, C, T, B); // (b t) c h w -> b t c (h w)
|
|
||||||
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b t c (h w) -> b c t (h w)
|
|
||||||
auto x_mix = x;
|
|
||||||
|
|
||||||
x = time_stack->forward(ctx, x); // b t c (h w)
|
|
||||||
|
|
||||||
float alpha = get_alpha();
|
|
||||||
x = ggml_add(ctx->ggml_ctx,
|
|
||||||
ggml_ext_scale(ctx->ggml_ctx, x, alpha),
|
|
||||||
ggml_ext_scale(ctx->ggml_ctx, x_mix, 1.0f - alpha));
|
|
||||||
|
|
||||||
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b c t (h w) -> b t c (h w)
|
|
||||||
x = ggml_reshape_4d(ctx->ggml_ctx, x, W, H, C, T * B); // b t c (h w) -> (b t) c h w
|
|
||||||
|
|
||||||
return x;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// ldm.modules.diffusionmodules.model.Encoder
|
|
||||||
class Encoder : public GGMLBlock {
|
|
||||||
protected:
|
|
||||||
int ch = 128;
|
|
||||||
std::vector<int> ch_mult = {1, 2, 4, 4};
|
|
||||||
int num_res_blocks = 2;
|
|
||||||
int in_channels = 3;
|
|
||||||
int z_channels = 4;
|
|
||||||
bool double_z = true;
|
|
||||||
|
|
||||||
public:
|
|
||||||
Encoder(int ch,
|
|
||||||
std::vector<int> ch_mult,
|
|
||||||
int num_res_blocks,
|
|
||||||
int in_channels,
|
|
||||||
int z_channels,
|
|
||||||
bool double_z = true,
|
|
||||||
bool use_linear_projection = false)
|
|
||||||
: ch(ch),
|
|
||||||
ch_mult(ch_mult),
|
|
||||||
num_res_blocks(num_res_blocks),
|
|
||||||
in_channels(in_channels),
|
|
||||||
z_channels(z_channels),
|
|
||||||
double_z(double_z) {
|
|
||||||
blocks["conv_in"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, ch, {3, 3}, {1, 1}, {1, 1}));
|
|
||||||
|
|
||||||
size_t num_resolutions = ch_mult.size();
|
|
||||||
|
|
||||||
int block_in = 1;
|
|
||||||
for (int i = 0; i < num_resolutions; i++) {
|
|
||||||
if (i == 0) {
|
|
||||||
block_in = ch;
|
|
||||||
} else {
|
|
||||||
block_in = ch * ch_mult[i - 1];
|
|
||||||
}
|
|
||||||
int block_out = ch * ch_mult[i];
|
|
||||||
for (int j = 0; j < num_res_blocks; j++) {
|
|
||||||
std::string name = "down." + std::to_string(i) + ".block." + std::to_string(j);
|
|
||||||
blocks[name] = std::shared_ptr<GGMLBlock>(new ResnetBlock(block_in, block_out));
|
|
||||||
block_in = block_out;
|
|
||||||
}
|
|
||||||
if (i != num_resolutions - 1) {
|
|
||||||
std::string name = "down." + std::to_string(i) + ".downsample";
|
|
||||||
blocks[name] = std::shared_ptr<GGMLBlock>(new DownSampleBlock(block_in, block_in, true));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
blocks["mid.block_1"] = std::shared_ptr<GGMLBlock>(new ResnetBlock(block_in, block_in));
|
|
||||||
blocks["mid.attn_1"] = std::shared_ptr<GGMLBlock>(new AttnBlock(block_in, use_linear_projection));
|
|
||||||
blocks["mid.block_2"] = std::shared_ptr<GGMLBlock>(new ResnetBlock(block_in, block_in));
|
|
||||||
|
|
||||||
blocks["norm_out"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(block_in));
|
|
||||||
blocks["conv_out"] = std::shared_ptr<GGMLBlock>(new Conv2d(block_in, double_z ? z_channels * 2 : z_channels, {3, 3}, {1, 1}, {1, 1}));
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
|
||||||
// x: [N, in_channels, h, w]
|
|
||||||
|
|
||||||
auto conv_in = std::dynamic_pointer_cast<Conv2d>(blocks["conv_in"]);
|
|
||||||
auto mid_block_1 = std::dynamic_pointer_cast<ResnetBlock>(blocks["mid.block_1"]);
|
|
||||||
auto mid_attn_1 = std::dynamic_pointer_cast<AttnBlock>(blocks["mid.attn_1"]);
|
|
||||||
auto mid_block_2 = std::dynamic_pointer_cast<ResnetBlock>(blocks["mid.block_2"]);
|
|
||||||
auto norm_out = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm_out"]);
|
|
||||||
auto conv_out = std::dynamic_pointer_cast<Conv2d>(blocks["conv_out"]);
|
|
||||||
|
|
||||||
auto h = conv_in->forward(ctx, x); // [N, ch, h, w]
|
|
||||||
|
|
||||||
// downsampling
|
|
||||||
size_t num_resolutions = ch_mult.size();
|
|
||||||
for (int i = 0; i < num_resolutions; i++) {
|
|
||||||
for (int j = 0; j < num_res_blocks; j++) {
|
|
||||||
std::string name = "down." + std::to_string(i) + ".block." + std::to_string(j);
|
|
||||||
auto down_block = std::dynamic_pointer_cast<ResnetBlock>(blocks[name]);
|
|
||||||
|
|
||||||
h = down_block->forward(ctx, h);
|
|
||||||
}
|
|
||||||
if (i != num_resolutions - 1) {
|
|
||||||
std::string name = "down." + std::to_string(i) + ".downsample";
|
|
||||||
auto down_sample = std::dynamic_pointer_cast<DownSampleBlock>(blocks[name]);
|
|
||||||
|
|
||||||
h = down_sample->forward(ctx, h);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// middle
|
|
||||||
h = mid_block_1->forward(ctx, h);
|
|
||||||
h = mid_attn_1->forward(ctx, h);
|
|
||||||
h = mid_block_2->forward(ctx, h); // [N, block_in, h, w]
|
|
||||||
|
|
||||||
// end
|
|
||||||
h = norm_out->forward(ctx, h);
|
|
||||||
h = ggml_silu_inplace(ctx->ggml_ctx, h); // nonlinearity/swish
|
|
||||||
h = conv_out->forward(ctx, h); // [N, z_channels*2, h, w]
|
|
||||||
return h;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// ldm.modules.diffusionmodules.model.Decoder
|
|
||||||
class Decoder : public GGMLBlock {
|
|
||||||
protected:
|
|
||||||
int ch = 128;
|
|
||||||
int out_ch = 3;
|
|
||||||
std::vector<int> ch_mult = {1, 2, 4, 4};
|
|
||||||
int num_res_blocks = 2;
|
|
||||||
int z_channels = 4;
|
|
||||||
bool video_decoder = false;
|
|
||||||
int video_kernel_size = 3;
|
|
||||||
|
|
||||||
virtual std::shared_ptr<GGMLBlock> get_conv_out(int64_t in_channels,
|
|
||||||
int64_t out_channels,
|
|
||||||
std::pair<int, int> kernel_size,
|
|
||||||
std::pair<int, int> stride = {1, 1},
|
|
||||||
std::pair<int, int> padding = {0, 0}) {
|
|
||||||
if (video_decoder) {
|
|
||||||
return std::shared_ptr<GGMLBlock>(new AE3DConv(in_channels, out_channels, kernel_size, video_kernel_size, stride, padding));
|
|
||||||
} else {
|
|
||||||
return std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, out_channels, kernel_size, stride, padding));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual std::shared_ptr<GGMLBlock> get_resnet_block(int64_t in_channels,
|
|
||||||
int64_t out_channels) {
|
|
||||||
if (video_decoder) {
|
|
||||||
return std::shared_ptr<GGMLBlock>(new VideoResnetBlock(in_channels, out_channels, video_kernel_size));
|
|
||||||
} else {
|
|
||||||
return std::shared_ptr<GGMLBlock>(new ResnetBlock(in_channels, out_channels));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public:
|
|
||||||
Decoder(int ch,
|
|
||||||
int out_ch,
|
|
||||||
std::vector<int> ch_mult,
|
|
||||||
int num_res_blocks,
|
|
||||||
int z_channels,
|
|
||||||
bool use_linear_projection = false,
|
|
||||||
bool video_decoder = false,
|
|
||||||
int video_kernel_size = 3)
|
|
||||||
: ch(ch),
|
|
||||||
out_ch(out_ch),
|
|
||||||
ch_mult(ch_mult),
|
|
||||||
num_res_blocks(num_res_blocks),
|
|
||||||
z_channels(z_channels),
|
|
||||||
video_decoder(video_decoder),
|
|
||||||
video_kernel_size(video_kernel_size) {
|
|
||||||
int num_resolutions = static_cast<int>(ch_mult.size());
|
|
||||||
int block_in = ch * ch_mult[num_resolutions - 1];
|
|
||||||
|
|
||||||
blocks["conv_in"] = std::shared_ptr<GGMLBlock>(new Conv2d(z_channels, block_in, {3, 3}, {1, 1}, {1, 1}));
|
|
||||||
|
|
||||||
blocks["mid.block_1"] = get_resnet_block(block_in, block_in);
|
|
||||||
blocks["mid.attn_1"] = std::shared_ptr<GGMLBlock>(new AttnBlock(block_in, use_linear_projection));
|
|
||||||
blocks["mid.block_2"] = get_resnet_block(block_in, block_in);
|
|
||||||
|
|
||||||
for (int i = num_resolutions - 1; i >= 0; i--) {
|
|
||||||
int mult = ch_mult[i];
|
|
||||||
int block_out = ch * mult;
|
|
||||||
for (int j = 0; j < num_res_blocks + 1; j++) {
|
|
||||||
std::string name = "up." + std::to_string(i) + ".block." + std::to_string(j);
|
|
||||||
blocks[name] = get_resnet_block(block_in, block_out);
|
|
||||||
|
|
||||||
block_in = block_out;
|
|
||||||
}
|
|
||||||
if (i != 0) {
|
|
||||||
std::string name = "up." + std::to_string(i) + ".upsample";
|
|
||||||
blocks[name] = std::shared_ptr<GGMLBlock>(new UpSampleBlock(block_in, block_in));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
blocks["norm_out"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(block_in));
|
|
||||||
blocks["conv_out"] = get_conv_out(block_in, out_ch, {3, 3}, {1, 1}, {1, 1});
|
|
||||||
}
|
|
||||||
|
|
||||||
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* z) {
|
|
||||||
// z: [N, z_channels, h, w]
|
|
||||||
// alpha is always 0
|
|
||||||
// merge_strategy is always learned
|
|
||||||
// time_mode is always conv-only, so we need to replace conv_out_op/resnet_op to AE3DConv/VideoResBlock
|
|
||||||
// AttnVideoBlock will not be used
|
|
||||||
auto conv_in = std::dynamic_pointer_cast<Conv2d>(blocks["conv_in"]);
|
|
||||||
auto mid_block_1 = std::dynamic_pointer_cast<ResnetBlock>(blocks["mid.block_1"]);
|
|
||||||
auto mid_attn_1 = std::dynamic_pointer_cast<AttnBlock>(blocks["mid.attn_1"]);
|
|
||||||
auto mid_block_2 = std::dynamic_pointer_cast<ResnetBlock>(blocks["mid.block_2"]);
|
|
||||||
auto norm_out = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm_out"]);
|
|
||||||
auto conv_out = std::dynamic_pointer_cast<Conv2d>(blocks["conv_out"]);
|
|
||||||
|
|
||||||
// conv_in
|
|
||||||
auto h = conv_in->forward(ctx, z); // [N, block_in, h, w]
|
|
||||||
|
|
||||||
// middle
|
|
||||||
h = mid_block_1->forward(ctx, h);
|
|
||||||
// return h;
|
|
||||||
|
|
||||||
h = mid_attn_1->forward(ctx, h);
|
|
||||||
h = mid_block_2->forward(ctx, h); // [N, block_in, h, w]
|
|
||||||
|
|
||||||
// upsampling
|
|
||||||
int num_resolutions = static_cast<int>(ch_mult.size());
|
|
||||||
for (int i = num_resolutions - 1; i >= 0; i--) {
|
|
||||||
for (int j = 0; j < num_res_blocks + 1; j++) {
|
|
||||||
std::string name = "up." + std::to_string(i) + ".block." + std::to_string(j);
|
|
||||||
auto up_block = std::dynamic_pointer_cast<ResnetBlock>(blocks[name]);
|
|
||||||
|
|
||||||
h = up_block->forward(ctx, h);
|
|
||||||
}
|
|
||||||
if (i != 0) {
|
|
||||||
std::string name = "up." + std::to_string(i) + ".upsample";
|
|
||||||
auto up_sample = std::dynamic_pointer_cast<UpSampleBlock>(blocks[name]);
|
|
||||||
|
|
||||||
h = up_sample->forward(ctx, h);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
h = norm_out->forward(ctx, h);
|
|
||||||
h = ggml_silu_inplace(ctx->ggml_ctx, h); // nonlinearity/swish
|
|
||||||
h = conv_out->forward(ctx, h); // [N, out_ch, h*8, w*8]
|
|
||||||
return h;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// ldm.models.autoencoder.AutoencoderKL
|
|
||||||
class AutoencodingEngine : public GGMLBlock {
|
|
||||||
protected:
|
protected:
|
||||||
SDVersion version;
|
SDVersion version;
|
||||||
bool decode_only = true;
|
bool scale_input = true;
|
||||||
bool use_video_decoder = false;
|
virtual bool _compute(const int n_threads,
|
||||||
bool use_quant = true;
|
ggml_tensor* z,
|
||||||
int embed_dim = 4;
|
bool decode_graph,
|
||||||
struct {
|
ggml_tensor** output,
|
||||||
int z_channels = 4;
|
ggml_context* output_ctx) = 0;
|
||||||
int resolution = 256;
|
|
||||||
int in_channels = 3;
|
|
||||||
int out_ch = 3;
|
|
||||||
int ch = 128;
|
|
||||||
std::vector<int> ch_mult = {1, 2, 4, 4};
|
|
||||||
int num_res_blocks = 2;
|
|
||||||
bool double_z = true;
|
|
||||||
} dd_config;
|
|
||||||
|
|
||||||
public:
|
public:
|
||||||
AutoencodingEngine(SDVersion version = VERSION_SD1,
|
VAE(SDVersion version, ggml_backend_t backend, bool offload_params_to_cpu)
|
||||||
bool decode_only = true,
|
: version(version), GGMLRunner(backend, offload_params_to_cpu) {}
|
||||||
bool use_linear_projection = false,
|
|
||||||
bool use_video_decoder = false)
|
int get_scale_factor() {
|
||||||
: version(version), decode_only(decode_only), use_video_decoder(use_video_decoder) {
|
int scale_factor = 8;
|
||||||
if (sd_version_is_dit(version)) {
|
if (version == VERSION_WAN2_2_TI2V) {
|
||||||
if (sd_version_is_flux2(version)) {
|
scale_factor = 16;
|
||||||
dd_config.z_channels = 32;
|
} else if (sd_version_is_flux2(version)) {
|
||||||
embed_dim = 32;
|
scale_factor = 16;
|
||||||
|
} else if (version == VERSION_CHROMA_RADIANCE) {
|
||||||
|
scale_factor = 1;
|
||||||
|
}
|
||||||
|
return scale_factor;
|
||||||
|
}
|
||||||
|
|
||||||
|
virtual int get_encoder_output_channels(int input_channels) = 0;
|
||||||
|
|
||||||
|
void get_tile_sizes(int& tile_size_x,
|
||||||
|
int& tile_size_y,
|
||||||
|
float& tile_overlap,
|
||||||
|
const sd_tiling_params_t& params,
|
||||||
|
int64_t latent_x,
|
||||||
|
int64_t latent_y,
|
||||||
|
float encoding_factor = 1.0f) {
|
||||||
|
tile_overlap = std::max(std::min(params.target_overlap, 0.5f), 0.0f);
|
||||||
|
auto get_tile_size = [&](int requested_size, float factor, int64_t latent_size) {
|
||||||
|
const int default_tile_size = 32;
|
||||||
|
const int min_tile_dimension = 4;
|
||||||
|
int tile_size = default_tile_size;
|
||||||
|
// factor <= 1 means simple fraction of the latent dimension
|
||||||
|
// factor > 1 means number of tiles across that dimension
|
||||||
|
if (factor > 0.f) {
|
||||||
|
if (factor > 1.0)
|
||||||
|
factor = 1 / (factor - factor * tile_overlap + tile_overlap);
|
||||||
|
tile_size = static_cast<int>(std::round(latent_size * factor));
|
||||||
|
} else if (requested_size >= min_tile_dimension) {
|
||||||
|
tile_size = requested_size;
|
||||||
|
}
|
||||||
|
tile_size = static_cast<int>(tile_size * encoding_factor);
|
||||||
|
return std::max(std::min(tile_size, static_cast<int>(latent_size)), min_tile_dimension);
|
||||||
|
};
|
||||||
|
|
||||||
|
tile_size_x = get_tile_size(params.tile_size_x, params.rel_size_x, latent_x);
|
||||||
|
tile_size_y = get_tile_size(params.tile_size_y, params.rel_size_y, latent_y);
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_tensor* encode(int n_threads,
|
||||||
|
ggml_context* work_ctx,
|
||||||
|
ggml_tensor* x,
|
||||||
|
sd_tiling_params_t tiling_params,
|
||||||
|
bool circular_x = false,
|
||||||
|
bool circular_y = false) {
|
||||||
|
int64_t t0 = ggml_time_ms();
|
||||||
|
ggml_tensor* result = nullptr;
|
||||||
|
const int scale_factor = get_scale_factor();
|
||||||
|
int64_t W = x->ne[0] / scale_factor;
|
||||||
|
int64_t H = x->ne[1] / scale_factor;
|
||||||
|
int channel_dim = sd_version_is_wan(version) ? 3 : 2;
|
||||||
|
int64_t C = get_encoder_output_channels(static_cast<int>(x->ne[channel_dim]));
|
||||||
|
int64_t ne2;
|
||||||
|
int64_t ne3;
|
||||||
|
if (sd_version_is_wan(version)) {
|
||||||
|
int64_t T = x->ne[2];
|
||||||
|
ne2 = (T - 1) / 4 + 1;
|
||||||
|
ne3 = C;
|
||||||
|
} else {
|
||||||
|
ne2 = C;
|
||||||
|
ne3 = x->ne[3];
|
||||||
|
}
|
||||||
|
result = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, W, H, ne2, ne3);
|
||||||
|
|
||||||
|
if (scale_input) {
|
||||||
|
scale_to_minus1_1(x);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (sd_version_is_qwen_image(version) || sd_version_is_anima(version)) {
|
||||||
|
x = ggml_reshape_4d(work_ctx, x, x->ne[0], x->ne[1], 1, x->ne[2] * x->ne[3]);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (tiling_params.enabled) {
|
||||||
|
float tile_overlap;
|
||||||
|
int tile_size_x, tile_size_y;
|
||||||
|
// multiply tile size for encode to keep the compute buffer size consistent
|
||||||
|
get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, tiling_params, W, H, 1.30539f);
|
||||||
|
|
||||||
|
LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y);
|
||||||
|
|
||||||
|
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
|
||||||
|
return _compute(n_threads, in, false, &out, work_ctx);
|
||||||
|
};
|
||||||
|
sd_tiling_non_square(x, result, scale_factor, tile_size_x, tile_size_y, tile_overlap, circular_x, circular_y, on_tiling);
|
||||||
|
} else {
|
||||||
|
_compute(n_threads, x, false, &result, work_ctx);
|
||||||
|
}
|
||||||
|
free_compute_buffer();
|
||||||
|
|
||||||
|
int64_t t1 = ggml_time_ms();
|
||||||
|
LOG_DEBUG("computing vae encode graph completed, taking %.2fs", (t1 - t0) * 1.0f / 1000);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_tensor* decode(int n_threads,
|
||||||
|
ggml_context* work_ctx,
|
||||||
|
ggml_tensor* x,
|
||||||
|
sd_tiling_params_t tiling_params,
|
||||||
|
bool decode_video = false,
|
||||||
|
bool circular_x = false,
|
||||||
|
bool circular_y = false,
|
||||||
|
ggml_tensor* result = nullptr,
|
||||||
|
bool silent = false) {
|
||||||
|
const int scale_factor = get_scale_factor();
|
||||||
|
int64_t W = x->ne[0] * scale_factor;
|
||||||
|
int64_t H = x->ne[1] * scale_factor;
|
||||||
|
int64_t C = 3;
|
||||||
|
if (result == nullptr) {
|
||||||
|
if (decode_video) {
|
||||||
|
int64_t T = x->ne[2];
|
||||||
|
if (sd_version_is_wan(version)) {
|
||||||
|
T = ((T - 1) * 4) + 1;
|
||||||
|
}
|
||||||
|
result = ggml_new_tensor_4d(work_ctx,
|
||||||
|
GGML_TYPE_F32,
|
||||||
|
W,
|
||||||
|
H,
|
||||||
|
T,
|
||||||
|
3);
|
||||||
} else {
|
} else {
|
||||||
use_quant = false;
|
result = ggml_new_tensor_4d(work_ctx,
|
||||||
dd_config.z_channels = 16;
|
GGML_TYPE_F32,
|
||||||
|
W,
|
||||||
|
H,
|
||||||
|
C,
|
||||||
|
x->ne[3]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (use_video_decoder) {
|
int64_t t0 = ggml_time_ms();
|
||||||
use_quant = false;
|
if (sd_version_is_qwen_image(version) || sd_version_is_anima(version)) {
|
||||||
|
x = ggml_reshape_4d(work_ctx, x, x->ne[0], x->ne[1], 1, x->ne[2] * x->ne[3]);
|
||||||
}
|
}
|
||||||
blocks["decoder"] = std::shared_ptr<GGMLBlock>(new Decoder(dd_config.ch,
|
if (tiling_params.enabled) {
|
||||||
dd_config.out_ch,
|
float tile_overlap;
|
||||||
dd_config.ch_mult,
|
int tile_size_x, tile_size_y;
|
||||||
dd_config.num_res_blocks,
|
get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, tiling_params, x->ne[0], x->ne[1]);
|
||||||
dd_config.z_channels,
|
|
||||||
use_linear_projection,
|
|
||||||
use_video_decoder));
|
|
||||||
if (use_quant) {
|
|
||||||
blocks["post_quant_conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(dd_config.z_channels,
|
|
||||||
embed_dim,
|
|
||||||
{1, 1}));
|
|
||||||
}
|
|
||||||
if (!decode_only) {
|
|
||||||
blocks["encoder"] = std::shared_ptr<GGMLBlock>(new Encoder(dd_config.ch,
|
|
||||||
dd_config.ch_mult,
|
|
||||||
dd_config.num_res_blocks,
|
|
||||||
dd_config.in_channels,
|
|
||||||
dd_config.z_channels,
|
|
||||||
dd_config.double_z,
|
|
||||||
use_linear_projection));
|
|
||||||
if (use_quant) {
|
|
||||||
int factor = dd_config.double_z ? 2 : 1;
|
|
||||||
|
|
||||||
blocks["quant_conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(embed_dim * factor,
|
if (!silent) {
|
||||||
dd_config.z_channels * factor,
|
LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y);
|
||||||
{1, 1}));
|
}
|
||||||
|
|
||||||
|
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
|
||||||
|
return _compute(n_threads, in, true, &out, nullptr);
|
||||||
|
};
|
||||||
|
sd_tiling_non_square(x, result, scale_factor, tile_size_x, tile_size_y, tile_overlap, circular_x, circular_y, on_tiling, silent);
|
||||||
|
} else {
|
||||||
|
if (!_compute(n_threads, x, true, &result, work_ctx)) {
|
||||||
|
LOG_ERROR("Failed to decode latetnts");
|
||||||
|
free_compute_buffer();
|
||||||
|
return nullptr;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
free_compute_buffer();
|
||||||
|
if (scale_input) {
|
||||||
|
scale_to_0_1(result);
|
||||||
|
}
|
||||||
|
int64_t t1 = ggml_time_ms();
|
||||||
|
LOG_DEBUG("computing vae decode graph completed, taking %.2fs", (t1 - t0) * 1.0f / 1000);
|
||||||
|
ggml_ext_tensor_clamp_inplace(result, 0.0f, 1.0f);
|
||||||
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* decode(GGMLRunnerContext* ctx, struct ggml_tensor* z) {
|
virtual ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) = 0;
|
||||||
// z: [N, z_channels, h, w]
|
virtual ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) = 0;
|
||||||
if (sd_version_is_flux2(version)) {
|
virtual ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) = 0;
|
||||||
// [N, C*p*p, h, w] -> [N, C, h*p, w*p]
|
virtual void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) = 0;
|
||||||
int64_t p = 2;
|
|
||||||
|
|
||||||
int64_t N = z->ne[3];
|
|
||||||
int64_t C = z->ne[2] / p / p;
|
|
||||||
int64_t h = z->ne[1];
|
|
||||||
int64_t w = z->ne[0];
|
|
||||||
int64_t H = h * p;
|
|
||||||
int64_t W = w * p;
|
|
||||||
|
|
||||||
z = ggml_reshape_4d(ctx->ggml_ctx, z, w * h, p * p, C, N); // [N, C, p*p, h*w]
|
|
||||||
z = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, z, 1, 0, 2, 3)); // [N, C, h*w, p*p]
|
|
||||||
z = ggml_reshape_4d(ctx->ggml_ctx, z, p, p, w, h * C * N); // [N*C*h, w, p, p]
|
|
||||||
z = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, z, 0, 2, 1, 3)); // [N*C*h, p, w, p]
|
|
||||||
z = ggml_reshape_4d(ctx->ggml_ctx, z, W, H, C, N); // [N, C, h*p, w*p]
|
|
||||||
}
|
|
||||||
|
|
||||||
if (use_quant) {
|
|
||||||
auto post_quant_conv = std::dynamic_pointer_cast<Conv2d>(blocks["post_quant_conv"]);
|
|
||||||
z = post_quant_conv->forward(ctx, z); // [N, z_channels, h, w]
|
|
||||||
}
|
|
||||||
auto decoder = std::dynamic_pointer_cast<Decoder>(blocks["decoder"]);
|
|
||||||
|
|
||||||
ggml_set_name(z, "bench-start");
|
|
||||||
auto h = decoder->forward(ctx, z);
|
|
||||||
ggml_set_name(h, "bench-end");
|
|
||||||
return h;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ggml_tensor* encode(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
|
||||||
// x: [N, in_channels, h, w]
|
|
||||||
auto encoder = std::dynamic_pointer_cast<Encoder>(blocks["encoder"]);
|
|
||||||
|
|
||||||
auto z = encoder->forward(ctx, x); // [N, 2*z_channels, h/8, w/8]
|
|
||||||
if (use_quant) {
|
|
||||||
auto quant_conv = std::dynamic_pointer_cast<Conv2d>(blocks["quant_conv"]);
|
|
||||||
z = quant_conv->forward(ctx, z); // [N, 2*embed_dim, h/8, w/8]
|
|
||||||
}
|
|
||||||
if (sd_version_is_flux2(version)) {
|
|
||||||
z = ggml_ext_chunk(ctx->ggml_ctx, z, 2, 2)[0];
|
|
||||||
|
|
||||||
// [N, C, H, W] -> [N, C*p*p, H/p, W/p]
|
|
||||||
int64_t p = 2;
|
|
||||||
int64_t N = z->ne[3];
|
|
||||||
int64_t C = z->ne[2];
|
|
||||||
int64_t H = z->ne[1];
|
|
||||||
int64_t W = z->ne[0];
|
|
||||||
int64_t h = H / p;
|
|
||||||
int64_t w = W / p;
|
|
||||||
|
|
||||||
z = ggml_reshape_4d(ctx->ggml_ctx, z, p, w, p, h * C * N); // [N*C*h, p, w, p]
|
|
||||||
z = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, z, 0, 2, 1, 3)); // [N*C*h, w, p, p]
|
|
||||||
z = ggml_reshape_4d(ctx->ggml_ctx, z, p * p, w * h, C, N); // [N, C, h*w, p*p]
|
|
||||||
z = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, z, 1, 0, 2, 3)); // [N, C, p*p, h*w]
|
|
||||||
z = ggml_reshape_4d(ctx->ggml_ctx, z, w, h, p * p * C, N); // [N, C*p*p, h*w]
|
|
||||||
}
|
|
||||||
return z;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
struct VAE : public GGMLRunner {
|
|
||||||
VAE(ggml_backend_t backend, bool offload_params_to_cpu)
|
|
||||||
: GGMLRunner(backend, offload_params_to_cpu) {}
|
|
||||||
virtual bool compute(const int n_threads,
|
|
||||||
struct ggml_tensor* z,
|
|
||||||
bool decode_graph,
|
|
||||||
struct ggml_tensor** output,
|
|
||||||
struct ggml_context* output_ctx) = 0;
|
|
||||||
virtual void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) = 0;
|
|
||||||
virtual void set_conv2d_scale(float scale) { SD_UNUSED(scale); };
|
virtual void set_conv2d_scale(float scale) { SD_UNUSED(scale); };
|
||||||
};
|
};
|
||||||
|
|
||||||
struct FakeVAE : public VAE {
|
struct FakeVAE : public VAE {
|
||||||
FakeVAE(ggml_backend_t backend, bool offload_params_to_cpu)
|
FakeVAE(SDVersion version, ggml_backend_t backend, bool offload_params_to_cpu)
|
||||||
: VAE(backend, offload_params_to_cpu) {}
|
: VAE(version, backend, offload_params_to_cpu) {}
|
||||||
bool compute(const int n_threads,
|
|
||||||
struct ggml_tensor* z,
|
int get_encoder_output_channels(int input_channels) {
|
||||||
bool decode_graph,
|
return input_channels;
|
||||||
struct ggml_tensor** output,
|
}
|
||||||
struct ggml_context* output_ctx) override {
|
|
||||||
|
bool _compute(const int n_threads,
|
||||||
|
ggml_tensor* z,
|
||||||
|
bool decode_graph,
|
||||||
|
ggml_tensor** output,
|
||||||
|
ggml_context* output_ctx) override {
|
||||||
if (*output == nullptr && output_ctx != nullptr) {
|
if (*output == nullptr && output_ctx != nullptr) {
|
||||||
*output = ggml_dup_tensor(output_ctx, z);
|
*output = ggml_dup_tensor(output_ctx, z);
|
||||||
}
|
}
|
||||||
@ -642,133 +213,23 @@ struct FakeVAE : public VAE {
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) override {}
|
ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
|
||||||
|
return vae_output;
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
|
||||||
|
return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
|
||||||
|
return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
|
||||||
|
}
|
||||||
|
|
||||||
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) override {}
|
||||||
|
|
||||||
std::string get_desc() override {
|
std::string get_desc() override {
|
||||||
return "fake_vae";
|
return "fake_vae";
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
struct AutoEncoderKL : public VAE {
|
#endif // __VAE_HPP__
|
||||||
bool decode_only = true;
|
|
||||||
AutoencodingEngine ae;
|
|
||||||
|
|
||||||
AutoEncoderKL(ggml_backend_t backend,
|
|
||||||
bool offload_params_to_cpu,
|
|
||||||
const String2TensorStorage& tensor_storage_map,
|
|
||||||
const std::string prefix,
|
|
||||||
bool decode_only = false,
|
|
||||||
bool use_video_decoder = false,
|
|
||||||
SDVersion version = VERSION_SD1)
|
|
||||||
: decode_only(decode_only), VAE(backend, offload_params_to_cpu) {
|
|
||||||
bool use_linear_projection = false;
|
|
||||||
for (const auto& [name, tensor_storage] : tensor_storage_map) {
|
|
||||||
if (!starts_with(name, prefix)) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (ends_with(name, "attn_1.proj_out.weight")) {
|
|
||||||
if (tensor_storage.n_dims == 2) {
|
|
||||||
use_linear_projection = true;
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ae = AutoencodingEngine(version, decode_only, use_linear_projection, use_video_decoder);
|
|
||||||
ae.init(params_ctx, tensor_storage_map, prefix);
|
|
||||||
}
|
|
||||||
|
|
||||||
void set_conv2d_scale(float scale) override {
|
|
||||||
std::vector<GGMLBlock*> blocks;
|
|
||||||
ae.get_all_blocks(blocks);
|
|
||||||
for (auto block : blocks) {
|
|
||||||
if (block->get_desc() == "Conv2d") {
|
|
||||||
auto conv_block = (Conv2d*)block;
|
|
||||||
conv_block->set_scale(scale);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string get_desc() override {
|
|
||||||
return "vae";
|
|
||||||
}
|
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) override {
|
|
||||||
ae.get_param_tensors(tensors, prefix);
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) {
|
|
||||||
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
|
||||||
|
|
||||||
z = to_backend(z);
|
|
||||||
|
|
||||||
auto runner_ctx = get_context();
|
|
||||||
|
|
||||||
struct ggml_tensor* out = decode_graph ? ae.decode(&runner_ctx, z) : ae.encode(&runner_ctx, z);
|
|
||||||
|
|
||||||
ggml_build_forward_expand(gf, out);
|
|
||||||
|
|
||||||
return gf;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool compute(const int n_threads,
|
|
||||||
struct ggml_tensor* z,
|
|
||||||
bool decode_graph,
|
|
||||||
struct ggml_tensor** output,
|
|
||||||
struct ggml_context* output_ctx = nullptr) override {
|
|
||||||
GGML_ASSERT(!decode_only || decode_graph);
|
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
|
||||||
return build_graph(z, decode_graph);
|
|
||||||
};
|
|
||||||
// ggml_set_f32(z, 0.5f);
|
|
||||||
// print_ggml_tensor(z);
|
|
||||||
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
|
|
||||||
}
|
|
||||||
|
|
||||||
void test() {
|
|
||||||
struct ggml_init_params params;
|
|
||||||
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
|
|
||||||
params.mem_buffer = nullptr;
|
|
||||||
params.no_alloc = false;
|
|
||||||
|
|
||||||
struct ggml_context* work_ctx = ggml_init(params);
|
|
||||||
GGML_ASSERT(work_ctx != nullptr);
|
|
||||||
|
|
||||||
{
|
|
||||||
// CPU, x{1, 3, 64, 64}: Pass
|
|
||||||
// CUDA, x{1, 3, 64, 64}: Pass, but sill get wrong result for some image, may be due to interlnal nan
|
|
||||||
// CPU, x{2, 3, 64, 64}: Wrong result
|
|
||||||
// CUDA, x{2, 3, 64, 64}: Wrong result, and different from CPU result
|
|
||||||
auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 64, 64, 3, 2);
|
|
||||||
ggml_set_f32(x, 0.5f);
|
|
||||||
print_ggml_tensor(x);
|
|
||||||
struct ggml_tensor* out = nullptr;
|
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
|
||||||
compute(8, x, false, &out, work_ctx);
|
|
||||||
int64_t t1 = ggml_time_ms();
|
|
||||||
|
|
||||||
print_ggml_tensor(out);
|
|
||||||
LOG_DEBUG("encode test done in %lldms", t1 - t0);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (false) {
|
|
||||||
// CPU, z{1, 4, 8, 8}: Pass
|
|
||||||
// CUDA, z{1, 4, 8, 8}: Pass
|
|
||||||
// CPU, z{3, 4, 8, 8}: Wrong result
|
|
||||||
// CUDA, z{3, 4, 8, 8}: Wrong result, and different from CPU result
|
|
||||||
auto z = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 4, 1);
|
|
||||||
ggml_set_f32(z, 0.5f);
|
|
||||||
print_ggml_tensor(z);
|
|
||||||
struct ggml_tensor* out = nullptr;
|
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
|
||||||
compute(8, z, true, &out, work_ctx);
|
|
||||||
int64_t t1 = ggml_time_ms();
|
|
||||||
|
|
||||||
print_ggml_tensor(out);
|
|
||||||
LOG_DEBUG("decode test done in %lldms", t1 - t0);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif
|
|
||||||
|
|||||||
502
src/wan.hpp
502
src/wan.hpp
@ -25,7 +25,7 @@ namespace WAN {
|
|||||||
std::tuple<int, int, int> dilation;
|
std::tuple<int, int, int> dilation;
|
||||||
bool bias;
|
bool bias;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
params["weight"] = ggml_new_tensor_4d(ctx,
|
params["weight"] = ggml_new_tensor_4d(ctx,
|
||||||
GGML_TYPE_F16,
|
GGML_TYPE_F16,
|
||||||
std::get<2>(kernel_size),
|
std::get<2>(kernel_size),
|
||||||
@ -53,11 +53,11 @@ namespace WAN {
|
|||||||
dilation(std::move(dilation)),
|
dilation(std::move(dilation)),
|
||||||
bias(bias) {}
|
bias(bias) {}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* cache_x = nullptr) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, ggml_tensor* cache_x = nullptr) {
|
||||||
// x: [N*IC, ID, IH, IW]
|
// x: [N*IC, ID, IH, IW]
|
||||||
// result: x: [N*OC, ID, IH, IW]
|
// result: x: [N*OC, ID, IH, IW]
|
||||||
struct ggml_tensor* w = params["weight"];
|
ggml_tensor* w = params["weight"];
|
||||||
struct ggml_tensor* b = nullptr;
|
ggml_tensor* b = nullptr;
|
||||||
if (bias) {
|
if (bias) {
|
||||||
b = params["bias"];
|
b = params["bias"];
|
||||||
}
|
}
|
||||||
@ -86,7 +86,7 @@ namespace WAN {
|
|||||||
protected:
|
protected:
|
||||||
int64_t dim;
|
int64_t dim;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
ggml_type wtype = GGML_TYPE_F32;
|
ggml_type wtype = GGML_TYPE_F32;
|
||||||
auto iter = tensor_storage_map.find(prefix + "gamma");
|
auto iter = tensor_storage_map.find(prefix + "gamma");
|
||||||
if (iter != tensor_storage_map.end()) {
|
if (iter != tensor_storage_map.end()) {
|
||||||
@ -100,16 +100,16 @@ namespace WAN {
|
|||||||
RMS_norm(int64_t dim)
|
RMS_norm(int64_t dim)
|
||||||
: dim(dim) {}
|
: dim(dim) {}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||||
// x: [N*IC, ID, IH, IW], IC == dim
|
// x: [N*IC, ID, IH, IW], IC == dim
|
||||||
// assert N == 1
|
// assert N == 1
|
||||||
|
|
||||||
struct ggml_tensor* w = params["gamma"];
|
ggml_tensor* w = params["gamma"];
|
||||||
w = ggml_reshape_1d(ctx->ggml_ctx, w, ggml_nelements(w));
|
w = ggml_reshape_1d(ctx->ggml_ctx, w, ggml_nelements(w));
|
||||||
auto h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, x, 3, 0, 1, 2)); // [ID, IH, IW, N*IC]
|
auto h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, x, 3, 0, 1, 2)); // [ID, IH, IW, N*IC]
|
||||||
h = ggml_rms_norm(ctx->ggml_ctx, h, 1e-12f);
|
h = ggml_rms_norm(ctx->ggml_ctx, h, 1e-12f);
|
||||||
h = ggml_mul(ctx->ggml_ctx, h, w);
|
h = ggml_mul(ctx->ggml_ctx, h, w);
|
||||||
h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, h, 1, 2, 3, 0));
|
h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, h, 1, 2, 3, 0));
|
||||||
|
|
||||||
return h;
|
return h;
|
||||||
}
|
}
|
||||||
@ -148,12 +148,12 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t b,
|
int64_t b,
|
||||||
std::vector<struct ggml_tensor*>& feat_cache,
|
std::vector<ggml_tensor*>& feat_cache,
|
||||||
int& feat_idx,
|
int& feat_idx,
|
||||||
int chunk_idx) {
|
int chunk_idx) {
|
||||||
// x: [b*c, t, h, w]
|
// x: [b*c, t, h, w]
|
||||||
GGML_ASSERT(b == 1);
|
GGML_ASSERT(b == 1);
|
||||||
int64_t c = x->ne[3] / b;
|
int64_t c = x->ne[3] / b;
|
||||||
@ -254,9 +254,9 @@ namespace WAN {
|
|||||||
GGML_ASSERT(in_channels * factor % out_channels == 0);
|
GGML_ASSERT(in_channels * factor % out_channels == 0);
|
||||||
group_size = in_channels * factor / out_channels;
|
group_size = in_channels * factor / out_channels;
|
||||||
}
|
}
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t B = 1) {
|
int64_t B = 1) {
|
||||||
// x: [B*IC, T, H, W]
|
// x: [B*IC, T, H, W]
|
||||||
// return: [B*OC, T/factor_t, H/factor_s, W/factor_s]
|
// return: [B*OC, T/factor_t, H/factor_s, W/factor_s]
|
||||||
GGML_ASSERT(B == 1);
|
GGML_ASSERT(B == 1);
|
||||||
@ -301,10 +301,10 @@ namespace WAN {
|
|||||||
GGML_ASSERT(out_channels * factor % in_channels == 0);
|
GGML_ASSERT(out_channels * factor % in_channels == 0);
|
||||||
repeats = out_channels * factor / in_channels;
|
repeats = out_channels * factor / in_channels;
|
||||||
}
|
}
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
bool first_chunk = false,
|
bool first_chunk = false,
|
||||||
int64_t B = 1) {
|
int64_t B = 1) {
|
||||||
// x: [B*IC, T, H, W]
|
// x: [B*IC, T, H, W]
|
||||||
// return: [B*OC, T/factor_t, H/factor_s, W/factor_s]
|
// return: [B*OC, T/factor_t, H/factor_s, W/factor_s]
|
||||||
GGML_ASSERT(B == 1);
|
GGML_ASSERT(B == 1);
|
||||||
@ -356,14 +356,14 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t b,
|
int64_t b,
|
||||||
std::vector<struct ggml_tensor*>& feat_cache,
|
std::vector<ggml_tensor*>& feat_cache,
|
||||||
int& feat_idx) {
|
int& feat_idx) {
|
||||||
// x: [b*c, t, h, w]
|
// x: [b*c, t, h, w]
|
||||||
GGML_ASSERT(b == 1);
|
GGML_ASSERT(b == 1);
|
||||||
struct ggml_tensor* h = x;
|
ggml_tensor* h = x;
|
||||||
if (in_dim != out_dim) {
|
if (in_dim != out_dim) {
|
||||||
auto shortcut = std::dynamic_pointer_cast<CausalConv3d>(blocks["shortcut"]);
|
auto shortcut = std::dynamic_pointer_cast<CausalConv3d>(blocks["shortcut"]);
|
||||||
|
|
||||||
@ -430,15 +430,15 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t b,
|
int64_t b,
|
||||||
std::vector<struct ggml_tensor*>& feat_cache,
|
std::vector<ggml_tensor*>& feat_cache,
|
||||||
int& feat_idx,
|
int& feat_idx,
|
||||||
int chunk_idx) {
|
int chunk_idx) {
|
||||||
// x: [b*c, t, h, w]
|
// x: [b*c, t, h, w]
|
||||||
GGML_ASSERT(b == 1);
|
GGML_ASSERT(b == 1);
|
||||||
struct ggml_tensor* x_copy = x;
|
ggml_tensor* x_copy = x;
|
||||||
|
|
||||||
auto avg_shortcut = std::dynamic_pointer_cast<AvgDown3D>(blocks["avg_shortcut"]);
|
auto avg_shortcut = std::dynamic_pointer_cast<AvgDown3D>(blocks["avg_shortcut"]);
|
||||||
|
|
||||||
@ -492,15 +492,15 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t b,
|
int64_t b,
|
||||||
std::vector<struct ggml_tensor*>& feat_cache,
|
std::vector<ggml_tensor*>& feat_cache,
|
||||||
int& feat_idx,
|
int& feat_idx,
|
||||||
int chunk_idx) {
|
int chunk_idx) {
|
||||||
// x: [b*c, t, h, w]
|
// x: [b*c, t, h, w]
|
||||||
GGML_ASSERT(b == 1);
|
GGML_ASSERT(b == 1);
|
||||||
struct ggml_tensor* x_copy = x;
|
ggml_tensor* x_copy = x;
|
||||||
|
|
||||||
int i = 0;
|
int i = 0;
|
||||||
for (; i < mult; i++) {
|
for (; i < mult; i++) {
|
||||||
@ -537,9 +537,9 @@ namespace WAN {
|
|||||||
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Conv2d(dim, dim, {1, 1}));
|
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Conv2d(dim, dim, {1, 1}));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t b) {
|
int64_t b) {
|
||||||
// x: [b*c, t, h, w]
|
// x: [b*c, t, h, w]
|
||||||
GGML_ASSERT(b == 1);
|
GGML_ASSERT(b == 1);
|
||||||
auto norm = std::dynamic_pointer_cast<RMS_norm>(blocks["norm"]);
|
auto norm = std::dynamic_pointer_cast<RMS_norm>(blocks["norm"]);
|
||||||
@ -659,12 +659,12 @@ namespace WAN {
|
|||||||
blocks["head.2"] = std::shared_ptr<GGMLBlock>(new CausalConv3d(out_dim, z_dim, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}));
|
blocks["head.2"] = std::shared_ptr<GGMLBlock>(new CausalConv3d(out_dim, z_dim, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t b,
|
int64_t b,
|
||||||
std::vector<struct ggml_tensor*>& feat_cache,
|
std::vector<ggml_tensor*>& feat_cache,
|
||||||
int& feat_idx,
|
int& feat_idx,
|
||||||
int chunk_idx) {
|
int chunk_idx) {
|
||||||
// x: [b*c, t, h, w]
|
// x: [b*c, t, h, w]
|
||||||
GGML_ASSERT(b == 1);
|
GGML_ASSERT(b == 1);
|
||||||
auto conv1 = std::dynamic_pointer_cast<CausalConv3d>(blocks["conv1"]);
|
auto conv1 = std::dynamic_pointer_cast<CausalConv3d>(blocks["conv1"]);
|
||||||
@ -830,12 +830,12 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t b,
|
int64_t b,
|
||||||
std::vector<struct ggml_tensor*>& feat_cache,
|
std::vector<ggml_tensor*>& feat_cache,
|
||||||
int& feat_idx,
|
int& feat_idx,
|
||||||
int chunk_idx) {
|
int chunk_idx) {
|
||||||
// x: [b*c, t, h, w]
|
// x: [b*c, t, h, w]
|
||||||
GGML_ASSERT(b == 1);
|
GGML_ASSERT(b == 1);
|
||||||
auto conv1 = std::dynamic_pointer_cast<CausalConv3d>(blocks["conv1"]);
|
auto conv1 = std::dynamic_pointer_cast<CausalConv3d>(blocks["conv1"]);
|
||||||
@ -934,16 +934,16 @@ namespace WAN {
|
|||||||
|
|
||||||
int _conv_num = 33;
|
int _conv_num = 33;
|
||||||
int _conv_idx = 0;
|
int _conv_idx = 0;
|
||||||
std::vector<struct ggml_tensor*> _feat_map;
|
std::vector<ggml_tensor*> _feat_map;
|
||||||
int _enc_conv_num = 28;
|
int _enc_conv_num = 28;
|
||||||
int _enc_conv_idx = 0;
|
int _enc_conv_idx = 0;
|
||||||
std::vector<struct ggml_tensor*> _enc_feat_map;
|
std::vector<ggml_tensor*> _enc_feat_map;
|
||||||
|
|
||||||
void clear_cache() {
|
void clear_cache() {
|
||||||
_conv_idx = 0;
|
_conv_idx = 0;
|
||||||
_feat_map = std::vector<struct ggml_tensor*>(_conv_num, nullptr);
|
_feat_map = std::vector<ggml_tensor*>(_conv_num, nullptr);
|
||||||
_enc_conv_idx = 0;
|
_enc_conv_idx = 0;
|
||||||
_enc_feat_map = std::vector<struct ggml_tensor*>(_enc_conv_num, nullptr);
|
_enc_feat_map = std::vector<ggml_tensor*>(_enc_conv_num, nullptr);
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
@ -966,10 +966,10 @@ namespace WAN {
|
|||||||
blocks["conv2"] = std::shared_ptr<GGMLBlock>(new CausalConv3d(z_dim, z_dim, {1, 1, 1}));
|
blocks["conv2"] = std::shared_ptr<GGMLBlock>(new CausalConv3d(z_dim, z_dim, {1, 1, 1}));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* patchify(struct ggml_context* ctx,
|
ggml_tensor* patchify(ggml_context* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t patch_size,
|
int64_t patch_size,
|
||||||
int64_t b = 1) {
|
int64_t b = 1) {
|
||||||
// x: [b*c, f, h*q, w*r]
|
// x: [b*c, f, h*q, w*r]
|
||||||
// return: [b*c*r*q, f, h, w]
|
// return: [b*c*r*q, f, h, w]
|
||||||
if (patch_size == 1) {
|
if (patch_size == 1) {
|
||||||
@ -993,10 +993,10 @@ namespace WAN {
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* unpatchify(struct ggml_context* ctx,
|
ggml_tensor* unpatchify(ggml_context* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t patch_size,
|
int64_t patch_size,
|
||||||
int64_t b = 1) {
|
int64_t b = 1) {
|
||||||
// x: [b*c*r*q, f, h, w]
|
// x: [b*c*r*q, f, h, w]
|
||||||
// return: [b*c, f, h*q, w*r]
|
// return: [b*c, f, h*q, w*r]
|
||||||
if (patch_size == 1) {
|
if (patch_size == 1) {
|
||||||
@ -1019,9 +1019,9 @@ namespace WAN {
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* encode(GGMLRunnerContext* ctx,
|
ggml_tensor* encode(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t b = 1) {
|
int64_t b = 1) {
|
||||||
// x: [b*c, t, h, w]
|
// x: [b*c, t, h, w]
|
||||||
GGML_ASSERT(b == 1);
|
GGML_ASSERT(b == 1);
|
||||||
GGML_ASSERT(decode_only == false);
|
GGML_ASSERT(decode_only == false);
|
||||||
@ -1037,7 +1037,7 @@ namespace WAN {
|
|||||||
|
|
||||||
int64_t t = x->ne[2];
|
int64_t t = x->ne[2];
|
||||||
int64_t iter_ = 1 + (t - 1) / 4;
|
int64_t iter_ = 1 + (t - 1) / 4;
|
||||||
struct ggml_tensor* out;
|
ggml_tensor* out;
|
||||||
for (int i = 0; i < iter_; i++) {
|
for (int i = 0; i < iter_; i++) {
|
||||||
_enc_conv_idx = 0;
|
_enc_conv_idx = 0;
|
||||||
if (i == 0) {
|
if (i == 0) {
|
||||||
@ -1055,9 +1055,9 @@ namespace WAN {
|
|||||||
return mu;
|
return mu;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* decode(GGMLRunnerContext* ctx,
|
ggml_tensor* decode(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* z,
|
ggml_tensor* z,
|
||||||
int64_t b = 1) {
|
int64_t b = 1) {
|
||||||
// z: [b*c, t, h, w]
|
// z: [b*c, t, h, w]
|
||||||
GGML_ASSERT(b == 1);
|
GGML_ASSERT(b == 1);
|
||||||
|
|
||||||
@ -1068,7 +1068,7 @@ namespace WAN {
|
|||||||
|
|
||||||
int64_t iter_ = z->ne[2];
|
int64_t iter_ = z->ne[2];
|
||||||
auto x = conv2->forward(ctx, z);
|
auto x = conv2->forward(ctx, z);
|
||||||
struct ggml_tensor* out;
|
ggml_tensor* out;
|
||||||
for (int i = 0; i < iter_; i++) {
|
for (int i = 0; i < iter_; i++) {
|
||||||
_conv_idx = 0;
|
_conv_idx = 0;
|
||||||
if (i == 0) {
|
if (i == 0) {
|
||||||
@ -1087,10 +1087,10 @@ namespace WAN {
|
|||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* decode_partial(GGMLRunnerContext* ctx,
|
ggml_tensor* decode_partial(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* z,
|
ggml_tensor* z,
|
||||||
int i,
|
int i,
|
||||||
int64_t b = 1) {
|
int64_t b = 1) {
|
||||||
// z: [b*c, t, h, w]
|
// z: [b*c, t, h, w]
|
||||||
GGML_ASSERT(b == 1);
|
GGML_ASSERT(b == 1);
|
||||||
|
|
||||||
@ -1109,7 +1109,8 @@ namespace WAN {
|
|||||||
};
|
};
|
||||||
|
|
||||||
struct WanVAERunner : public VAE {
|
struct WanVAERunner : public VAE {
|
||||||
bool decode_only = true;
|
float scale_factor = 1.0f;
|
||||||
|
bool decode_only = true;
|
||||||
WanVAE ae;
|
WanVAE ae;
|
||||||
|
|
||||||
WanVAERunner(ggml_backend_t backend,
|
WanVAERunner(ggml_backend_t backend,
|
||||||
@ -1118,7 +1119,7 @@ namespace WAN {
|
|||||||
const std::string prefix = "",
|
const std::string prefix = "",
|
||||||
bool decode_only = false,
|
bool decode_only = false,
|
||||||
SDVersion version = VERSION_WAN2)
|
SDVersion version = VERSION_WAN2)
|
||||||
: decode_only(decode_only), ae(decode_only, version == VERSION_WAN2_2_TI2V), VAE(backend, offload_params_to_cpu) {
|
: decode_only(decode_only), ae(decode_only, version == VERSION_WAN2_2_TI2V), VAE(version, backend, offload_params_to_cpu) {
|
||||||
ae.init(params_ctx, tensor_storage_map, prefix);
|
ae.init(params_ctx, tensor_storage_map, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1126,26 +1127,121 @@ namespace WAN {
|
|||||||
return "wan_vae";
|
return "wan_vae";
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) override {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) override {
|
||||||
ae.get_param_tensors(tensors, prefix);
|
ae.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) {
|
ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
|
||||||
struct ggml_cgraph* gf = new_graph_custom(10240 * z->ne[2]);
|
return vae_output;
|
||||||
|
}
|
||||||
|
|
||||||
|
void get_latents_mean_std_vec(ggml_tensor* latents, int channel_dim, std::vector<float>& latents_mean_vec, std::vector<float>& latents_std_vec) {
|
||||||
|
GGML_ASSERT(latents->ne[channel_dim] == 16 || latents->ne[channel_dim] == 48);
|
||||||
|
if (latents->ne[channel_dim] == 16) { // Wan2.1 VAE
|
||||||
|
latents_mean_vec = {-0.7571f, -0.7089f, -0.9113f, 0.1075f, -0.1745f, 0.9653f, -0.1517f, 1.5508f,
|
||||||
|
0.4134f, -0.0715f, 0.5517f, -0.3632f, -0.1922f, -0.9497f, 0.2503f, -0.2921f};
|
||||||
|
latents_std_vec = {2.8184f, 1.4541f, 2.3275f, 2.6558f, 1.2196f, 1.7708f, 2.6052f, 2.0743f,
|
||||||
|
3.2687f, 2.1526f, 2.8652f, 1.5579f, 1.6382f, 1.1253f, 2.8251f, 1.9160f};
|
||||||
|
} else if (latents->ne[channel_dim] == 48) { // Wan2.2 VAE
|
||||||
|
latents_mean_vec = {-0.2289f, -0.0052f, -0.1323f, -0.2339f, -0.2799f, 0.0174f, 0.1838f, 0.1557f,
|
||||||
|
-0.1382f, 0.0542f, 0.2813f, 0.0891f, 0.1570f, -0.0098f, 0.0375f, -0.1825f,
|
||||||
|
-0.2246f, -0.1207f, -0.0698f, 0.5109f, 0.2665f, -0.2108f, -0.2158f, 0.2502f,
|
||||||
|
-0.2055f, -0.0322f, 0.1109f, 0.1567f, -0.0729f, 0.0899f, -0.2799f, -0.1230f,
|
||||||
|
-0.0313f, -0.1649f, 0.0117f, 0.0723f, -0.2839f, -0.2083f, -0.0520f, 0.3748f,
|
||||||
|
0.0152f, 0.1957f, 0.1433f, -0.2944f, 0.3573f, -0.0548f, -0.1681f, -0.0667f};
|
||||||
|
latents_std_vec = {
|
||||||
|
0.4765f, 1.0364f, 0.4514f, 1.1677f, 0.5313f, 0.4990f, 0.4818f, 0.5013f,
|
||||||
|
0.8158f, 1.0344f, 0.5894f, 1.0901f, 0.6885f, 0.6165f, 0.8454f, 0.4978f,
|
||||||
|
0.5759f, 0.3523f, 0.7135f, 0.6804f, 0.5833f, 1.4146f, 0.8986f, 0.5659f,
|
||||||
|
0.7069f, 0.5338f, 0.4889f, 0.4917f, 0.4069f, 0.4999f, 0.6866f, 0.4093f,
|
||||||
|
0.5709f, 0.6065f, 0.6415f, 0.4944f, 0.5726f, 1.2042f, 0.5458f, 1.6887f,
|
||||||
|
0.3971f, 1.0600f, 0.3943f, 0.5537f, 0.5444f, 0.4089f, 0.7468f, 0.7744f};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
|
||||||
|
ggml_tensor* vae_latents = ggml_dup(work_ctx, latents);
|
||||||
|
int channel_dim = sd_version_is_wan(version) ? 3 : 2;
|
||||||
|
std::vector<float> latents_mean_vec;
|
||||||
|
std::vector<float> latents_std_vec;
|
||||||
|
get_latents_mean_std_vec(latents, channel_dim, latents_mean_vec, latents_std_vec);
|
||||||
|
|
||||||
|
float mean;
|
||||||
|
float std_;
|
||||||
|
for (int i = 0; i < latents->ne[3]; i++) {
|
||||||
|
if (channel_dim == 3) {
|
||||||
|
mean = latents_mean_vec[i];
|
||||||
|
std_ = latents_std_vec[i];
|
||||||
|
}
|
||||||
|
for (int j = 0; j < latents->ne[2]; j++) {
|
||||||
|
if (channel_dim == 2) {
|
||||||
|
mean = latents_mean_vec[j];
|
||||||
|
std_ = latents_std_vec[j];
|
||||||
|
}
|
||||||
|
for (int k = 0; k < latents->ne[1]; k++) {
|
||||||
|
for (int l = 0; l < latents->ne[0]; l++) {
|
||||||
|
float value = ggml_ext_tensor_get_f32(latents, l, k, j, i);
|
||||||
|
value = value * std_ / scale_factor + mean;
|
||||||
|
ggml_ext_tensor_set_f32(vae_latents, value, l, k, j, i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return vae_latents;
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
|
||||||
|
ggml_tensor* diffusion_latents = ggml_dup(work_ctx, latents);
|
||||||
|
int channel_dim = sd_version_is_wan(version) ? 3 : 2;
|
||||||
|
std::vector<float> latents_mean_vec;
|
||||||
|
std::vector<float> latents_std_vec;
|
||||||
|
get_latents_mean_std_vec(latents, channel_dim, latents_mean_vec, latents_std_vec);
|
||||||
|
|
||||||
|
float mean;
|
||||||
|
float std_;
|
||||||
|
for (int i = 0; i < latents->ne[3]; i++) {
|
||||||
|
if (channel_dim == 3) {
|
||||||
|
mean = latents_mean_vec[i];
|
||||||
|
std_ = latents_std_vec[i];
|
||||||
|
}
|
||||||
|
for (int j = 0; j < latents->ne[2]; j++) {
|
||||||
|
if (channel_dim == 2) {
|
||||||
|
mean = latents_mean_vec[j];
|
||||||
|
std_ = latents_std_vec[j];
|
||||||
|
}
|
||||||
|
for (int k = 0; k < latents->ne[1]; k++) {
|
||||||
|
for (int l = 0; l < latents->ne[0]; l++) {
|
||||||
|
float value = ggml_ext_tensor_get_f32(latents, l, k, j, i);
|
||||||
|
value = (value - mean) * scale_factor / std_;
|
||||||
|
ggml_ext_tensor_set_f32(diffusion_latents, value, l, k, j, i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return diffusion_latents;
|
||||||
|
}
|
||||||
|
|
||||||
|
int get_encoder_output_channels(int input_channels) {
|
||||||
|
return static_cast<int>(ae.z_dim);
|
||||||
|
}
|
||||||
|
|
||||||
|
ggml_cgraph* build_graph(ggml_tensor* z, bool decode_graph) {
|
||||||
|
ggml_cgraph* gf = new_graph_custom(10240 * z->ne[2]);
|
||||||
|
|
||||||
z = to_backend(z);
|
z = to_backend(z);
|
||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
|
|
||||||
struct ggml_tensor* out = decode_graph ? ae.decode(&runner_ctx, z) : ae.encode(&runner_ctx, z);
|
ggml_tensor* out = decode_graph ? ae.decode(&runner_ctx, z) : ae.encode(&runner_ctx, z);
|
||||||
|
|
||||||
ggml_build_forward_expand(gf, out);
|
ggml_build_forward_expand(gf, out);
|
||||||
|
|
||||||
return gf;
|
return gf;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph_partial(struct ggml_tensor* z, bool decode_graph, int i) {
|
ggml_cgraph* build_graph_partial(ggml_tensor* z, bool decode_graph, int i) {
|
||||||
struct ggml_cgraph* gf = new_graph_custom(20480);
|
ggml_cgraph* gf = new_graph_custom(20480);
|
||||||
|
|
||||||
ae.clear_cache();
|
ae.clear_cache();
|
||||||
|
|
||||||
@ -1158,7 +1254,7 @@ namespace WAN {
|
|||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
|
|
||||||
struct ggml_tensor* out = decode_graph ? ae.decode_partial(&runner_ctx, z, i) : ae.encode(&runner_ctx, z);
|
ggml_tensor* out = decode_graph ? ae.decode_partial(&runner_ctx, z, i) : ae.encode(&runner_ctx, z);
|
||||||
|
|
||||||
for (size_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
|
for (size_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
|
||||||
ggml_tensor* feat_cache = ae._feat_map[feat_idx];
|
ggml_tensor* feat_cache = ae._feat_map[feat_idx];
|
||||||
@ -1173,13 +1269,13 @@ namespace WAN {
|
|||||||
return gf;
|
return gf;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool compute(const int n_threads,
|
bool _compute(const int n_threads,
|
||||||
struct ggml_tensor* z,
|
ggml_tensor* z,
|
||||||
bool decode_graph,
|
bool decode_graph,
|
||||||
struct ggml_tensor** output,
|
ggml_tensor** output,
|
||||||
struct ggml_context* output_ctx = nullptr) override {
|
ggml_context* output_ctx = nullptr) override {
|
||||||
if (true) {
|
if (true) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(z, decode_graph);
|
return build_graph(z, decode_graph);
|
||||||
};
|
};
|
||||||
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
|
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
|
||||||
@ -1187,11 +1283,11 @@ namespace WAN {
|
|||||||
ae.clear_cache();
|
ae.clear_cache();
|
||||||
int64_t t = z->ne[2];
|
int64_t t = z->ne[2];
|
||||||
int i = 0;
|
int i = 0;
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph_partial(z, decode_graph, i);
|
return build_graph_partial(z, decode_graph, i);
|
||||||
};
|
};
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
bool res = GGMLRunner::compute(get_graph, n_threads, true, &out, output_ctx);
|
bool res = GGMLRunner::compute(get_graph, n_threads, true, &out, output_ctx);
|
||||||
ae.clear_cache();
|
ae.clear_cache();
|
||||||
if (t == 1) {
|
if (t == 1) {
|
||||||
*output = out;
|
*output = out;
|
||||||
@ -1229,12 +1325,12 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void test() {
|
void test() {
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
|
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
|
|
||||||
struct ggml_context* work_ctx = ggml_init(params);
|
ggml_context* work_ctx = ggml_init(params);
|
||||||
GGML_ASSERT(work_ctx != nullptr);
|
GGML_ASSERT(work_ctx != nullptr);
|
||||||
|
|
||||||
if (true) {
|
if (true) {
|
||||||
@ -1246,10 +1342,10 @@ namespace WAN {
|
|||||||
ggml_set_f32(z, 0.5f);
|
ggml_set_f32(z, 0.5f);
|
||||||
z = load_tensor_from_file(work_ctx, "wan_vae_z.bin");
|
z = load_tensor_from_file(work_ctx, "wan_vae_z.bin");
|
||||||
print_ggml_tensor(z);
|
print_ggml_tensor(z);
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
compute(8, z, true, &out, work_ctx);
|
_compute(8, z, true, &out, work_ctx);
|
||||||
int64_t t1 = ggml_time_ms();
|
int64_t t1 = ggml_time_ms();
|
||||||
|
|
||||||
print_ggml_tensor(out);
|
print_ggml_tensor(out);
|
||||||
@ -1314,10 +1410,10 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
virtual ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mask = nullptr) {
|
ggml_tensor* mask = nullptr) {
|
||||||
// x: [N, n_token, dim]
|
// x: [N, n_token, dim]
|
||||||
// pe: [n_token, d_head/2, 2, 2]
|
// pe: [n_token, d_head/2, 2, 2]
|
||||||
// return [N, n_token, dim]
|
// return [N, n_token, dim]
|
||||||
@ -1355,10 +1451,10 @@ namespace WAN {
|
|||||||
bool qk_norm = true,
|
bool qk_norm = true,
|
||||||
float eps = 1e-6)
|
float eps = 1e-6)
|
||||||
: WanSelfAttention(dim, num_heads, qk_norm, eps) {}
|
: WanSelfAttention(dim, num_heads, qk_norm, eps) {}
|
||||||
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
virtual ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
int64_t context_img_len) = 0;
|
int64_t context_img_len) = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
class WanT2VCrossAttention : public WanCrossAttention {
|
class WanT2VCrossAttention : public WanCrossAttention {
|
||||||
@ -1368,10 +1464,10 @@ namespace WAN {
|
|||||||
bool qk_norm = true,
|
bool qk_norm = true,
|
||||||
float eps = 1e-6)
|
float eps = 1e-6)
|
||||||
: WanCrossAttention(dim, num_heads, qk_norm, eps) {}
|
: WanCrossAttention(dim, num_heads, qk_norm, eps) {}
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
int64_t context_img_len) override {
|
int64_t context_img_len) override {
|
||||||
// x: [N, n_token, dim]
|
// x: [N, n_token, dim]
|
||||||
// context: [N, n_context, dim]
|
// context: [N, n_context, dim]
|
||||||
// context_img_len: unused
|
// context_img_len: unused
|
||||||
@ -1416,10 +1512,10 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
int64_t context_img_len) override {
|
int64_t context_img_len) override {
|
||||||
// x: [N, n_token, dim]
|
// x: [N, n_token, dim]
|
||||||
// context: [N, context_img_len + context_txt_len, dim]
|
// context: [N, context_img_len + context_txt_len, dim]
|
||||||
// return [N, n_token, dim]
|
// return [N, n_token, dim]
|
||||||
@ -1464,7 +1560,7 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
static struct ggml_tensor* modulate_add(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* e) {
|
static ggml_tensor* modulate_add(ggml_context* ctx, ggml_tensor* x, ggml_tensor* e) {
|
||||||
// x: [N, n_token, dim]
|
// x: [N, n_token, dim]
|
||||||
// e: [N, 1, dim] or [N, T, 1, dim]
|
// e: [N, 1, dim] or [N, T, 1, dim]
|
||||||
if (ggml_n_dims(e) == 3) {
|
if (ggml_n_dims(e) == 3) {
|
||||||
@ -1478,7 +1574,7 @@ namespace WAN {
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct ggml_tensor* modulate_mul(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* e) {
|
static ggml_tensor* modulate_mul(ggml_context* ctx, ggml_tensor* x, ggml_tensor* e) {
|
||||||
// x: [N, n_token, dim]
|
// x: [N, n_token, dim]
|
||||||
// e: [N, 1, dim] or [N, T, 1, dim]
|
// e: [N, 1, dim] or [N, T, 1, dim]
|
||||||
if (ggml_n_dims(e) == 3) {
|
if (ggml_n_dims(e) == 3) {
|
||||||
@ -1496,7 +1592,7 @@ namespace WAN {
|
|||||||
protected:
|
protected:
|
||||||
int64_t dim;
|
int64_t dim;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
|
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
|
||||||
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1);
|
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1);
|
||||||
}
|
}
|
||||||
@ -1530,12 +1626,12 @@ namespace WAN {
|
|||||||
blocks["ffn.2"] = std::shared_ptr<GGMLBlock>(new Linear(ffn_dim, dim));
|
blocks["ffn.2"] = std::shared_ptr<GGMLBlock>(new Linear(ffn_dim, dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
virtual ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* e,
|
ggml_tensor* e,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
int64_t context_img_len = 257) {
|
int64_t context_img_len = 257) {
|
||||||
// x: [N, n_token, dim]
|
// x: [N, n_token, dim]
|
||||||
// e: [N, 6, dim] or [N, T, 6, dim]
|
// e: [N, 6, dim] or [N, T, 6, dim]
|
||||||
// context: [N, context_img_len + context_txt_len, dim]
|
// context: [N, context_img_len + context_txt_len, dim]
|
||||||
@ -1584,7 +1680,7 @@ namespace WAN {
|
|||||||
class VaceWanAttentionBlock : public WanAttentionBlock {
|
class VaceWanAttentionBlock : public WanAttentionBlock {
|
||||||
protected:
|
protected:
|
||||||
int block_id;
|
int block_id;
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
|
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
|
||||||
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1);
|
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1);
|
||||||
}
|
}
|
||||||
@ -1606,11 +1702,11 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
|
|
||||||
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* c,
|
ggml_tensor* c,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* e,
|
ggml_tensor* e,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
int64_t context_img_len = 257) {
|
int64_t context_img_len = 257) {
|
||||||
// x: [N, n_token, dim]
|
// x: [N, n_token, dim]
|
||||||
// e: [N, 6, dim] or [N, T, 6, dim]
|
// e: [N, 6, dim] or [N, T, 6, dim]
|
||||||
@ -1636,7 +1732,7 @@ namespace WAN {
|
|||||||
protected:
|
protected:
|
||||||
int64_t dim;
|
int64_t dim;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
|
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
|
||||||
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 2, 1);
|
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 2, 1);
|
||||||
}
|
}
|
||||||
@ -1653,9 +1749,9 @@ namespace WAN {
|
|||||||
blocks["head"] = std::shared_ptr<GGMLBlock>(new Linear(dim, out_dim));
|
blocks["head"] = std::shared_ptr<GGMLBlock>(new Linear(dim, out_dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* e) {
|
ggml_tensor* e) {
|
||||||
// x: [N, n_token, dim]
|
// x: [N, n_token, dim]
|
||||||
// e: [N, dim] or [N, T, dim]
|
// e: [N, dim] or [N, T, dim]
|
||||||
// return [N, n_token, out_dim]
|
// return [N, n_token, out_dim]
|
||||||
@ -1683,7 +1779,7 @@ namespace WAN {
|
|||||||
int64_t in_dim;
|
int64_t in_dim;
|
||||||
int64_t flf_pos_embed_token_number;
|
int64_t flf_pos_embed_token_number;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
if (flf_pos_embed_token_number > 0) {
|
if (flf_pos_embed_token_number > 0) {
|
||||||
params["emb_pos"] = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, in_dim, flf_pos_embed_token_number, 1);
|
params["emb_pos"] = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, in_dim, flf_pos_embed_token_number, 1);
|
||||||
}
|
}
|
||||||
@ -1701,8 +1797,8 @@ namespace WAN {
|
|||||||
blocks["proj.4"] = std::shared_ptr<GGMLBlock>(new LayerNorm(out_dim));
|
blocks["proj.4"] = std::shared_ptr<GGMLBlock>(new LayerNorm(out_dim));
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* image_embeds) {
|
ggml_tensor* image_embeds) {
|
||||||
if (flf_pos_embed_token_number > 0) {
|
if (flf_pos_embed_token_number > 0) {
|
||||||
auto emb_pos = params["emb_pos"];
|
auto emb_pos = params["emb_pos"];
|
||||||
|
|
||||||
@ -1821,8 +1917,8 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx,
|
ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x) {
|
ggml_tensor* x) {
|
||||||
int64_t W = x->ne[0];
|
int64_t W = x->ne[0];
|
||||||
int64_t H = x->ne[1];
|
int64_t H = x->ne[1];
|
||||||
int64_t T = x->ne[2];
|
int64_t T = x->ne[2];
|
||||||
@ -1834,11 +1930,11 @@ namespace WAN {
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* unpatchify(struct ggml_context* ctx,
|
ggml_tensor* unpatchify(ggml_context* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
int64_t t_len,
|
int64_t t_len,
|
||||||
int64_t h_len,
|
int64_t h_len,
|
||||||
int64_t w_len) {
|
int64_t w_len) {
|
||||||
// x: [N, t_len*h_len*w_len, pt*ph*pw*C]
|
// x: [N, t_len*h_len*w_len, pt*ph*pw*C]
|
||||||
// return: [N*C, t_len*pt, h_len*ph, w_len*pw]
|
// return: [N*C, t_len*pt, h_len*ph, w_len*pw]
|
||||||
int64_t N = x->ne[3];
|
int64_t N = x->ne[3];
|
||||||
@ -1861,15 +1957,15 @@ namespace WAN {
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
|
ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timestep,
|
ggml_tensor* timestep,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* clip_fea = nullptr,
|
ggml_tensor* clip_fea = nullptr,
|
||||||
struct ggml_tensor* vace_context = nullptr,
|
ggml_tensor* vace_context = nullptr,
|
||||||
float vace_strength = 1.f,
|
float vace_strength = 1.f,
|
||||||
int64_t N = 1) {
|
int64_t N = 1) {
|
||||||
// x: [N*C, T, H, W], C => in_dim
|
// x: [N*C, T, H, W], C => in_dim
|
||||||
// vace_context: [N*vace_in_dim, T, H, W]
|
// vace_context: [N*vace_in_dim, T, H, W]
|
||||||
// timestep: [N,] or [T]
|
// timestep: [N,] or [T]
|
||||||
@ -1955,16 +2051,16 @@ namespace WAN {
|
|||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timestep,
|
ggml_tensor* timestep,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* clip_fea = nullptr,
|
ggml_tensor* clip_fea = nullptr,
|
||||||
struct ggml_tensor* time_dim_concat = nullptr,
|
ggml_tensor* time_dim_concat = nullptr,
|
||||||
struct ggml_tensor* vace_context = nullptr,
|
ggml_tensor* vace_context = nullptr,
|
||||||
float vace_strength = 1.f,
|
float vace_strength = 1.f,
|
||||||
int64_t N = 1) {
|
int64_t N = 1) {
|
||||||
// Forward pass of DiT.
|
// Forward pass of DiT.
|
||||||
// x: [N*C, T, H, W]
|
// x: [N*C, T, H, W]
|
||||||
// timestep: [N,]
|
// timestep: [N,]
|
||||||
@ -2129,19 +2225,19 @@ namespace WAN {
|
|||||||
return desc;
|
return desc;
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
wan.get_param_tensors(tensors, prefix);
|
wan.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* x,
|
ggml_cgraph* build_graph(ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* clip_fea = nullptr,
|
ggml_tensor* clip_fea = nullptr,
|
||||||
struct ggml_tensor* c_concat = nullptr,
|
ggml_tensor* c_concat = nullptr,
|
||||||
struct ggml_tensor* time_dim_concat = nullptr,
|
ggml_tensor* time_dim_concat = nullptr,
|
||||||
struct ggml_tensor* vace_context = nullptr,
|
ggml_tensor* vace_context = nullptr,
|
||||||
float vace_strength = 1.f) {
|
float vace_strength = 1.f) {
|
||||||
struct ggml_cgraph* gf = new_graph_custom(WAN_GRAPH_SIZE);
|
ggml_cgraph* gf = new_graph_custom(WAN_GRAPH_SIZE);
|
||||||
|
|
||||||
x = to_backend(x);
|
x = to_backend(x);
|
||||||
timesteps = to_backend(timesteps);
|
timesteps = to_backend(timesteps);
|
||||||
@ -2174,15 +2270,15 @@ namespace WAN {
|
|||||||
|
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
|
|
||||||
struct ggml_tensor* out = wan.forward(&runner_ctx,
|
ggml_tensor* out = wan.forward(&runner_ctx,
|
||||||
x,
|
x,
|
||||||
timesteps,
|
timesteps,
|
||||||
context,
|
context,
|
||||||
pe,
|
pe,
|
||||||
clip_fea,
|
clip_fea,
|
||||||
time_dim_concat,
|
time_dim_concat,
|
||||||
vace_context,
|
vace_context,
|
||||||
vace_strength);
|
vace_strength);
|
||||||
|
|
||||||
ggml_build_forward_expand(gf, out);
|
ggml_build_forward_expand(gf, out);
|
||||||
|
|
||||||
@ -2190,17 +2286,17 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* clip_fea = nullptr,
|
ggml_tensor* clip_fea = nullptr,
|
||||||
struct ggml_tensor* c_concat = nullptr,
|
ggml_tensor* c_concat = nullptr,
|
||||||
struct ggml_tensor* time_dim_concat = nullptr,
|
ggml_tensor* time_dim_concat = nullptr,
|
||||||
struct ggml_tensor* vace_context = nullptr,
|
ggml_tensor* vace_context = nullptr,
|
||||||
float vace_strength = 1.f,
|
float vace_strength = 1.f,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(x, timesteps, context, clip_fea, c_concat, time_dim_concat, vace_context, vace_strength);
|
return build_graph(x, timesteps, context, clip_fea, c_concat, time_dim_concat, vace_context, vace_strength);
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -2208,12 +2304,12 @@ namespace WAN {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void test() {
|
void test() {
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = static_cast<size_t>(200 * 1024 * 1024); // 200 MB
|
params.mem_size = static_cast<size_t>(200 * 1024 * 1024); // 200 MB
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
|
|
||||||
struct ggml_context* work_ctx = ggml_init(params);
|
ggml_context* work_ctx = ggml_init(params);
|
||||||
GGML_ASSERT(work_ctx != nullptr);
|
GGML_ASSERT(work_ctx != nullptr);
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -2236,7 +2332,7 @@ namespace WAN {
|
|||||||
// auto clip_fea = load_tensor_from_file(work_ctx, "wan_dit_clip_fea.bin");
|
// auto clip_fea = load_tensor_from_file(work_ctx, "wan_dit_clip_fea.bin");
|
||||||
// print_ggml_tensor(clip_fea);
|
// print_ggml_tensor(clip_fea);
|
||||||
|
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
compute(8, x, timesteps, context, nullptr, nullptr, nullptr, nullptr, 1.f, &out, work_ctx);
|
compute(8, x, timesteps, context, nullptr, nullptr, nullptr, nullptr, 1.f, &out, work_ctx);
|
||||||
|
|||||||
102
src/z_image.hpp
102
src/z_image.hpp
@ -42,10 +42,10 @@ namespace ZImage {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mask = nullptr) {
|
ggml_tensor* mask = nullptr) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
int64_t n_token = x->ne[1];
|
int64_t n_token = x->ne[1];
|
||||||
int64_t N = x->ne[2];
|
int64_t N = x->ne[2];
|
||||||
@ -124,23 +124,23 @@ namespace ZImage {
|
|||||||
blocks["w3"] = std::make_shared<Linear>(dim, hidden_dim, false);
|
blocks["w3"] = std::make_shared<Linear>(dim, hidden_dim, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||||
auto w1 = std::dynamic_pointer_cast<Linear>(blocks["w1"]);
|
auto w1 = std::dynamic_pointer_cast<Linear>(blocks["w1"]);
|
||||||
auto w2 = std::dynamic_pointer_cast<Linear>(blocks["w2"]);
|
auto w2 = std::dynamic_pointer_cast<Linear>(blocks["w2"]);
|
||||||
auto w3 = std::dynamic_pointer_cast<Linear>(blocks["w3"]);
|
auto w3 = std::dynamic_pointer_cast<Linear>(blocks["w3"]);
|
||||||
|
|
||||||
auto x1 = w1->forward(ctx, x);
|
auto x1 = w1->forward(ctx, x);
|
||||||
auto x3 = w3->forward(ctx, x);
|
auto x3 = w3->forward(ctx, x);
|
||||||
x = ggml_mul(ctx->ggml_ctx, ggml_silu(ctx->ggml_ctx, x1), x3);
|
x = ggml_swiglu_split(ctx->ggml_ctx, x1, x3);
|
||||||
x = w2->forward(ctx, x);
|
x = w2->forward(ctx, x);
|
||||||
|
|
||||||
return x;
|
return x;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
__STATIC_INLINE__ struct ggml_tensor* modulate(struct ggml_context* ctx,
|
__STATIC_INLINE__ ggml_tensor* modulate(ggml_context* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* scale) {
|
ggml_tensor* scale) {
|
||||||
// x: [N, L, C]
|
// x: [N, L, C]
|
||||||
// scale: [N, C]
|
// scale: [N, C]
|
||||||
scale = ggml_reshape_3d(ctx, scale, scale->ne[0], 1, scale->ne[1]); // [N, 1, C]
|
scale = ggml_reshape_3d(ctx, scale, scale->ne[0], 1, scale->ne[1]); // [N, 1, C]
|
||||||
@ -175,11 +175,11 @@ namespace ZImage {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
struct ggml_tensor* mask = nullptr,
|
ggml_tensor* mask = nullptr,
|
||||||
struct ggml_tensor* adaln_input = nullptr) {
|
ggml_tensor* adaln_input = nullptr) {
|
||||||
auto attention = std::dynamic_pointer_cast<JointAttention>(blocks["attention"]);
|
auto attention = std::dynamic_pointer_cast<JointAttention>(blocks["attention"]);
|
||||||
auto feed_forward = std::dynamic_pointer_cast<FeedForward>(blocks["feed_forward"]);
|
auto feed_forward = std::dynamic_pointer_cast<FeedForward>(blocks["feed_forward"]);
|
||||||
auto attention_norm1 = std::dynamic_pointer_cast<RMSNorm>(blocks["attention_norm1"]);
|
auto attention_norm1 = std::dynamic_pointer_cast<RMSNorm>(blocks["attention_norm1"]);
|
||||||
@ -241,9 +241,9 @@ namespace ZImage {
|
|||||||
blocks["adaLN_modulation.1"] = std::make_shared<Linear>(MIN(hidden_size, ADALN_EMBED_DIM), hidden_size);
|
blocks["adaLN_modulation.1"] = std::make_shared<Linear>(MIN(hidden_size, ADALN_EMBED_DIM), hidden_size);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* c) {
|
ggml_tensor* c) {
|
||||||
// x: [N, n_token, hidden_size]
|
// x: [N, n_token, hidden_size]
|
||||||
// c: [N, hidden_size]
|
// c: [N, hidden_size]
|
||||||
// return: [N, n_token, patch_size * patch_size * out_channels]
|
// return: [N, n_token, patch_size * patch_size * out_channels]
|
||||||
@ -284,7 +284,7 @@ namespace ZImage {
|
|||||||
protected:
|
protected:
|
||||||
ZImageParams z_image_params;
|
ZImageParams z_image_params;
|
||||||
|
|
||||||
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||||
params["cap_pad_token"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, z_image_params.hidden_size);
|
params["cap_pad_token"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, z_image_params.hidden_size);
|
||||||
params["x_pad_token"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, z_image_params.hidden_size);
|
params["x_pad_token"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, z_image_params.hidden_size);
|
||||||
}
|
}
|
||||||
@ -346,11 +346,11 @@ namespace ZImage {
|
|||||||
blocks["final_layer"] = std::make_shared<FinalLayer>(z_image_params.hidden_size, z_image_params.patch_size, z_image_params.out_channels);
|
blocks["final_layer"] = std::make_shared<FinalLayer>(z_image_params.hidden_size, z_image_params.patch_size, z_image_params.out_channels);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward_core(GGMLRunnerContext* ctx,
|
ggml_tensor* forward_core(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timestep,
|
ggml_tensor* timestep,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* pe) {
|
ggml_tensor* pe) {
|
||||||
auto x_embedder = std::dynamic_pointer_cast<Linear>(blocks["x_embedder"]);
|
auto x_embedder = std::dynamic_pointer_cast<Linear>(blocks["x_embedder"]);
|
||||||
auto t_embedder = std::dynamic_pointer_cast<TimestepEmbedder>(blocks["t_embedder"]);
|
auto t_embedder = std::dynamic_pointer_cast<TimestepEmbedder>(blocks["t_embedder"]);
|
||||||
auto cap_embedder_0 = std::dynamic_pointer_cast<RMSNorm>(blocks["cap_embedder.0"]);
|
auto cap_embedder_0 = std::dynamic_pointer_cast<RMSNorm>(blocks["cap_embedder.0"]);
|
||||||
@ -414,12 +414,12 @@ namespace ZImage {
|
|||||||
return img;
|
return img;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timestep,
|
ggml_tensor* timestep,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
struct ggml_tensor* pe,
|
ggml_tensor* pe,
|
||||||
std::vector<ggml_tensor*> ref_latents = {}) {
|
std::vector<ggml_tensor*> ref_latents = {}) {
|
||||||
// Forward pass of DiT.
|
// Forward pass of DiT.
|
||||||
// x: [N, C, H, W]
|
// x: [N, C, H, W]
|
||||||
// timestep: [N,]
|
// timestep: [N,]
|
||||||
@ -477,17 +477,17 @@ namespace ZImage {
|
|||||||
return "z_image";
|
return "z_image";
|
||||||
}
|
}
|
||||||
|
|
||||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||||
z_image.get_param_tensors(tensors, prefix);
|
z_image.get_param_tensors(tensors, prefix);
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ggml_cgraph* build_graph(struct ggml_tensor* x,
|
ggml_cgraph* build_graph(ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
std::vector<ggml_tensor*> ref_latents = {},
|
std::vector<ggml_tensor*> ref_latents = {},
|
||||||
bool increase_ref_index = false) {
|
bool increase_ref_index = false) {
|
||||||
GGML_ASSERT(x->ne[3] == 1);
|
GGML_ASSERT(x->ne[3] == 1);
|
||||||
struct ggml_cgraph* gf = new_graph_custom(Z_IMAGE_GRAPH_SIZE);
|
ggml_cgraph* gf = new_graph_custom(Z_IMAGE_GRAPH_SIZE);
|
||||||
|
|
||||||
x = to_backend(x);
|
x = to_backend(x);
|
||||||
context = to_backend(context);
|
context = to_backend(context);
|
||||||
@ -518,12 +518,12 @@ namespace ZImage {
|
|||||||
set_backend_tensor_data(pe, pe_vec.data());
|
set_backend_tensor_data(pe, pe_vec.data());
|
||||||
auto runner_ctx = get_context();
|
auto runner_ctx = get_context();
|
||||||
|
|
||||||
struct ggml_tensor* out = z_image.forward(&runner_ctx,
|
ggml_tensor* out = z_image.forward(&runner_ctx,
|
||||||
x,
|
x,
|
||||||
timesteps,
|
timesteps,
|
||||||
context,
|
context,
|
||||||
pe,
|
pe,
|
||||||
ref_latents);
|
ref_latents);
|
||||||
|
|
||||||
ggml_build_forward_expand(gf, out);
|
ggml_build_forward_expand(gf, out);
|
||||||
|
|
||||||
@ -531,17 +531,17 @@ namespace ZImage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool compute(int n_threads,
|
bool compute(int n_threads,
|
||||||
struct ggml_tensor* x,
|
ggml_tensor* x,
|
||||||
struct ggml_tensor* timesteps,
|
ggml_tensor* timesteps,
|
||||||
struct ggml_tensor* context,
|
ggml_tensor* context,
|
||||||
std::vector<ggml_tensor*> ref_latents = {},
|
std::vector<ggml_tensor*> ref_latents = {},
|
||||||
bool increase_ref_index = false,
|
bool increase_ref_index = false,
|
||||||
struct ggml_tensor** output = nullptr,
|
ggml_tensor** output = nullptr,
|
||||||
struct ggml_context* output_ctx = nullptr) {
|
ggml_context* output_ctx = nullptr) {
|
||||||
// x: [N, in_channels, h, w]
|
// x: [N, in_channels, h, w]
|
||||||
// timesteps: [N, ]
|
// timesteps: [N, ]
|
||||||
// context: [N, max_position, hidden_size]
|
// context: [N, max_position, hidden_size]
|
||||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
auto get_graph = [&]() -> ggml_cgraph* {
|
||||||
return build_graph(x, timesteps, context, ref_latents, increase_ref_index);
|
return build_graph(x, timesteps, context, ref_latents, increase_ref_index);
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -549,12 +549,12 @@ namespace ZImage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void test() {
|
void test() {
|
||||||
struct ggml_init_params params;
|
ggml_init_params params;
|
||||||
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
|
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
|
||||||
params.mem_buffer = nullptr;
|
params.mem_buffer = nullptr;
|
||||||
params.no_alloc = false;
|
params.no_alloc = false;
|
||||||
|
|
||||||
struct ggml_context* work_ctx = ggml_init(params);
|
ggml_context* work_ctx = ggml_init(params);
|
||||||
GGML_ASSERT(work_ctx != nullptr);
|
GGML_ASSERT(work_ctx != nullptr);
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -571,7 +571,7 @@ namespace ZImage {
|
|||||||
auto context = load_tensor_from_file(work_ctx, "./z_image_context.bin");
|
auto context = load_tensor_from_file(work_ctx, "./z_image_context.bin");
|
||||||
print_ggml_tensor(context);
|
print_ggml_tensor(context);
|
||||||
|
|
||||||
struct ggml_tensor* out = nullptr;
|
ggml_tensor* out = nullptr;
|
||||||
|
|
||||||
int64_t t0 = ggml_time_ms();
|
int64_t t0 = ggml_time_ms();
|
||||||
compute(8, x, timesteps, context, {}, false, &out, work_ctx);
|
compute(8, x, timesteps, context, {}, false, &out, work_ctx);
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user