mirror of https://github.com/leejet/stable-diffusion.cpp.git
synced 2026-05-08 08:18:51 +00:00

Merge branch 'master' into wan2.2_5B_flf2v

Commit: 6e5fa00c4f

.github/workflows/build.yml (vendored): 91 changed lines
@@ -21,11 +21,13 @@ on:
         "**/*.c",
         "**/*.cpp",
         "**/*.cu",
+        "examples/server/frontend/**",
       ]
   pull_request:
     types: [opened, synchronize, reopened]
     paths:
       [
         ".github/workflows/**",
         "**/CMakeLists.txt",
         "**/Makefile",
         "**/*.h",
@@ -33,6 +35,7 @@ on:
         "**/*.c",
         "**/*.cpp",
         "**/*.cu",
+        "examples/server/frontend/**",
       ]

 env:
@@ -53,6 +56,16 @@ jobs:
         with:
           submodules: recursive

+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: 20
+
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v4
+        with:
+          version: 9
+
       - name: Dependencies
         id: depends
         run: |
@@ -70,7 +83,7 @@ jobs:
       - name: Get commit hash
         id: commit
         if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
-        uses: pr-mpt/actions-commit-hash@v2
+        uses: prompt/actions-commit-hash@v2

       - name: Fetch system info
         id: system-info
@@ -106,6 +119,16 @@ jobs:
         with:
           submodules: recursive

+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: 20
+
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v4
+        with:
+          version: 9
+
       - name: Dependencies
         id: depends
         run: |
@@ -123,7 +146,7 @@ jobs:
       - name: Get commit hash
         id: commit
         if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
-        uses: pr-mpt/actions-commit-hash@v2
+        uses: prompt/actions-commit-hash@v2

       - name: Fetch system info
         id: system-info
@@ -162,7 +185,7 @@ jobs:

     strategy:
       matrix:
-        variant: [musa, sycl, vulkan]
+        variant: [musa, sycl, vulkan, cuda]

     env:
       REGISTRY: ghcr.io
@@ -174,10 +197,20 @@ jobs:
         with:
          submodules: recursive

+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: 20
+
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v4
+        with:
+          version: 9
+
       - name: Get commit hash
         id: commit
         if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
-        uses: pr-mpt/actions-commit-hash@v2
+        uses: prompt/actions-commit-hash@v2

       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
@@ -223,6 +256,16 @@ jobs:
         with:
           submodules: recursive

+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: 20
+
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v4
+        with:
+          version: 9
+
       - name: Dependencies
         id: depends
         run: |
@@ -240,7 +283,7 @@ jobs:
       - name: Get commit hash
         id: commit
         if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
-        uses: pr-mpt/actions-commit-hash@v2
+        uses: prompt/actions-commit-hash@v2

       - name: Fetch system info
         id: system-info
@@ -294,6 +337,16 @@ jobs:
         with:
           submodules: recursive

+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: 20
+
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v4
+        with:
+          version: 9
+
       - name: Install cuda-toolkit
         id: cuda-toolkit
         if: ${{ matrix.build == 'cuda12' }}
@@ -340,7 +393,7 @@ jobs:
       - name: Get commit hash
         id: commit
         if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
-        uses: pr-mpt/actions-commit-hash@v2
+        uses: prompt/actions-commit-hash@v2

       - name: Pack artifacts
         id: pack_artifacts
@@ -399,6 +452,16 @@ jobs:
         with:
           submodules: recursive

+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: 20
+
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v4
+        with:
+          version: 9
+
       - name: Cache ROCm Installation
         id: cache-rocm
         uses: actions/cache@v4
@@ -463,7 +526,7 @@ jobs:
       - name: Get commit hash
         id: commit
         if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
-        uses: pr-mpt/actions-commit-hash@v2
+        uses: prompt/actions-commit-hash@v2

       - name: Pack artifacts
         if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
@@ -502,6 +565,16 @@ jobs:
         with:
           submodules: recursive

+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version: 20
+
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v4
+        with:
+          version: 9
+
       - name: Free disk space
         run: |
           # Remove preinstalled SDKs and caches not needed for this job
@@ -581,7 +654,7 @@ jobs:
       - name: Get commit hash
         id: commit
         if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
-        uses: pr-mpt/actions-commit-hash@v2
+        uses: prompt/actions-commit-hash@v2

       - name: Prepare artifacts
         id: prepare_artifacts
@@ -660,7 +733,7 @@ jobs:

       - name: Get commit hash
         id: commit
-        uses: pr-mpt/actions-commit-hash@v2
+        uses: prompt/actions-commit-hash@v2

       - name: Create release
         id: create_release
.gitmodules (vendored): 3 changed lines

@@ -1,3 +1,6 @@
 [submodule "ggml"]
   path = ggml
   url = https://github.com/ggml-org/ggml.git
+[submodule "examples/server/frontend"]
+  path = examples/server/frontend
+  url = https://github.com/leejet/stable-ui.git
CMakeLists.txt

@@ -36,7 +36,6 @@ option(SD_VULKAN "sd: vulkan backend" OFF)
 option(SD_OPENCL "sd: opencl backend" OFF)
 option(SD_SYCL "sd: sycl backend" OFF)
 option(SD_MUSA "sd: musa backend" OFF)
-option(SD_FAST_SOFTMAX "sd: x1.5 faster softmax, indeterministic (sometimes, same seed don't generate same image), cuda only" OFF)
 option(SD_BUILD_SHARED_LIBS "sd: build shared libs" OFF)
 option(SD_BUILD_SHARED_GGML_LIB "sd: build ggml as a separate shared lib" OFF)
 option(SD_USE_SYSTEM_GGML "sd: use system-installed GGML library" OFF)
@@ -70,18 +69,12 @@ if (SD_HIPBLAS)
     message("-- Use HIPBLAS as backend stable-diffusion")
     set(GGML_HIP ON)
     add_definitions(-DSD_USE_CUDA)
-    if(SD_FAST_SOFTMAX)
-        set(GGML_CUDA_FAST_SOFTMAX ON)
-    endif()
 endif ()

 if(SD_MUSA)
     message("-- Use MUSA as backend stable-diffusion")
     set(GGML_MUSA ON)
     add_definitions(-DSD_USE_CUDA)
-    if(SD_FAST_SOFTMAX)
-        set(GGML_CUDA_FAST_SOFTMAX ON)
-    endif()
 endif()

 set(SD_LIB stable-diffusion)
Dockerfile.cuda (new file, 25 lines)

@@ -0,0 +1,25 @@
ARG CUDA_VERSION=12.6.3
ARG UBUNTU_VERSION=24.04

FROM nvidia/cuda:${CUDA_VERSION}-cudnn-devel-ubuntu${UBUNTU_VERSION} AS build

RUN apt-get update && apt-get install -y --no-install-recommends build-essential git ccache cmake

WORKDIR /sd.cpp

COPY . .

ARG CUDACXX=/usr/local/cuda/bin/nvcc
RUN cmake . -B ./build -DSD_CUDA=ON
RUN cmake --build ./build --config Release -j$(nproc)

FROM nvidia/cuda:${CUDA_VERSION}-cudnn-runtime-ubuntu${UBUNTU_VERSION} AS runtime

RUN apt-get update && \
    apt-get install --yes --no-install-recommends libgomp1 && \
    apt-get clean

COPY --from=build /sd.cpp/build/bin/sd-cli /sd-cli
COPY --from=build /sd.cpp/build/bin/sd-server /sd-server

ENTRYPOINT [ "/sd-cli" ]
@@ -5,6 +5,7 @@
 - Download Anima
   - safetensors: https://huggingface.co/circlestone-labs/Anima/tree/main/split_files/diffusion_models
   - gguf: https://huggingface.co/Bedovyy/Anima-GGUF/tree/main
+  - gguf Anima2: https://huggingface.co/JusteLeo/Anima2-GGUF/tree/main
 - Download vae
   - safetensors: https://huggingface.co/circlestone-labs/Anima/tree/main/split_files/vae
 - Download Qwen3-0.6B-Base
@@ -11,6 +11,7 @@ Caching methods accelerate diffusion inference by reusing intermediate computations
 | `dbcache`    | DiT models          | Block-level L1 residual threshold     |
 | `taylorseer` | DiT models          | Taylor series approximation           |
 | `cache-dit`  | DiT models          | Combined DBCache + TaylorSeer         |
+| `spectrum`   | UNET and DiT models | Chebyshev + Taylor output forecasting |

 ### UCache (UNET Models)

@@ -79,7 +80,7 @@ Uses Taylor series approximation to predict block outputs:
 Combines DBCache and TaylorSeer:

 ```bash
---cache-mode cache-dit --cache-preset fast
+--cache-mode cache-dit
 ```

 #### Parameters
@@ -91,14 +92,6 @@ Combines DBCache and TaylorSeer:
 | `threshold` | L1 residual difference threshold | 0.08 |
 | `warmup`    | Steps before caching starts      | 8    |

-#### Presets
-
-Available presets: `slow`, `medium`, `fast`, `ultra` (or `s`, `m`, `f`, `u`).
-
-```bash
---cache-mode cache-dit --cache-preset fast
-```
-
 #### SCM Options

 Steps Computation Mask controls which steps can be cached:
@@ -118,6 +111,28 @@ Mask values: `1` = compute, `0` = can cache.
 --scm-policy dynamic
 ```
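To make the mask semantics concrete, here is a minimal sketch of turning such a 0/1 string into per-step compute flags. It is illustrative only, not the repository's parser, and every name in it is hypothetical:

```cpp
// Hypothetical sketch: parse an SCM mask string such as "1,1,1,0,0,1,0,0,1,0"
// into per-step flags, where true = compute this step, false = may reuse cache.
#include <sstream>
#include <string>
#include <vector>

std::vector<bool> parse_scm_mask(const std::string& mask) {
    std::vector<bool> compute_step;
    std::stringstream ss(mask);
    std::string tok;
    while (std::getline(ss, tok, ',')) {
        compute_step.push_back(tok == "1");  // anything else means "can cache"
    }
    return compute_step;
}

// Per-step check: steps beyond the mask length fall back to computing.
bool must_compute(const std::vector<bool>& mask, size_t step) {
    return step >= mask.size() || mask[step];
}
```

This only covers the static reading of the mask; how the default `dynamic` policy deviates from it is not spelled out in this section.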
+### Spectrum (UNET and DiT Models)
+
+Spectrum uses Chebyshev polynomial fitting blended with Taylor extrapolation to predict denoised outputs, skipping entire forward passes. Based on the paper [Spectrum: Adaptive Spectral Feature Forecasting for Efficient Diffusion Sampling](https://github.com/tingyu215/Spectrum).
+
+```bash
+sd-cli -m model.safetensors -p "a cat" --cache-mode spectrum
+```
+
+#### Parameters
+
+| Parameter | Description                                              | Default |
+|-----------|----------------------------------------------------------|---------|
+| `w`       | Chebyshev vs Taylor blend weight (0=Taylor, 1=Chebyshev) | 0.40    |
+| `m`       | Chebyshev polynomial degree                              | 3       |
+| `lam`     | Ridge regression regularization                          | 1.0     |
+| `window`  | Initial window size (compute every N steps)              | 2       |
+| `flex`    | Window growth per computed step after warmup             | 0.50    |
+| `warmup`  | Steps to always compute before caching starts            | 4       |
+| `stop`    | Stop caching at this fraction of total steps             | 0.9     |

 ```
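To make the `w` blend concrete, below is a minimal, self-contained sketch of the forecasting idea: fit a degree-`m` Chebyshev polynomial to recent outputs with ridge regularization `lam`, extrapolate one step ahead, and blend with a first-order Taylor (finite-difference) extrapolation. This is an illustration of the technique named above, not the repository's implementation, and all names here are hypothetical:

```cpp
// Illustrative Spectrum-style forecast on a scalar history (not sd.cpp code).
#include <cmath>
#include <vector>

// Evaluate Chebyshev basis T_0..T_m at t in [-1, 1].
static std::vector<double> cheb_basis(double t, int m) {
    std::vector<double> T(m + 1);
    T[0] = 1.0;
    if (m >= 1) T[1] = t;
    for (int k = 2; k <= m; ++k) T[k] = 2.0 * t * T[k - 1] - T[k - 2];
    return T;
}

// Ridge-regularized least squares: minimize |A c - y|^2 + lam |c|^2 via the
// (m+1)x(m+1) normal equations, solved with plain Gaussian elimination.
static std::vector<double> cheb_fit(const std::vector<double>& ts,
                                    const std::vector<double>& ys,
                                    int m, double lam) {
    int n = m + 1;
    std::vector<double> G(n * n, 0.0), b(n, 0.0);
    for (size_t i = 0; i < ts.size(); ++i) {
        std::vector<double> T = cheb_basis(ts[i], m);
        for (int r = 0; r < n; ++r) {
            b[r] += T[r] * ys[i];
            for (int c = 0; c < n; ++c) G[r * n + c] += T[r] * T[c];
        }
    }
    for (int r = 0; r < n; ++r) G[r * n + r] += lam;  // ridge term keeps G invertible
    for (int col = 0; col < n; ++col) {               // forward elimination
        for (int r = col + 1; r < n; ++r) {
            double f = G[r * n + col] / G[col * n + col];
            for (int c = col; c < n; ++c) G[r * n + c] -= f * G[col * n + c];
            b[r] -= f * b[col];
        }
    }
    std::vector<double> coef(n);                      // back substitution
    for (int r = n - 1; r >= 0; --r) {
        double s = b[r];
        for (int c = r + 1; c < n; ++c) s -= G[r * n + c] * coef[c];
        coef[r] = s / G[r * n + r];
    }
    return coef;
}

// Blend: w = 0 -> pure Taylor (linear extrapolation), w = 1 -> pure Chebyshev.
double spectrum_forecast(const std::vector<double>& history, int m, double lam, double w) {
    const size_t n = history.size();
    if (n < 2) return history.back();  // not enough points to extrapolate
    std::vector<double> ts(n);
    for (size_t i = 0; i < n; ++i) ts[i] = -1.0 + 2.0 * double(i) / double(n);
    std::vector<double> coef  = cheb_fit(ts, history, m, lam);
    std::vector<double> Tnext = cheb_basis(1.0, m);   // the next step maps to t = 1
    double cheb = 0.0;
    for (int k = 0; k <= m; ++k) cheb += coef[k] * Tnext[k];
    // first-order Taylor: linear extrapolation from the last two computed points
    double taylor = history[n - 1] + (history[n - 1] - history[n - 2]);
    return w * cheb + (1.0 - w) * taylor;
}
```

In practice the forecast would be applied element-wise to the denoised output across sampling steps; this scalar version only illustrates how `w`, `m`, and `lam` interact.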
 ### Performance Tips

 - Start with default thresholds and adjust based on output quality
@@ -138,11 +138,12 @@ Generation Options:
     --skip-layers                   layers to skip for SLG steps (default: [7,8,9])
     --high-noise-skip-layers        (high noise) layers to skip for SLG steps (default: [7,8,9])
 -r, --ref-image                     reference image for Flux Kontext models (can be used multiple times)
-    --cache-mode                    caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level)
+    --cache-mode                    caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level),
+                                    'spectrum' (UNET/DiT Chebyshev+Taylor forecasting)
     --cache-option                  named cache params (key=value format, comma-separated). easycache/ucache:
-                                    threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. Examples:
-                                    "threshold=0.25" or "threshold=1.5,reset=0"
-    --cache-preset                  cache-dit preset: 'slow'/'s', 'medium'/'m', 'fast'/'f', 'ultra'/'u'
+                                    threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=;
+                                    spectrum: w=,m=,lam=,window=,flex=,warmup=,stop=. Examples:
+                                    "threshold=0.25" or "threshold=1.5,reset=0" or "w=0.4,window=2"
     --scm-mask                      SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache
     --scm-policy                    SCM policy: 'dynamic' (default) or 'static'
 ```
@@ -601,7 +601,7 @@ int main(int argc, const char* argv[]) {

     if (gen_params.end_image_path.size() > 0) {
         vae_decode_only = false;
-        if (!load_image_and_update_size(gen_params.init_image_path, end_image)) {
+        if (!load_image_and_update_size(gen_params.end_image_path, end_image)) {
             return 1;
         }
     }

@@ -1047,7 +1047,6 @@ struct SDGenerationParams {

     std::string cache_mode;
     std::string cache_option;
-    std::string cache_preset;
     std::string scm_mask;
     bool scm_policy_dynamic = true;
     sd_cache_params_t cache_params{};
@@ -1422,8 +1421,8 @@ struct SDGenerationParams {
         }
         cache_mode = argv_to_utf8(index, argv);
         if (cache_mode != "easycache" && cache_mode != "ucache" &&
-            cache_mode != "dbcache" && cache_mode != "taylorseer" && cache_mode != "cache-dit") {
-            fprintf(stderr, "error: invalid cache mode '%s', must be 'easycache', 'ucache', 'dbcache', 'taylorseer', or 'cache-dit'\n", cache_mode.c_str());
+            cache_mode != "dbcache" && cache_mode != "taylorseer" && cache_mode != "cache-dit" && cache_mode != "spectrum") {
+            fprintf(stderr, "error: invalid cache mode '%s', must be 'easycache', 'ucache', 'dbcache', 'taylorseer', 'cache-dit', or 'spectrum'\n", cache_mode.c_str());
             return -1;
         }
         return 1;
@@ -1461,21 +1460,6 @@ struct SDGenerationParams {
         return 1;
     };

-    auto on_cache_preset_arg = [&](int argc, const char** argv, int index) {
-        if (++index >= argc) {
-            return -1;
-        }
-        cache_preset = argv_to_utf8(index, argv);
-        if (cache_preset != "slow" && cache_preset != "s" && cache_preset != "S" &&
-            cache_preset != "medium" && cache_preset != "m" && cache_preset != "M" &&
-            cache_preset != "fast" && cache_preset != "f" && cache_preset != "F" &&
-            cache_preset != "ultra" && cache_preset != "u" && cache_preset != "U") {
-            fprintf(stderr, "error: invalid cache preset '%s', must be 'slow'/'s', 'medium'/'m', 'fast'/'f', or 'ultra'/'u'\n", cache_preset.c_str());
-            return -1;
-        }
-        return 1;
-    };

     options.manual_options = {
         {"-s",
          "--seed",
@@ -1513,16 +1497,12 @@ struct SDGenerationParams {
          on_ref_image_arg},
         {"",
          "--cache-mode",
-         "caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level)",
+         "caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level), 'spectrum' (UNET/DiT Chebyshev+Taylor forecasting)",
          on_cache_mode_arg},
         {"",
          "--cache-option",
-         "named cache params (key=value format, comma-separated). easycache/ucache: threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. Examples: \"threshold=0.25\" or \"threshold=1.5,reset=0\"",
+         "named cache params (key=value format, comma-separated). easycache/ucache: threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=; spectrum: w=,m=,lam=,window=,flex=,warmup=,stop=. Examples: \"threshold=0.25\" or \"threshold=1.5,reset=0\"",
          on_cache_option_arg},
-        {"",
-         "--cache-preset",
-         "cache-dit preset: 'slow'/'s', 'medium'/'m', 'fast'/'f', 'ultra'/'u'",
-         on_cache_preset_arg},
         {"",
          "--scm-mask",
          "SCM steps mask for cache-dit: comma-separated 0/1 (e.g., \"1,1,1,0,0,1,0,0,1,0\") - 1=compute, 0=can cache",
@@ -1575,7 +1555,6 @@ struct SDGenerationParams {
     load_if_exists("negative_prompt", negative_prompt);
     load_if_exists("cache_mode", cache_mode);
     load_if_exists("cache_option", cache_option);
-    load_if_exists("cache_preset", cache_preset);
     load_if_exists("scm_mask", scm_mask);

     load_if_exists("clip_skip", clip_skip);
@@ -1779,7 +1758,23 @@ struct SDGenerationParams {
             } else if (key == "Bn" || key == "bn") {
                 cache_params.Bn_compute_blocks = std::stoi(val);
             } else if (key == "warmup") {
-                cache_params.max_warmup_steps = std::stoi(val);
+                if (cache_mode == "spectrum") {
+                    cache_params.spectrum_warmup_steps = std::stoi(val);
+                } else {
+                    cache_params.max_warmup_steps = std::stoi(val);
+                }
+            } else if (key == "w") {
+                cache_params.spectrum_w = std::stof(val);
+            } else if (key == "m") {
+                cache_params.spectrum_m = std::stoi(val);
+            } else if (key == "lam") {
+                cache_params.spectrum_lam = std::stof(val);
+            } else if (key == "window") {
+                cache_params.spectrum_window_size = std::stoi(val);
+            } else if (key == "flex") {
+                cache_params.spectrum_flex_window = std::stof(val);
+            } else if (key == "stop") {
+                cache_params.spectrum_stop_percent = std::stof(val);
             } else {
                 LOG_ERROR("error: unknown cache parameter '%s'", key.c_str());
                 return false;
@@ -1795,38 +1790,16 @@ struct SDGenerationParams {
     if (!cache_mode.empty()) {
         if (cache_mode == "easycache") {
             cache_params.mode = SD_CACHE_EASYCACHE;
-            cache_params.reuse_threshold = 0.2f;
-            cache_params.start_percent = 0.15f;
-            cache_params.end_percent = 0.95f;
-            cache_params.error_decay_rate = 1.0f;
-            cache_params.use_relative_threshold = true;
-            cache_params.reset_error_on_compute = true;
         } else if (cache_mode == "ucache") {
             cache_params.mode = SD_CACHE_UCACHE;
-            cache_params.reuse_threshold = 1.0f;
-            cache_params.start_percent = 0.15f;
-            cache_params.end_percent = 0.95f;
-            cache_params.error_decay_rate = 1.0f;
-            cache_params.use_relative_threshold = true;
-            cache_params.reset_error_on_compute = true;
         } else if (cache_mode == "dbcache") {
             cache_params.mode = SD_CACHE_DBCACHE;
-            cache_params.Fn_compute_blocks = 8;
-            cache_params.Bn_compute_blocks = 0;
-            cache_params.residual_diff_threshold = 0.08f;
-            cache_params.max_warmup_steps = 8;
         } else if (cache_mode == "taylorseer") {
             cache_params.mode = SD_CACHE_TAYLORSEER;
-            cache_params.Fn_compute_blocks = 8;
-            cache_params.Bn_compute_blocks = 0;
-            cache_params.residual_diff_threshold = 0.08f;
-            cache_params.max_warmup_steps = 8;
         } else if (cache_mode == "cache-dit") {
             cache_params.mode = SD_CACHE_CACHE_DIT;
-            cache_params.Fn_compute_blocks = 8;
-            cache_params.Bn_compute_blocks = 0;
-            cache_params.residual_diff_threshold = 0.08f;
-            cache_params.max_warmup_steps = 8;
+        } else if (cache_mode == "spectrum") {
+            cache_params.mode = SD_CACHE_SPECTRUM;
         }

         if (!cache_option.empty()) {
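For reference, a small sketch of the comma-separated key=value tokenization that feeds a dispatch like the one above. This is illustrative only, not the repository's parser, and the function name is hypothetical:

```cpp
// Illustrative only: split "w=0.4,window=2" into (key, value) pairs.
#include <sstream>
#include <string>
#include <utility>
#include <vector>

std::vector<std::pair<std::string, std::string>> parse_cache_options(const std::string& s) {
    std::vector<std::pair<std::string, std::string>> kv;
    std::stringstream ss(s);
    std::string item;
    while (std::getline(ss, item, ',')) {
        size_t eq = item.find('=');
        if (eq == std::string::npos) continue;  // skip malformed entries
        kv.emplace_back(item.substr(0, eq), item.substr(eq + 1));
    }
    return kv;
}
```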
examples/server/CMakeLists.txt

@@ -1,6 +1,73 @@
 set(TARGET sd-server)

+option(SD_SERVER_BUILD_FRONTEND "Build server frontend with pnpm" ON)
+
+set(FRONTEND_DIR "${CMAKE_CURRENT_SOURCE_DIR}/frontend")
+set(GENERATED_HTML_HEADER "${FRONTEND_DIR}/dist/gen_index_html.h")
+
+set(HAVE_FRONTEND_BUILD OFF)
+
+if(SD_SERVER_BUILD_FRONTEND AND EXISTS "${FRONTEND_DIR}")
+    if(WIN32)
+        find_program(PNPM_EXECUTABLE NAMES pnpm.cmd pnpm)
+    else()
+        find_program(PNPM_EXECUTABLE NAMES pnpm)
+    endif()
+
+    if(PNPM_EXECUTABLE)
+        message(STATUS "Frontend dir found: ${FRONTEND_DIR}")
+        message(STATUS "pnpm found: ${PNPM_EXECUTABLE}")
+
+        set(HAVE_FRONTEND_BUILD ON)
+
+        add_custom_target(${TARGET}_frontend_install
+            COMMAND "${PNPM_EXECUTABLE}" -C "${FRONTEND_DIR}" install
+            WORKING_DIRECTORY "${FRONTEND_DIR}"
+            COMMENT "Installing frontend dependencies"
+            VERBATIM
+        )
+
+        add_custom_target(${TARGET}_frontend_build
+            COMMAND "${PNPM_EXECUTABLE}" -C "${FRONTEND_DIR}" run build
+            WORKING_DIRECTORY "${FRONTEND_DIR}"
+            COMMENT "Building frontend"
+            VERBATIM
+        )
+
+        add_custom_target(${TARGET}_frontend_header
+            COMMAND "${PNPM_EXECUTABLE}" -C "${FRONTEND_DIR}" run build:header
+            WORKING_DIRECTORY "${FRONTEND_DIR}"
+            COMMENT "Generating gen_index_html.h"
+            VERBATIM
+        )
+
+        add_dependencies(${TARGET}_frontend_build ${TARGET}_frontend_install)
+        add_dependencies(${TARGET}_frontend_header ${TARGET}_frontend_build)
+
+        add_custom_target(${TARGET}_frontend
+            DEPENDS ${TARGET}_frontend_header
+        )
+
+        set_source_files_properties("${GENERATED_HTML_HEADER}" PROPERTIES GENERATED TRUE)
+    else()
+        message(WARNING "pnpm not found, frontend build disabled")
+    endif()
+else()
+    message(STATUS "Frontend disabled or directory not found: ${FRONTEND_DIR}")
+endif()
+
 add_executable(${TARGET} main.cpp)

+if(HAVE_FRONTEND_BUILD)
+    add_dependencies(${TARGET} ${TARGET}_frontend)
+    target_sources(${TARGET} PRIVATE "${GENERATED_HTML_HEADER}")
+    target_include_directories(${TARGET} PRIVATE "${FRONTEND_DIR}/dist")
+    target_compile_definitions(${TARGET} PRIVATE HAVE_INDEX_HTML)
+    message(STATUS "HAVE_INDEX_HTML enabled")
+else()
+    message(STATUS "HAVE_INDEX_HTML disabled")
+endif()
+
 install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE stable-diffusion ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PUBLIC c_std_11 cxx_std_17)
examples/server/README.md

@@ -1,3 +1,92 @@
+# Frontend
+
+## Build with Frontend
+
+The server can optionally build the web frontend and embed it into the binary as `gen_index_html.h`.
+
+### Requirements
+
+Install the following tools:
+
+* **Node.js** ≥ 22.18
+  https://nodejs.org/
+
+* **pnpm** ≥ 10
+  Install via npm:
+
+  ```bash
+  npm install -g pnpm
+  ```
+
+Verify installation:
+
+```bash
+node -v
+pnpm -v
+```
+
+### Install frontend dependencies
+
+Go to the frontend directory and install dependencies:
+
+```bash
+cd examples/server/frontend
+pnpm install
+```
+
+### Build the server with CMake
+
+Enable the frontend build option when configuring CMake:
+
+```bash
+cmake -B build -DSD_SERVER_BUILD_FRONTEND=ON
+cmake --build build --config Release
+```
+
+If `pnpm` is available, the build system will automatically run:
+
+```
+pnpm run build
+pnpm run build:header
+```
+
+and embed the generated frontend into the server binary.
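For orientation, the generated header consumed by the server presumably exposes the page bytes and their length. A hypothetical sketch of its shape follows; the real file is produced by `pnpm run build:header`, and these exact declarations are an assumption based only on how `main.cpp` uses the two symbols:

```cpp
// Hypothetical sketch of frontend/dist/gen_index_html.h. The actual file is
// generated by `pnpm run build:header`; main.cpp relies only on these symbols.
#pragma once
#include <cstddef>

// The built index.html, embedded as raw bytes.
static const unsigned char index_html_bytes[] = {
    0x3c, 0x21, 0x44, 0x4f, 0x43, 0x54, 0x59, 0x50, 0x45, /* ... "<!DOCTYPE..." ... */
};

// Number of bytes in index_html_bytes.
static const size_t index_html_size = sizeof(index_html_bytes);
```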
+## Frontend Repository
+
+The web frontend is maintained in a **separate repository**: https://github.com/leejet/stable-ui.
+
+If you want to modify the UI or frontend logic, please submit pull requests to the **frontend repository**.
+
+This repository (`stable-diffusion.cpp`) only vendors the frontend periodically. Changes from the frontend repo are synchronized:
+
+* approximately **every 1–2 weeks**, or
+* when there are **major frontend updates**
+
+Because of this, frontend changes will **not appear here immediately** after being merged upstream.
+
+## Using an external frontend
+
+By default, the server uses the **embedded frontend** generated during the build (`gen_index_html.h`).
+
+You can also serve a custom frontend file instead of the embedded one by using:
+
+```bash
+--serve-html-path <path-to-index.html>
+```
+
+For example:
+
+```bash
+sd-server --serve-html-path ./index.html
+```
+
+In this case, the server will load and serve the specified `index.html` file instead of the embedded frontend. This is useful when:
+
+* developing or testing frontend changes
+* using a custom UI
+* avoiding rebuilding the binary after frontend modifications
+
 # Run

 ```
@@ -129,11 +218,10 @@ Default Generation Options:
     --skip-layers                   layers to skip for SLG steps (default: [7,8,9])
     --high-noise-skip-layers        (high noise) layers to skip for SLG steps (default: [7,8,9])
 -r, --ref-image                     reference image for Flux Kontext models (can be used multiple times)
-    --cache-mode                    caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level)
+    --cache-mode                    caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level), 'spectrum' (UNET/DiT Chebyshev+Taylor forecasting)
     --cache-option                  named cache params (key=value format, comma-separated). easycache/ucache:
                                     threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. Examples:
                                     "threshold=0.25" or "threshold=1.5,reset=0"
-    --cache-preset                  cache-dit preset: 'slow'/'s', 'medium'/'m', 'fast'/'f', 'ultra'/'u'
     --scm-mask                      SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache
     --scm-policy                    SCM policy: 'dynamic' (default) or 'static'
 ```
examples/server/frontend (new submodule)

@@ -0,0 +1 @@
+Subproject commit 1a34176cd6d39ad3a226b2b69047e71f6797f6bc
examples/server/main.cpp

@@ -13,6 +13,10 @@

 #include "common/common.hpp"

+#ifdef HAVE_INDEX_HTML
+#include "frontend/dist/gen_index_html.h"
+#endif
+
 namespace fs = std::filesystem;

 // ----------------------- helpers -----------------------
@@ -380,7 +384,13 @@ int main(int argc, const char** argv) {
         return httplib::Server::HandlerResponse::Unhandled;
     });

-    // root
+    // index html
+    std::string index_html;
+#ifdef HAVE_INDEX_HTML
+    index_html.assign(reinterpret_cast<const char*>(index_html_bytes), index_html_size);
+#else
+    index_html = "Stable Diffusion Server is running";
+#endif
     svr.Get("/", [&](const httplib::Request&, httplib::Response& res) {
         if (!svr_params.serve_html_path.empty()) {
             std::ifstream file(svr_params.serve_html_path);
@@ -392,7 +402,7 @@ int main(int argc, const char** argv) {
                 res.set_content("Error: Unable to read HTML file", "text/plain");
             }
         } else {
-            res.set_content("Stable Diffusion Server is running", "text/plain");
+            res.set_content(index_html, "text/html");
         }
     });

stable-diffusion.h

@@ -251,6 +251,7 @@ enum sd_cache_mode_t {
     SD_CACHE_DBCACHE,
     SD_CACHE_TAYLORSEER,
     SD_CACHE_CACHE_DIT,
+    SD_CACHE_SPECTRUM,
 };

 typedef struct {
@@ -271,6 +272,13 @@ typedef struct {
     int taylorseer_skip_interval;
     const char* scm_mask;
     bool scm_policy_dynamic;
+    float spectrum_w;
+    int spectrum_m;
+    float spectrum_lam;
+    int spectrum_window_size;
+    float spectrum_flex_window;
+    int spectrum_warmup_steps;
+    float spectrum_stop_percent;
 } sd_cache_params_t;

 typedef struct {
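As a usage illustration, a caller could select Spectrum through the public header like this. The field values mirror the defaults documented above; this is a sketch of API usage, not code from this commit:

```cpp
// Illustrative sketch: configure Spectrum caching via the public C API types.
#include "stable-diffusion.h"

sd_cache_params_t make_spectrum_params() {
    sd_cache_params_t p = {};           // zero-initialize everything else
    p.mode                  = SD_CACHE_SPECTRUM;
    p.spectrum_w            = 0.40f;    // Chebyshev vs Taylor blend weight
    p.spectrum_m            = 3;        // Chebyshev polynomial degree
    p.spectrum_lam          = 1.0f;     // ridge regularization
    p.spectrum_window_size  = 2;        // compute every N steps initially
    p.spectrum_flex_window  = 0.50f;    // window growth per computed step
    p.spectrum_warmup_steps = 4;        // always compute the first steps
    p.spectrum_stop_percent = 0.9f;     // stop caching near the end of sampling
    return p;
}
```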
src/anima.hpp: 143 changed lines

@@ -13,9 +13,9 @@
 namespace Anima {
     constexpr int ANIMA_GRAPH_SIZE = 65536;

-    __STATIC_INLINE__ struct ggml_tensor* apply_gate(struct ggml_context* ctx,
-                                                     struct ggml_tensor* x,
-                                                     struct ggml_tensor* gate) {
+    __STATIC_INLINE__ ggml_tensor* apply_gate(ggml_context* ctx,
+                                              ggml_tensor* x,
+                                              ggml_tensor* gate) {
         gate = ggml_reshape_3d(ctx, gate, gate->ne[0], 1, gate->ne[1]);  // [N, 1, C]
         return ggml_mul(ctx, x, gate);
     }
@@ -26,7 +26,7 @@ namespace Anima {
         blocks["proj.1"] = std::make_shared<Linear>(in_dim, out_dim, false);
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
         auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj.1"]);
         return proj->forward(ctx, x);
     }
@@ -39,7 +39,7 @@ namespace Anima {
         blocks["1.linear_2"] = std::make_shared<Linear>(in_dim, out_dim, false);
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
         auto linear_1 = std::dynamic_pointer_cast<Linear>(blocks["1.linear_1"]);
         auto linear_2 = std::dynamic_pointer_cast<Linear>(blocks["1.linear_2"]);

@@ -62,10 +62,10 @@ namespace Anima {
         blocks["2"] = std::make_shared<Linear>(hidden_features, 3 * in_features, false);
     }

-    std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
-                                                                struct ggml_tensor* hidden_states,
-                                                                struct ggml_tensor* embedded_timestep,
-                                                                struct ggml_tensor* temb = nullptr) {
+    std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
+                                                  ggml_tensor* hidden_states,
+                                                  ggml_tensor* embedded_timestep,
+                                                  ggml_tensor* temb = nullptr) {
         auto norm     = std::dynamic_pointer_cast<LayerNorm>(blocks["norm"]);
         auto linear_1 = std::dynamic_pointer_cast<Linear>(blocks["1"]);
         auto linear_2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);
@@ -102,10 +102,10 @@ namespace Anima {
         blocks["2"] = std::make_shared<Linear>(hidden_features, 2 * in_features, false);
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
-                                struct ggml_tensor* hidden_states,
-                                struct ggml_tensor* embedded_timestep,
-                                struct ggml_tensor* temb = nullptr) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx,
+                         ggml_tensor* hidden_states,
+                         ggml_tensor* embedded_timestep,
+                         ggml_tensor* temb = nullptr) {
         auto norm     = std::dynamic_pointer_cast<LayerNorm>(blocks["norm"]);
         auto linear_1 = std::dynamic_pointer_cast<Linear>(blocks["1"]);
         auto linear_2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);
@@ -152,11 +152,11 @@ namespace Anima {
         blocks[this->out_proj_name] = std::make_shared<Linear>(inner_dim, query_dim, false);
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
-                                struct ggml_tensor* hidden_states,
-                                struct ggml_tensor* encoder_hidden_states = nullptr,
-                                struct ggml_tensor* pe_q = nullptr,
-                                struct ggml_tensor* pe_k = nullptr) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx,
+                         ggml_tensor* hidden_states,
+                         ggml_tensor* encoder_hidden_states = nullptr,
+                         ggml_tensor* pe_q = nullptr,
+                         ggml_tensor* pe_k = nullptr) {
         if (encoder_hidden_states == nullptr) {
             encoder_hidden_states = hidden_states;
         }
@@ -183,7 +183,7 @@ namespace Anima {
         q4 = q_norm->forward(ctx, q4);
         k4 = k_norm->forward(ctx, k4);

-        struct ggml_tensor* attn_out = nullptr;
+        ggml_tensor* attn_out = nullptr;
         if (pe_q != nullptr || pe_k != nullptr) {
             if (pe_q == nullptr) {
                 pe_q = pe_k;
@@ -227,7 +227,7 @@ namespace Anima {
         blocks["layer2"] = std::make_shared<Linear>(hidden_dim, dim, false);
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
         auto layer1 = std::dynamic_pointer_cast<Linear>(blocks["layer1"]);
         auto layer2 = std::dynamic_pointer_cast<Linear>(blocks["layer2"]);

@@ -245,7 +245,7 @@ namespace Anima {
         blocks["2"] = std::make_shared<Linear>(hidden_dim, dim, true);
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
         auto layer0 = std::dynamic_pointer_cast<Linear>(blocks["0"]);
         auto layer2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);

@@ -267,11 +267,11 @@ namespace Anima {
         blocks["mlp"] = std::make_shared<AdapterMLP>(model_dim, model_dim * 4);
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
-                                struct ggml_tensor* x,
-                                struct ggml_tensor* context,
-                                struct ggml_tensor* target_pe,
-                                struct ggml_tensor* context_pe) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx,
+                         ggml_tensor* x,
+                         ggml_tensor* context,
+                         ggml_tensor* target_pe,
+                         ggml_tensor* context_pe) {
         auto norm_self_attn  = std::dynamic_pointer_cast<RMSNorm>(blocks["norm_self_attn"]);
         auto self_attn       = std::dynamic_pointer_cast<AnimaAttention>(blocks["self_attn"]);
         auto norm_cross_attn = std::dynamic_pointer_cast<RMSNorm>(blocks["norm_cross_attn"]);
@@ -317,11 +317,11 @@ namespace Anima {
         blocks["norm"] = std::make_shared<RMSNorm>(target_dim, 1e-6f);
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
-                                struct ggml_tensor* source_hidden_states,
-                                struct ggml_tensor* target_input_ids,
-                                struct ggml_tensor* target_pe,
-                                struct ggml_tensor* source_pe) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx,
+                         ggml_tensor* source_hidden_states,
+                         ggml_tensor* target_input_ids,
+                         ggml_tensor* target_pe,
+                         ggml_tensor* source_pe) {
         GGML_ASSERT(target_input_ids != nullptr);
         if (ggml_n_dims(target_input_ids) == 1) {
             target_input_ids = ggml_reshape_2d(ctx->ggml_ctx, target_input_ids, target_input_ids->ne[0], 1);
@@ -360,12 +360,12 @@ namespace Anima {
         blocks["mlp"] = std::make_shared<AnimaMLP>(hidden_size, hidden_size * mlp_ratio);
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
-                                struct ggml_tensor* hidden_states,
-                                struct ggml_tensor* encoder_hidden_states,
-                                struct ggml_tensor* embedded_timestep,
-                                struct ggml_tensor* temb,
-                                struct ggml_tensor* image_pe) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx,
+                         ggml_tensor* hidden_states,
+                         ggml_tensor* encoder_hidden_states,
+                         ggml_tensor* embedded_timestep,
+                         ggml_tensor* temb,
+                         ggml_tensor* image_pe) {
         auto norm1 = std::dynamic_pointer_cast<AdaLayerNormZero>(blocks["adaln_modulation_self_attn"]);
         auto attn1 = std::dynamic_pointer_cast<AnimaAttention>(blocks["self_attn"]);
         auto norm2 = std::dynamic_pointer_cast<AdaLayerNormZero>(blocks["adaln_modulation_cross_attn"]);
@@ -402,10 +402,10 @@ namespace Anima {
         blocks["linear"] = std::make_shared<Linear>(hidden_size, patch_size * patch_size * out_channels, false);
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
-                                struct ggml_tensor* hidden_states,
-                                struct ggml_tensor* embedded_timestep,
-                                struct ggml_tensor* temb) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx,
+                         ggml_tensor* hidden_states,
+                         ggml_tensor* embedded_timestep,
+                         ggml_tensor* temb) {
         auto adaln  = std::dynamic_pointer_cast<AdaLayerNorm>(blocks["adaln_modulation"]);
         auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]);

@@ -445,15 +445,15 @@ namespace Anima {
         blocks["llm_adapter"] = std::make_shared<LLMAdapter>(1024, 1024, 1024, 6, 16);
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
-                                struct ggml_tensor* x,
-                                struct ggml_tensor* timestep,
-                                struct ggml_tensor* encoder_hidden_states,
-                                struct ggml_tensor* image_pe,
-                                struct ggml_tensor* t5_ids = nullptr,
-                                struct ggml_tensor* t5_weights = nullptr,
-                                struct ggml_tensor* adapter_q_pe = nullptr,
-                                struct ggml_tensor* adapter_k_pe = nullptr) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx,
+                         ggml_tensor* x,
+                         ggml_tensor* timestep,
+                         ggml_tensor* encoder_hidden_states,
+                         ggml_tensor* image_pe,
+                         ggml_tensor* t5_ids = nullptr,
+                         ggml_tensor* t5_weights = nullptr,
+                         ggml_tensor* adapter_q_pe = nullptr,
+                         ggml_tensor* adapter_k_pe = nullptr) {
         GGML_ASSERT(x->ne[3] == 1);

         auto x_embedder = std::dynamic_pointer_cast<XEmbedder>(blocks["x_embedder"]);
@@ -553,7 +553,7 @@ namespace Anima {
         return "anima";
     }

-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
+    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
         net.get_param_tensors(tensors, prefix + ".net");
     }

@@ -602,19 +602,18 @@ namespace Anima {
         return Rope::embed_nd(ids, bs, axis_thetas, axes_dim);
     }

-    struct ggml_cgraph* build_graph(struct ggml_tensor* x,
-                                    struct ggml_tensor* timesteps,
-                                    struct ggml_tensor* context,
-                                    struct ggml_tensor* t5_ids = nullptr,
-                                    struct ggml_tensor* t5_weights = nullptr) {
+    ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
+                             const sd::Tensor<float>& timesteps_tensor,
+                             const sd::Tensor<float>& context_tensor = {},
+                             const sd::Tensor<int32_t>& t5_ids_tensor = {},
+                             const sd::Tensor<float>& t5_weights_tensor = {}) {
+        ggml_tensor* x          = make_input(x_tensor);
+        ggml_tensor* timesteps  = make_input(timesteps_tensor);
+        ggml_tensor* context    = make_optional_input(context_tensor);
+        ggml_tensor* t5_ids     = make_optional_input(t5_ids_tensor);
+        ggml_tensor* t5_weights = make_optional_input(t5_weights_tensor);
         GGML_ASSERT(x->ne[3] == 1);
-        struct ggml_cgraph* gf = new_graph_custom(ANIMA_GRAPH_SIZE);
-
-        x          = to_backend(x);
-        timesteps  = to_backend(timesteps);
-        context    = to_backend(context);
-        t5_ids     = to_backend(t5_ids);
-        t5_weights = to_backend(t5_weights);
+        ggml_cgraph* gf = new_graph_custom(ANIMA_GRAPH_SIZE);

         int64_t pad_h = (net.patch_size - x->ne[1] % net.patch_size) % net.patch_size;
         int64_t pad_w = (net.patch_size - x->ne[0] % net.patch_size) % net.patch_size;
@@ -667,18 +666,16 @@ namespace Anima {
         return gf;
     }

-    bool compute(int n_threads,
-                 struct ggml_tensor* x,
-                 struct ggml_tensor* timesteps,
-                 struct ggml_tensor* context,
-                 struct ggml_tensor* t5_ids = nullptr,
-                 struct ggml_tensor* t5_weights = nullptr,
-                 struct ggml_tensor** output = nullptr,
-                 struct ggml_context* output_ctx = nullptr) {
-        auto get_graph = [&]() -> struct ggml_cgraph* {
+    sd::Tensor<float> compute(int n_threads,
+                              const sd::Tensor<float>& x,
+                              const sd::Tensor<float>& timesteps,
+                              const sd::Tensor<float>& context = {},
+                              const sd::Tensor<int32_t>& t5_ids = {},
+                              const sd::Tensor<float>& t5_weights = {}) {
+        auto get_graph = [&]() -> ggml_cgraph* {
             return build_graph(x, timesteps, context, t5_ids, t5_weights);
         };
-        return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
+        return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
     }
 };
 } // namespace Anima
852
src/auto_encoder_kl.hpp
Normal file
852
src/auto_encoder_kl.hpp
Normal file
@ -0,0 +1,852 @@
|
||||
#ifndef __AUTO_ENCODER_KL_HPP__
|
||||
#define __AUTO_ENCODER_KL_HPP__
|
||||
|
||||
#include "vae.hpp"
|
||||
|
||||
/*================================================== AutoEncoderKL ===================================================*/
|
||||
|
||||
#define VAE_GRAPH_SIZE 20480
|
||||
|
||||
class ResnetBlock : public UnaryBlock {
|
||||
protected:
|
||||
int64_t in_channels;
|
||||
int64_t out_channels;
|
||||
|
||||
public:
|
||||
ResnetBlock(int64_t in_channels,
|
||||
int64_t out_channels)
|
||||
: in_channels(in_channels),
|
||||
out_channels(out_channels) {
|
||||
// temb_channels is always 0
|
||||
blocks["norm1"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(in_channels));
|
||||
blocks["conv1"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
|
||||
|
||||
blocks["norm2"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(out_channels));
|
||||
blocks["conv2"] = std::shared_ptr<GGMLBlock>(new Conv2d(out_channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
|
||||
|
||||
if (out_channels != in_channels) {
|
||||
blocks["nin_shortcut"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, out_channels, {1, 1}));
|
||||
}
|
||||
}
|
||||
|
||||
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||
// x: [N, in_channels, h, w]
|
||||
// t_emb is always None
|
||||
auto norm1 = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm1"]);
|
||||
auto conv1 = std::dynamic_pointer_cast<Conv2d>(blocks["conv1"]);
|
||||
auto norm2 = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm2"]);
|
||||
auto conv2 = std::dynamic_pointer_cast<Conv2d>(blocks["conv2"]);
|
||||
|
||||
auto h = x;
|
||||
h = norm1->forward(ctx, h);
|
||||
h = ggml_silu_inplace(ctx->ggml_ctx, h); // swish
|
||||
h = conv1->forward(ctx, h);
|
||||
// return h;
|
||||
|
||||
h = norm2->forward(ctx, h);
|
||||
h = ggml_silu_inplace(ctx->ggml_ctx, h); // swish
|
||||
// dropout, skip for inference
|
||||
h = conv2->forward(ctx, h);
|
||||
|
||||
// skip connection
|
||||
if (out_channels != in_channels) {
|
||||
auto nin_shortcut = std::dynamic_pointer_cast<Conv2d>(blocks["nin_shortcut"]);
|
||||
|
||||
x = nin_shortcut->forward(ctx, x); // [N, out_channels, h, w]
|
||||
}
|
||||
|
||||
h = ggml_add(ctx->ggml_ctx, h, x);
|
||||
return h; // [N, out_channels, h, w]
|
||||
}
|
||||
};
|
||||
|
||||
class AttnBlock : public UnaryBlock {
|
||||
protected:
|
||||
int64_t in_channels;
|
||||
bool use_linear;
|
||||
|
||||
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") {
|
||||
auto iter = tensor_storage_map.find(prefix + "proj_out.weight");
|
||||
if (iter != tensor_storage_map.end()) {
|
||||
if (iter->second.n_dims == 4 && use_linear) {
|
||||
use_linear = false;
|
||||
blocks["q"] = std::make_shared<Conv2d>(in_channels, in_channels, std::pair{1, 1});
|
||||
blocks["k"] = std::make_shared<Conv2d>(in_channels, in_channels, std::pair{1, 1});
|
||||
blocks["v"] = std::make_shared<Conv2d>(in_channels, in_channels, std::pair{1, 1});
|
||||
blocks["proj_out"] = std::make_shared<Conv2d>(in_channels, in_channels, std::pair{1, 1});
|
||||
} else if (iter->second.n_dims == 2 && !use_linear) {
|
||||
use_linear = true;
|
||||
blocks["q"] = std::make_shared<Linear>(in_channels, in_channels);
|
||||
blocks["k"] = std::make_shared<Linear>(in_channels, in_channels);
|
||||
blocks["v"] = std::make_shared<Linear>(in_channels, in_channels);
|
||||
blocks["proj_out"] = std::make_shared<Linear>(in_channels, in_channels);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
AttnBlock(int64_t in_channels, bool use_linear)
|
||||
: in_channels(in_channels), use_linear(use_linear) {
|
||||
blocks["norm"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(in_channels));
|
||||
if (use_linear) {
|
||||
blocks["q"] = std::shared_ptr<GGMLBlock>(new Linear(in_channels, in_channels));
|
||||
blocks["k"] = std::shared_ptr<GGMLBlock>(new Linear(in_channels, in_channels));
|
||||
blocks["v"] = std::shared_ptr<GGMLBlock>(new Linear(in_channels, in_channels));
|
||||
blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Linear(in_channels, in_channels));
|
||||
} else {
|
||||
blocks["q"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
|
||||
blocks["k"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
|
||||
blocks["v"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
|
||||
blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
|
||||
}
|
||||
}
|
||||
|
||||
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||
// x: [N, in_channels, h, w]
|
||||
auto norm = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm"]);
|
||||
auto q_proj = std::dynamic_pointer_cast<UnaryBlock>(blocks["q"]);
|
||||
auto k_proj = std::dynamic_pointer_cast<UnaryBlock>(blocks["k"]);
|
||||
auto v_proj = std::dynamic_pointer_cast<UnaryBlock>(blocks["v"]);
|
||||
auto proj_out = std::dynamic_pointer_cast<UnaryBlock>(blocks["proj_out"]);
|
||||
|
||||
auto h_ = norm->forward(ctx, x);
|
||||
|
||||
const int64_t n = h_->ne[3];
|
||||
const int64_t c = h_->ne[2];
|
||||
const int64_t h = h_->ne[1];
|
||||
const int64_t w = h_->ne[0];
|
||||
|
||||
ggml_tensor* q;
|
||||
ggml_tensor* k;
|
||||
ggml_tensor* v;
|
||||
if (use_linear) {
|
||||
h_ = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, h_, 1, 2, 0, 3)); // [N, h, w, in_channels]
|
||||
h_ = ggml_reshape_3d(ctx->ggml_ctx, h_, c, h * w, n); // [N, h * w, in_channels]
|
||||
|
||||
q = q_proj->forward(ctx, h_); // [N, h * w, in_channels]
|
||||
k = k_proj->forward(ctx, h_); // [N, h * w, in_channels]
|
||||
v = v_proj->forward(ctx, h_); // [N, h * w, in_channels]
|
||||
} else {
|
||||
q = q_proj->forward(ctx, h_); // [N, in_channels, h, w]
|
||||
q = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, q, 1, 2, 0, 3)); // [N, h, w, in_channels]
|
||||
q = ggml_reshape_3d(ctx->ggml_ctx, q, c, h * w, n); // [N, h * w, in_channels]
|
||||
|
||||
k = k_proj->forward(ctx, h_); // [N, in_channels, h, w]
|
||||
k = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, k, 1, 2, 0, 3)); // [N, h, w, in_channels]
|
||||
k = ggml_reshape_3d(ctx->ggml_ctx, k, c, h * w, n); // [N, h * w, in_channels]
|
||||
|
||||
v = v_proj->forward(ctx, h_); // [N, in_channels, h, w]
|
||||
v = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, v, 1, 2, 0, 3)); // [N, h, w, in_channels]
|
||||
v = ggml_reshape_3d(ctx->ggml_ctx, v, c, h * w, n); // [N, h * w, in_channels]
|
||||
}
|
||||
|
||||
h_ = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, 1, nullptr, false, ctx->flash_attn_enabled);
|
||||
|
||||
if (use_linear) {
|
||||
h_ = proj_out->forward(ctx, h_); // [N, h * w, in_channels]
|
||||
|
||||
h_ = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, h_, 1, 0, 2, 3)); // [N, in_channels, h * w]
|
||||
h_ = ggml_reshape_4d(ctx->ggml_ctx, h_, w, h, c, n); // [N, in_channels, h, w]
|
||||
} else {
|
||||
h_ = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, h_, 1, 0, 2, 3)); // [N, in_channels, h * w]
|
||||
h_ = ggml_reshape_4d(ctx->ggml_ctx, h_, w, h, c, n); // [N, in_channels, h, w]
|
||||
|
||||
h_ = proj_out->forward(ctx, h_); // [N, in_channels, h, w]
|
||||
}
|
||||
|
||||
h_ = ggml_add(ctx->ggml_ctx, h_, x);
|
||||
return h_;
|
||||
}
|
||||
};
|
||||
|
||||
class AE3DConv : public Conv2d {
|
||||
public:
|
||||
AE3DConv(int64_t in_channels,
|
||||
int64_t out_channels,
|
||||
std::pair<int, int> kernel_size,
|
||||
int video_kernel_size = 3,
|
||||
std::pair<int, int> stride = {1, 1},
|
||||
std::pair<int, int> padding = {0, 0},
|
||||
std::pair<int, int> dilation = {1, 1},
|
||||
bool bias = true)
|
||||
: Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias) {
|
||||
int kernel_padding = video_kernel_size / 2;
|
||||
blocks["time_mix_conv"] = std::shared_ptr<GGMLBlock>(new Conv3d(out_channels,
|
||||
out_channels,
|
||||
{video_kernel_size, 1, 1},
|
||||
{1, 1, 1},
|
||||
{kernel_padding, 0, 0}));
|
||||
}
|
||||
|
||||
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
ggml_tensor* x) override {
|
||||
// timesteps always None
|
||||
// skip_video always False
|
||||
// x: [N, IC, IH, IW]
|
||||
// result: [N, OC, OH, OW]
|
||||
auto time_mix_conv = std::dynamic_pointer_cast<Conv3d>(blocks["time_mix_conv"]);
|
||||
|
||||
x = Conv2d::forward(ctx, x);
|
||||
// timesteps = x.shape[0]
|
||||
// x = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps)
|
||||
// x = conv3d(x)
|
||||
// return rearrange(x, "b c t h w -> (b t) c h w")
|
||||
int64_t T = x->ne[3];
|
||||
int64_t B = x->ne[3] / T;
|
||||
int64_t C = x->ne[2];
|
||||
int64_t H = x->ne[1];
|
||||
int64_t W = x->ne[0];
|
||||
|
||||
x = ggml_reshape_4d(ctx->ggml_ctx, x, W * H, C, T, B); // (b t) c h w -> b t c (h w)
|
||||
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b t c (h w) -> b c t (h w)
|
||||
x = time_mix_conv->forward(ctx, x); // [B, OC, T, OH * OW]
|
||||
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b c t (h w) -> b t c (h w)
|
||||
x = ggml_reshape_4d(ctx->ggml_ctx, x, W, H, C, T * B); // b t c (h w) -> (b t) c h w
|
||||
return x; // [B*T, OC, OH, OW]
|
||||
}
|
||||
};
|
||||
|
||||
class VideoResnetBlock : public ResnetBlock {
|
||||
protected:
|
||||
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
|
||||
enum ggml_type wtype = get_type(prefix + "mix_factor", tensor_storage_map, GGML_TYPE_F32);
|
||||
params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
|
||||
}
|
||||
|
||||
float get_alpha() {
|
||||
float alpha = ggml_ext_backend_tensor_get_f32(params["mix_factor"]);
|
||||
return sigmoid(alpha);
|
||||
}
|
||||
|
||||
public:
|
||||
VideoResnetBlock(int64_t in_channels,
|
||||
int64_t out_channels,
|
||||
int video_kernel_size = 3)
|
||||
: ResnetBlock(in_channels, out_channels) {
|
||||
// merge_strategy is always learned
|
||||
blocks["time_stack"] = std::shared_ptr<GGMLBlock>(new ResBlock(out_channels, 0, out_channels, {video_kernel_size, 1}, 3, false, true));
|
||||
}
|
||||
|
||||
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
|
||||
// x: [N, in_channels, h, w] aka [b*t, in_channels, h, w]
|
||||
// return: [N, out_channels, h, w] aka [b*t, out_channels, h, w]
|
||||
// t_emb is always None
|
||||
// skip_video is always False
|
||||
// timesteps is always None
|
||||
auto time_stack = std::dynamic_pointer_cast<ResBlock>(blocks["time_stack"]);
|
||||
|
||||
x = ResnetBlock::forward(ctx, x); // [N, out_channels, h, w]
|
||||
// return x;
|
||||
|
||||
int64_t T = x->ne[3];
|
||||
int64_t B = x->ne[3] / T;
|
||||
int64_t C = x->ne[2];
|
||||
int64_t H = x->ne[1];
|
||||
int64_t W = x->ne[0];
|
||||
|
||||
x = ggml_reshape_4d(ctx->ggml_ctx, x, W * H, C, T, B); // (b t) c h w -> b t c (h w)
|
||||
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b t c (h w) -> b c t (h w)
|
||||
auto x_mix = x;
|
||||
|
||||
x = time_stack->forward(ctx, x); // b t c (h w)
|
||||
|
||||
float alpha = get_alpha();
|
||||
x = ggml_add(ctx->ggml_ctx,
|
||||
ggml_ext_scale(ctx->ggml_ctx, x, alpha),
|
||||
ggml_ext_scale(ctx->ggml_ctx, x_mix, 1.0f - alpha));
|
||||
|
||||
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b c t (h w) -> b t c (h w)
|
||||
x = ggml_reshape_4d(ctx->ggml_ctx, x, W, H, C, T * B); // b t c (h w) -> (b t) c h w
|
||||
|
||||
return x;
|
||||
}
|
||||
};
|
||||
|
||||
// ldm.modules.diffusionmodules.model.Encoder
|
||||
class Encoder : public GGMLBlock {
|
||||
protected:
|
||||
int ch = 128;
|
||||
std::vector<int> ch_mult = {1, 2, 4, 4};
|
||||
int num_res_blocks = 2;
|
||||
int in_channels = 3;
|
||||
int z_channels = 4;
|
||||
bool double_z = true;
|
||||
|
||||
public:
|
||||
Encoder(int ch,
|
||||
std::vector<int> ch_mult,
|
||||
int num_res_blocks,
|
||||
int in_channels,
|
||||
int z_channels,
|
||||
bool double_z = true,
|
||||
bool use_linear_projection = false)
|
||||
: ch(ch),
|
||||
ch_mult(ch_mult),
|
||||
num_res_blocks(num_res_blocks),
|
||||
in_channels(in_channels),
|
||||
z_channels(z_channels),
|
||||
double_z(double_z) {
|
||||
blocks["conv_in"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, ch, {3, 3}, {1, 1}, {1, 1}));
|
||||
|
||||
size_t num_resolutions = ch_mult.size();
|
||||
|
||||
int block_in = 1;
|
||||
for (int i = 0; i < num_resolutions; i++) {
|
||||
if (i == 0) {
|
||||
block_in = ch;
|
||||
} else {
|
||||
block_in = ch * ch_mult[i - 1];
|
||||
}
|
||||
int block_out = ch * ch_mult[i];
|
||||
for (int j = 0; j < num_res_blocks; j++) {
|
||||
std::string name = "down." + std::to_string(i) + ".block." + std::to_string(j);
|
||||
blocks[name] = std::shared_ptr<GGMLBlock>(new ResnetBlock(block_in, block_out));
|
||||
block_in = block_out;
|
||||
}
|
||||
if (i != num_resolutions - 1) {
|
||||
std::string name = "down." + std::to_string(i) + ".downsample";
|
||||
blocks[name] = std::shared_ptr<GGMLBlock>(new DownSampleBlock(block_in, block_in, true));
|
||||
}
|
||||
}
|
||||
|
||||
blocks["mid.block_1"] = std::shared_ptr<GGMLBlock>(new ResnetBlock(block_in, block_in));
|
||||
blocks["mid.attn_1"] = std::shared_ptr<GGMLBlock>(new AttnBlock(block_in, use_linear_projection));
|
||||
blocks["mid.block_2"] = std::shared_ptr<GGMLBlock>(new ResnetBlock(block_in, block_in));
|
||||
|
||||
blocks["norm_out"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(block_in));
|
||||
blocks["conv_out"] = std::shared_ptr<GGMLBlock>(new Conv2d(block_in, double_z ? z_channels * 2 : z_channels, {3, 3}, {1, 1}, {1, 1}));
|
||||
}
|
||||
|
||||
virtual ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||
// x: [N, in_channels, h, w]
|
||||
|
||||
auto conv_in = std::dynamic_pointer_cast<Conv2d>(blocks["conv_in"]);
|
||||
auto mid_block_1 = std::dynamic_pointer_cast<ResnetBlock>(blocks["mid.block_1"]);
|
||||
auto mid_attn_1 = std::dynamic_pointer_cast<AttnBlock>(blocks["mid.attn_1"]);
|
||||
auto mid_block_2 = std::dynamic_pointer_cast<ResnetBlock>(blocks["mid.block_2"]);
|
||||
auto norm_out = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm_out"]);
|
||||
auto conv_out = std::dynamic_pointer_cast<Conv2d>(blocks["conv_out"]);
|
||||
|
||||
auto h = conv_in->forward(ctx, x); // [N, ch, h, w]
|
||||
|
||||
// downsampling
|
||||
size_t num_resolutions = ch_mult.size();
|
||||
for (int i = 0; i < num_resolutions; i++) {
|
||||
for (int j = 0; j < num_res_blocks; j++) {
|
||||
std::string name = "down." + std::to_string(i) + ".block." + std::to_string(j);
|
||||
auto down_block = std::dynamic_pointer_cast<ResnetBlock>(blocks[name]);
|
||||
|
||||
h = down_block->forward(ctx, h);
|
||||
}
|
||||
if (i != num_resolutions - 1) {
|
||||
std::string name = "down." + std::to_string(i) + ".downsample";
|
||||
auto down_sample = std::dynamic_pointer_cast<DownSampleBlock>(blocks[name]);
|
||||
|
||||
h = down_sample->forward(ctx, h);
|
||||
}
|
||||
}
|
||||
|
||||
// middle
|
||||
h = mid_block_1->forward(ctx, h);
|
||||
h = mid_attn_1->forward(ctx, h);
|
||||
h = mid_block_2->forward(ctx, h); // [N, block_in, h, w]
|
||||
|
||||
// end
|
||||
h = norm_out->forward(ctx, h);
|
||||
h = ggml_silu_inplace(ctx->ggml_ctx, h); // nonlinearity/swish
|
||||
h = conv_out->forward(ctx, h); // [N, z_channels*2, h, w]
|
||||
return h;
|
||||
}
|
||||
};
|
||||
|
||||
// ldm.modules.diffusionmodules.model.Decoder
class Decoder : public GGMLBlock {
protected:
    int ch = 128;
    int out_ch = 3;
    std::vector<int> ch_mult = {1, 2, 4, 4};
    int num_res_blocks = 2;
    int z_channels = 4;
    bool video_decoder = false;
    int video_kernel_size = 3;

    virtual std::shared_ptr<GGMLBlock> get_conv_out(int64_t in_channels,
                                                    int64_t out_channels,
                                                    std::pair<int, int> kernel_size,
                                                    std::pair<int, int> stride = {1, 1},
                                                    std::pair<int, int> padding = {0, 0}) {
        if (video_decoder) {
            return std::shared_ptr<GGMLBlock>(new AE3DConv(in_channels, out_channels, kernel_size, video_kernel_size, stride, padding));
        } else {
            return std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, out_channels, kernel_size, stride, padding));
        }
    }

    virtual std::shared_ptr<GGMLBlock> get_resnet_block(int64_t in_channels,
                                                        int64_t out_channels) {
        if (video_decoder) {
            return std::shared_ptr<GGMLBlock>(new VideoResnetBlock(in_channels, out_channels, video_kernel_size));
        } else {
            return std::shared_ptr<GGMLBlock>(new ResnetBlock(in_channels, out_channels));
        }
    }

public:
    Decoder(int ch,
            int out_ch,
            std::vector<int> ch_mult,
            int num_res_blocks,
            int z_channels,
            bool use_linear_projection = false,
            bool video_decoder = false,
            int video_kernel_size = 3)
        : ch(ch),
          out_ch(out_ch),
          ch_mult(ch_mult),
          num_res_blocks(num_res_blocks),
          z_channels(z_channels),
          video_decoder(video_decoder),
          video_kernel_size(video_kernel_size) {
        int num_resolutions = static_cast<int>(ch_mult.size());
        int block_in = ch * ch_mult[num_resolutions - 1];

        blocks["conv_in"] = std::shared_ptr<GGMLBlock>(new Conv2d(z_channels, block_in, {3, 3}, {1, 1}, {1, 1}));

        blocks["mid.block_1"] = get_resnet_block(block_in, block_in);
        blocks["mid.attn_1"] = std::shared_ptr<GGMLBlock>(new AttnBlock(block_in, use_linear_projection));
        blocks["mid.block_2"] = get_resnet_block(block_in, block_in);

        for (int i = num_resolutions - 1; i >= 0; i--) {
            int mult = ch_mult[i];
            int block_out = ch * mult;
            for (int j = 0; j < num_res_blocks + 1; j++) {
                std::string name = "up." + std::to_string(i) + ".block." + std::to_string(j);
                blocks[name] = get_resnet_block(block_in, block_out);

                block_in = block_out;
            }
            if (i != 0) {
                std::string name = "up." + std::to_string(i) + ".upsample";
                blocks[name] = std::shared_ptr<GGMLBlock>(new UpSampleBlock(block_in, block_in));
            }
        }

        blocks["norm_out"] = std::shared_ptr<GGMLBlock>(new GroupNorm32(block_in));
        blocks["conv_out"] = get_conv_out(block_in, out_ch, {3, 3}, {1, 1}, {1, 1});
    }

    virtual ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* z) {
        // z: [N, z_channels, h, w]
        // alpha is always 0
        // merge_strategy is always learned
        // time_mode is always conv-only, so we need to replace conv_out_op/resnet_op to AE3DConv/VideoResBlock
        // AttnVideoBlock will not be used
        auto conv_in = std::dynamic_pointer_cast<Conv2d>(blocks["conv_in"]);
        auto mid_block_1 = std::dynamic_pointer_cast<ResnetBlock>(blocks["mid.block_1"]);
        auto mid_attn_1 = std::dynamic_pointer_cast<AttnBlock>(blocks["mid.attn_1"]);
        auto mid_block_2 = std::dynamic_pointer_cast<ResnetBlock>(blocks["mid.block_2"]);
        auto norm_out = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm_out"]);
        auto conv_out = std::dynamic_pointer_cast<Conv2d>(blocks["conv_out"]);

        // conv_in
        auto h = conv_in->forward(ctx, z);  // [N, block_in, h, w]

        // middle
        h = mid_block_1->forward(ctx, h);
        // return h;

        h = mid_attn_1->forward(ctx, h);
        h = mid_block_2->forward(ctx, h);  // [N, block_in, h, w]

        // upsampling
        int num_resolutions = static_cast<int>(ch_mult.size());
        for (int i = num_resolutions - 1; i >= 0; i--) {
            for (int j = 0; j < num_res_blocks + 1; j++) {
                std::string name = "up." + std::to_string(i) + ".block." + std::to_string(j);
                auto up_block = std::dynamic_pointer_cast<ResnetBlock>(blocks[name]);

                h = up_block->forward(ctx, h);
            }
            if (i != 0) {
                std::string name = "up." + std::to_string(i) + ".upsample";
                auto up_sample = std::dynamic_pointer_cast<UpSampleBlock>(blocks[name]);

                h = up_sample->forward(ctx, h);
            }
        }

        h = norm_out->forward(ctx, h);
        h = ggml_silu_inplace(ctx->ggml_ctx, h);  // nonlinearity/swish
        h = conv_out->forward(ctx, h);  // [N, out_ch, h*8, w*8]
        return h;
    }
};

// ldm.models.autoencoder.AutoencoderKL
class AutoEncoderKLModel : public GGMLBlock {
protected:
    SDVersion version;
    bool decode_only = true;
    bool use_video_decoder = false;
    bool use_quant = true;
    int embed_dim = 4;
    struct {
        int z_channels = 4;
        int resolution = 256;
        int in_channels = 3;
        int out_ch = 3;
        int ch = 128;
        std::vector<int> ch_mult = {1, 2, 4, 4};
        int num_res_blocks = 2;
        bool double_z = true;
    } dd_config;

public:
    AutoEncoderKLModel(SDVersion version = VERSION_SD1,
                       bool decode_only = true,
                       bool use_linear_projection = false,
                       bool use_video_decoder = false)
        : version(version), decode_only(decode_only), use_video_decoder(use_video_decoder) {
        if (sd_version_is_dit(version)) {
            if (sd_version_is_flux2(version)) {
                dd_config.z_channels = 32;
                embed_dim = 32;
            } else {
                use_quant = false;
                dd_config.z_channels = 16;
            }
        }
        if (use_video_decoder) {
            use_quant = false;
        }
        blocks["decoder"] = std::shared_ptr<GGMLBlock>(new Decoder(dd_config.ch,
                                                                   dd_config.out_ch,
                                                                   dd_config.ch_mult,
                                                                   dd_config.num_res_blocks,
                                                                   dd_config.z_channels,
                                                                   use_linear_projection,
                                                                   use_video_decoder));
        if (use_quant) {
            blocks["post_quant_conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(dd_config.z_channels,
                                                                              embed_dim,
                                                                              {1, 1}));
        }
        if (!decode_only) {
            blocks["encoder"] = std::shared_ptr<GGMLBlock>(new Encoder(dd_config.ch,
                                                                       dd_config.ch_mult,
                                                                       dd_config.num_res_blocks,
                                                                       dd_config.in_channels,
                                                                       dd_config.z_channels,
                                                                       dd_config.double_z,
                                                                       use_linear_projection));
            if (use_quant) {
                int factor = dd_config.double_z ? 2 : 1;

                blocks["quant_conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(embed_dim * factor,
                                                                             dd_config.z_channels * factor,
                                                                             {1, 1}));
            }
        }
    }

    ggml_tensor* decode(GGMLRunnerContext* ctx, ggml_tensor* z) {
        // z: [N, z_channels, h, w]
        if (sd_version_is_flux2(version)) {
            // [N, C*p*p, h, w] -> [N, C, h*p, w*p]
            int64_t p = 2;

            int64_t N = z->ne[3];
            int64_t C = z->ne[2] / p / p;
            int64_t h = z->ne[1];
            int64_t w = z->ne[0];
            int64_t H = h * p;
            int64_t W = w * p;

            z = ggml_reshape_4d(ctx->ggml_ctx, z, w * h, p * p, C, N);                           // [N, C, p*p, h*w]
            z = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, z, 1, 0, 2, 3));  // [N, C, h*w, p*p]
            z = ggml_reshape_4d(ctx->ggml_ctx, z, p, p, w, h * C * N);                           // [N*C*h, w, p, p]
            z = ggml_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, z, 0, 2, 1, 3));  // [N*C*h, p, w, p]
            z = ggml_reshape_4d(ctx->ggml_ctx, z, W, H, C, N);                                   // [N, C, h*p, w*p]
        }

        if (use_quant) {
            auto post_quant_conv = std::dynamic_pointer_cast<Conv2d>(blocks["post_quant_conv"]);
            z = post_quant_conv->forward(ctx, z);  // [N, z_channels, h, w]
        }
        auto decoder = std::dynamic_pointer_cast<Decoder>(blocks["decoder"]);

        ggml_set_name(z, "bench-start");
        auto h = decoder->forward(ctx, z);
        ggml_set_name(h, "bench-end");
        return h;
    }

    ggml_tensor* encode(GGMLRunnerContext* ctx, ggml_tensor* x) {
        // x: [N, in_channels, h, w]
        auto encoder = std::dynamic_pointer_cast<Encoder>(blocks["encoder"]);

        auto z = encoder->forward(ctx, x);  // [N, 2*z_channels, h/8, w/8]
        if (use_quant) {
            auto quant_conv = std::dynamic_pointer_cast<Conv2d>(blocks["quant_conv"]);
            z = quant_conv->forward(ctx, z);  // [N, 2*embed_dim, h/8, w/8]
        }
        if (sd_version_is_flux2(version)) {
            z = ggml_ext_chunk(ctx->ggml_ctx, z, 2, 2)[0];

            // [N, C, H, W] -> [N, C*p*p, H/p, W/p]
            int64_t p = 2;
            int64_t N = z->ne[3];
            int64_t C = z->ne[2];
            int64_t H = z->ne[1];
            int64_t W = z->ne[0];
            int64_t h = H / p;
            int64_t w = W / p;

            z = ggml_reshape_4d(ctx->ggml_ctx, z, p, w, p, h * C * N);                 // [N*C*h, p, w, p]
            z = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, z, 0, 2, 1, 3));  // [N*C*h, w, p, p]
            z = ggml_reshape_4d(ctx->ggml_ctx, z, p * p, w * h, C, N);                 // [N, C, h*w, p*p]
            z = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, z, 1, 0, 2, 3));  // [N, C, p*p, h*w]
            z = ggml_reshape_4d(ctx->ggml_ctx, z, w, h, p * p * C, N);                 // [N, C*p*p, h, w]
        }
        return z;
    }

    int get_encoder_output_channels() {
        int factor = dd_config.double_z ? 2 : 1;
        if (sd_version_is_flux2(version)) {
            return dd_config.z_channels * 4;
        }
        return dd_config.z_channels * factor;
    }
};

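The reshape/permute ladders in decode and encode above are a pixel shuffle with patch size p = 2: flux2 latents are stored space-to-depth packed, and decode unpacks them before post_quant_conv. A standalone sketch of the equivalent rearrangement on a plain row-major buffer, useful for checking the index math (channel ordering follows the PyTorch pixel_shuffle convention; the function and buffer names are illustrative):

#include <vector>

// [C*p*p, h, w] -> [C, h*p, w*p] for a single image (depth-to-space).
static std::vector<float> depth_to_space(const std::vector<float>& z,
                                         int C, int h, int w, int p) {
    std::vector<float> out(static_cast<size_t>(C) * h * p * w * p);
    for (int c = 0; c < C; c++) {
        for (int py = 0; py < p; py++) {
            for (int px = 0; px < p; px++) {
                // input channel that holds sub-pixel (py, px) of output channel c
                int in_c = c * p * p + py * p + px;
                for (int y = 0; y < h; y++) {
                    for (int x = 0; x < w; x++) {
                        int out_y = y * p + py;
                        int out_x = x * p + px;
                        out[(static_cast<size_t>(c) * h * p + out_y) * (w * p) + out_x] =
                            z[(static_cast<size_t>(in_c) * h + y) * w + x];
                    }
                }
            }
        }
    }
    return out;
}
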
struct AutoEncoderKL : public VAE {
    float scale_factor = 1.f;
    float shift_factor = 0.f;
    bool decode_only = true;
    AutoEncoderKLModel ae;

    AutoEncoderKL(ggml_backend_t backend,
                  bool offload_params_to_cpu,
                  const String2TensorStorage& tensor_storage_map,
                  const std::string prefix,
                  bool decode_only = false,
                  bool use_video_decoder = false,
                  SDVersion version = VERSION_SD1)
        : decode_only(decode_only), VAE(version, backend, offload_params_to_cpu) {
        if (sd_version_is_sd1(version) || sd_version_is_sd2(version)) {
            scale_factor = 0.18215f;
            shift_factor = 0.f;
        } else if (sd_version_is_sdxl(version)) {
            scale_factor = 0.13025f;
            shift_factor = 0.f;
        } else if (sd_version_is_sd3(version)) {
            scale_factor = 1.5305f;
            shift_factor = 0.0609f;
        } else if (sd_version_is_flux(version) || sd_version_is_z_image(version)) {
            scale_factor = 0.3611f;
            shift_factor = 0.1159f;
        } else if (sd_version_is_flux2(version)) {
            scale_factor = 1.0f;
            shift_factor = 0.f;
        }
        bool use_linear_projection = false;
        for (const auto& [name, tensor_storage] : tensor_storage_map) {
            if (!starts_with(name, prefix)) {
                continue;
            }
            if (ends_with(name, "attn_1.proj_out.weight")) {
                if (tensor_storage.n_dims == 2) {
                    use_linear_projection = true;
                }
                break;
            }
        }
        ae = AutoEncoderKLModel(version, decode_only, use_linear_projection, use_video_decoder);
        ae.init(params_ctx, tensor_storage_map, prefix);
    }

    void set_conv2d_scale(float scale) override {
        std::vector<GGMLBlock*> blocks;
        ae.get_all_blocks(blocks);
        for (auto block : blocks) {
            if (block->get_desc() == "Conv2d") {
                auto conv_block = (Conv2d*)block;
                conv_block->set_scale(scale);
            }
        }
    }

    std::string get_desc() override {
        return "vae";
    }

    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) override {
        ae.get_param_tensors(tensors, prefix);
    }

    ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) {
        ggml_cgraph* gf = ggml_new_graph(compute_ctx);
        ggml_tensor* z = make_input(z_tensor);

        auto runner_ctx = get_context();

        ggml_tensor* out = decode_graph ? ae.decode(&runner_ctx, z) : ae.encode(&runner_ctx, z);

        ggml_build_forward_expand(gf, out);

        return gf;
    }

    sd::Tensor<float> _compute(const int n_threads,
                               const sd::Tensor<float>& z,
                               bool decode_graph) override {
        GGML_ASSERT(!decode_only || decode_graph);
        auto get_graph = [&]() -> ggml_cgraph* {
            return build_graph(z, decode_graph);
        };
        return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), z.dim());
    }

    sd::Tensor<float> gaussian_latent_sample(const sd::Tensor<float>& moments, std::shared_ptr<RNG> rng) {
        // ldm.modules.distributions.distributions.DiagonalGaussianDistribution.sample
        auto chunks = sd::ops::chunk(moments, 2, 2);
        const auto& mean = chunks[0];
        const auto& logvar = chunks[1];
        sd::Tensor<float> stddev = sd::ops::exp(0.5f * sd::ops::clamp(logvar, -30.0f, 20.0f));
        sd::Tensor<float> noise = sd::Tensor<float>::randn_like(mean, rng);
        sd::Tensor<float> latents = mean + stddev * noise;
        return latents;
    }

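gaussian_latent_sample is the standard reparameterization trick: the encoder output is split channel-wise into mean and log-variance, and a latent is drawn as mean + exp(0.5 * logvar) * eps with eps ~ N(0, 1); the clamp keeps exp() in a numerically safe range. A scalar sketch of the same computation (names are illustrative):

#include <algorithm>
#include <cmath>
#include <random>

// One element of DiagonalGaussianDistribution.sample.
static float sample_diag_gaussian(float mean, float logvar, std::mt19937& gen) {
    logvar = std::min(std::max(logvar, -30.0f), 20.0f);  // same clamp as above
    float stddev = std::exp(0.5f * logvar);
    std::normal_distribution<float> dist(0.0f, 1.0f);
    return mean + stddev * dist(gen);
}
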
    sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
        if (sd_version_is_flux2(version)) {
            return vae_output;
        } else if (version == VERSION_SD1_PIX2PIX) {
            return sd::ops::chunk(vae_output, 2, 2)[0];
        } else {
            return gaussian_latent_sample(vae_output, rng);
        }
    }

    std::pair<sd::Tensor<float>, sd::Tensor<float>> get_latents_mean_std(const sd::Tensor<float>& latents, int channel_dim) {
        GGML_ASSERT(channel_dim >= 0 && static_cast<size_t>(channel_dim) < static_cast<size_t>(latents.dim()));
        if (sd_version_is_flux2(version)) {
            GGML_ASSERT(latents.shape()[channel_dim] == 128);
            std::vector<int64_t> stats_shape(static_cast<size_t>(latents.dim()), 1);
            stats_shape[static_cast<size_t>(channel_dim)] = latents.shape()[channel_dim];

            auto mean_tensor = sd::Tensor<float>::from_vector({-0.0676f, -0.0715f, -0.0753f, -0.0745f, 0.0223f, 0.0180f, 0.0142f, 0.0184f,
                                                               -0.0001f, -0.0063f, -0.0002f, -0.0031f, -0.0272f, -0.0281f, -0.0276f, -0.0290f,
                                                               -0.0769f, -0.0672f, -0.0902f, -0.0892f, 0.0168f, 0.0152f, 0.0079f, 0.0086f,
                                                               0.0083f, 0.0015f, 0.0003f, -0.0043f, -0.0439f, -0.0419f, -0.0438f, -0.0431f,
                                                               -0.0102f, -0.0132f, -0.0066f, -0.0048f, -0.0311f, -0.0306f, -0.0279f, -0.0180f,
                                                               0.0030f, 0.0015f, 0.0126f, 0.0145f, 0.0347f, 0.0338f, 0.0337f, 0.0283f,
                                                               0.0020f, 0.0047f, 0.0047f, 0.0050f, 0.0123f, 0.0081f, 0.0081f, 0.0146f,
                                                               0.0681f, 0.0679f, 0.0767f, 0.0732f, -0.0462f, -0.0474f, -0.0392f, -0.0511f,
                                                               -0.0528f, -0.0477f, -0.0470f, -0.0517f, -0.0317f, -0.0316f, -0.0345f, -0.0283f,
                                                               0.0510f, 0.0445f, 0.0578f, 0.0458f, -0.0412f, -0.0458f, -0.0487f, -0.0467f,
                                                               -0.0088f, -0.0106f, -0.0088f, -0.0046f, -0.0376f, -0.0432f, -0.0436f, -0.0499f,
                                                               0.0118f, 0.0166f, 0.0203f, 0.0279f, 0.0113f, 0.0129f, 0.0016f, 0.0072f,
                                                               -0.0118f, -0.0018f, -0.0141f, -0.0054f, -0.0091f, -0.0138f, -0.0145f, -0.0187f,
                                                               0.0323f, 0.0305f, 0.0259f, 0.0300f, 0.0540f, 0.0614f, 0.0495f, 0.0590f,
                                                               -0.0511f, -0.0603f, -0.0478f, -0.0524f, -0.0227f, -0.0274f, -0.0154f, -0.0255f,
                                                               -0.0572f, -0.0565f, -0.0518f, -0.0496f, 0.0116f, 0.0054f, 0.0163f, 0.0104f});
            mean_tensor.reshape_(stats_shape);
            auto std_tensor = sd::Tensor<float>::from_vector({1.8029f, 1.7786f, 1.7868f, 1.7837f, 1.7717f, 1.7590f, 1.7610f, 1.7479f,
                                                              1.7336f, 1.7373f, 1.7340f, 1.7343f, 1.8626f, 1.8527f, 1.8629f, 1.8589f,
                                                              1.7593f, 1.7526f, 1.7556f, 1.7583f, 1.7363f, 1.7400f, 1.7355f, 1.7394f,
                                                              1.7342f, 1.7246f, 1.7392f, 1.7304f, 1.7551f, 1.7513f, 1.7559f, 1.7488f,
                                                              1.8449f, 1.8454f, 1.8550f, 1.8535f, 1.8240f, 1.7813f, 1.7854f, 1.7945f,
                                                              1.8047f, 1.7876f, 1.7695f, 1.7676f, 1.7782f, 1.7667f, 1.7925f, 1.7848f,
                                                              1.7579f, 1.7407f, 1.7483f, 1.7368f, 1.7961f, 1.7998f, 1.7920f, 1.7925f,
                                                              1.7780f, 1.7747f, 1.7727f, 1.7749f, 1.7526f, 1.7447f, 1.7657f, 1.7495f,
                                                              1.7775f, 1.7720f, 1.7813f, 1.7813f, 1.8162f, 1.8013f, 1.8023f, 1.8033f,
                                                              1.7527f, 1.7331f, 1.7563f, 1.7482f, 1.7610f, 1.7507f, 1.7681f, 1.7613f,
                                                              1.7665f, 1.7545f, 1.7828f, 1.7726f, 1.7896f, 1.7999f, 1.7864f, 1.7760f,
                                                              1.7613f, 1.7625f, 1.7560f, 1.7577f, 1.7783f, 1.7671f, 1.7810f, 1.7799f,
                                                              1.7201f, 1.7068f, 1.7265f, 1.7091f, 1.7793f, 1.7578f, 1.7502f, 1.7455f,
                                                              1.7587f, 1.7500f, 1.7525f, 1.7362f, 1.7616f, 1.7572f, 1.7444f, 1.7430f,
                                                              1.7509f, 1.7610f, 1.7634f, 1.7612f, 1.7254f, 1.7135f, 1.7321f, 1.7226f,
                                                              1.7664f, 1.7624f, 1.7718f, 1.7664f, 1.7457f, 1.7441f, 1.7569f, 1.7530f});
            std_tensor.reshape_(stats_shape);
            return {std::move(mean_tensor), std::move(std_tensor)};
        } else {
            GGML_ABORT("unknown version %d", version);
        }
    }

    sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
        if (sd_version_is_flux2(version)) {
            int channel_dim = 2;
            auto [mean_tensor, std_tensor] = get_latents_mean_std(latents, channel_dim);
            return (latents * std_tensor) / scale_factor + mean_tensor;
        }
        return (latents / scale_factor) + shift_factor;
    }

    sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
        if (sd_version_is_flux2(version)) {
            int channel_dim = 2;
            auto [mean_tensor, std_tensor] = get_latents_mean_std(latents, channel_dim);
            return ((latents - mean_tensor) * scale_factor) / std_tensor;
        }
        return (latents - shift_factor) * scale_factor;
    }

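vae_to_diffusion_latents and diffusion_to_vae_latents are exact inverses: the flux2 per-channel standardization (x - mean) * scale / std is undone by x * std / scale + mean, and the scalar (x - shift) * scale path is undone by x / scale + shift. A scalar round-trip check using constants from above (the test program itself is illustrative):

#include <cassert>
#include <cmath>

int main() {
    // scalar path (SD1 constants): vae -> diffusion -> vae
    float scale = 0.18215f, shift = 0.0f, x = 1.25f;
    float d = (x - shift) * scale;
    float back = d / scale + shift;
    assert(std::fabs(back - x) < 1e-6f);

    // flux2 path, one channel of the per-channel tables (scale_factor = 1.0)
    float mean = -0.0676f, stddev = 1.8029f;
    float d2 = (x - mean) * 1.0f / stddev;
    float back2 = d2 * stddev / 1.0f + mean;
    assert(std::fabs(back2 - x) < 1e-5f);
    return 0;
}
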
    int get_encoder_output_channels(int input_channels) {
        return ae.get_encoder_output_channels();
    }

    void test() {
        ggml_init_params params;
        params.mem_size = static_cast<size_t>(10 * 1024 * 1024);  // 10 MB
        params.mem_buffer = nullptr;
        params.no_alloc = false;

        ggml_context* ctx = ggml_init(params);
        GGML_ASSERT(ctx != nullptr);

        {
            // CPU, x{1, 3, 64, 64}: Pass
            // CUDA, x{1, 3, 64, 64}: Pass, but still gets wrong results for some images, possibly due to internal NaN
            // CPU, x{2, 3, 64, 64}: Wrong result
            // CUDA, x{2, 3, 64, 64}: Wrong result, and different from CPU result
            sd::Tensor<float> x({64, 64, 3, 2});
            x.fill_(0.5f);
            print_sd_tensor(x);
            sd::Tensor<float> out;

            int64_t t0 = ggml_time_ms();
            auto out_opt = _compute(8, x, false);
            int64_t t1 = ggml_time_ms();

            GGML_ASSERT(!out_opt.empty());
            out = std::move(out_opt);
            print_sd_tensor(out);
            LOG_DEBUG("encode test done in %lldms", t1 - t0);
        }

        if (false) {
            // CPU, z{1, 4, 8, 8}: Pass
            // CUDA, z{1, 4, 8, 8}: Pass
            // CPU, z{3, 4, 8, 8}: Wrong result
            // CUDA, z{3, 4, 8, 8}: Wrong result, and different from CPU result
            sd::Tensor<float> z({8, 8, 4, 1});
            z.fill_(0.5f);
            print_sd_tensor(z);
            sd::Tensor<float> out;

            int64_t t0 = ggml_time_ms();
            auto out_opt = _compute(8, z, true);
            int64_t t1 = ggml_time_ms();

            GGML_ASSERT(!out_opt.empty());
            out = std::move(out_opt);
            print_sd_tensor(out);
            LOG_DEBUG("decode test done in %lldms", t1 - t0);
        }
    }
};

#endif // __AUTO_ENCODER_KL_HPP__

@ -8,7 +8,9 @@
#include <unordered_map>
#include <vector>

#include "condition_cache_utils.hpp"
#include "ggml_extend.hpp"
#include "tensor.hpp"

struct DBCacheConfig {
    bool enabled = false;
@ -603,87 +605,6 @@ inline std::vector<int> generate_scm_mask(
    return mask;
}

inline std::vector<int> get_scm_preset(const std::string& preset, int total_steps) {
    struct Preset {
        std::vector<int> compute_bins;
        std::vector<int> cache_bins;
    };

    Preset slow = {{8, 3, 3, 2, 1, 1}, {1, 2, 2, 2, 3}};
    Preset medium = {{6, 2, 2, 2, 2, 1}, {1, 3, 3, 3, 3}};
    Preset fast = {{6, 1, 1, 1, 1, 1}, {1, 3, 4, 5, 4}};
    Preset ultra = {{4, 1, 1, 1, 1}, {2, 5, 6, 7}};

    Preset* p = nullptr;
    if (preset == "slow" || preset == "s" || preset == "S")
        p = &slow;
    else if (preset == "medium" || preset == "m" || preset == "M")
        p = &medium;
    else if (preset == "fast" || preset == "f" || preset == "F")
        p = &fast;
    else if (preset == "ultra" || preset == "u" || preset == "U")
        p = &ultra;
    else
        return {};

    if (total_steps != 28 && total_steps > 0) {
        float scale = static_cast<float>(total_steps) / 28.0f;
        std::vector<int> scaled_compute, scaled_cache;

        for (int v : p->compute_bins) {
            scaled_compute.push_back(std::max(1, static_cast<int>(v * scale + 0.5f)));
        }
        for (int v : p->cache_bins) {
            scaled_cache.push_back(std::max(1, static_cast<int>(v * scale + 0.5f)));
        }

        return generate_scm_mask(scaled_compute, scaled_cache, total_steps);
    }

    return generate_scm_mask(p->compute_bins, p->cache_bins, total_steps);
}

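The presets above are tuned for 28 steps; the scaling branch rescales each bin by total_steps / 28 with round-half-up and a floor of 1. For example, the slow preset at total_steps = 20 (scale ≈ 0.714) becomes compute_bins {6, 2, 2, 1, 1, 1} and cache_bins {1, 1, 1, 1, 2}. A minimal sketch of just that rescaling step:

#include <algorithm>
#include <vector>

static std::vector<int> scale_bins(const std::vector<int>& bins, int total_steps) {
    float scale = static_cast<float>(total_steps) / 28.0f;
    std::vector<int> scaled;
    for (int v : bins) {
        // round half up, but never below 1 so every bin keeps at least one step
        scaled.push_back(std::max(1, static_cast<int>(v * scale + 0.5f)));
    }
    return scaled;
}
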
inline float get_preset_threshold(const std::string& preset) {
    if (preset == "slow" || preset == "s" || preset == "S")
        return 0.20f;
    if (preset == "medium" || preset == "m" || preset == "M")
        return 0.25f;
    if (preset == "fast" || preset == "f" || preset == "F")
        return 0.30f;
    if (preset == "ultra" || preset == "u" || preset == "U")
        return 0.34f;
    return 0.08f;
}

inline int get_preset_warmup(const std::string& preset) {
    if (preset == "slow" || preset == "s" || preset == "S")
        return 8;
    if (preset == "medium" || preset == "m" || preset == "M")
        return 6;
    if (preset == "fast" || preset == "f" || preset == "F")
        return 6;
    if (preset == "ultra" || preset == "u" || preset == "U")
        return 4;
    return 8;
}

inline int get_preset_Fn(const std::string& preset) {
    if (preset == "slow" || preset == "s" || preset == "S")
        return 8;
    if (preset == "medium" || preset == "m" || preset == "M")
        return 8;
    if (preset == "fast" || preset == "f" || preset == "F")
        return 6;
    if (preset == "ultra" || preset == "u" || preset == "U")
        return 4;
    return 8;
}

inline int get_preset_Bn(const std::string& preset) {
    (void)preset;
    return 0;
}

inline void parse_dbcache_options(const std::string& opts, DBCacheConfig& cfg) {
    if (opts.empty())
        return;
@ -852,35 +773,37 @@ struct CacheDitConditionState {
        return it != cache_diffs.end() && !it->second.diff.empty();
    }

    void update_cache(const void* cond, const float* input, const float* output, size_t size) {
    void update_cache(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
        CacheEntry& entry = cache_diffs[cond];
        entry.diff.resize(size);
        for (size_t i = 0; i < size; i++) {
            entry.diff[i] = output[i] - input[i];
        if (!sd::store_condition_cache_diff(&entry.diff, input, output)) {
            entry.prev_input.clear();
            entry.prev_output.clear();
            entry.has_prev = false;
            return;
        }

        size_t size = static_cast<size_t>(output.numel());
        const float* input_data = input.data();
        const float* output_data = output.data();
        entry.prev_input.resize(size);
        entry.prev_output.resize(size);
        for (size_t i = 0; i < size; i++) {
            entry.prev_input[i] = input[i];
            entry.prev_output[i] = output[i];
            entry.prev_input[i] = input_data[i];
            entry.prev_output[i] = output_data[i];
        }
        entry.has_prev = true;
    }

    void apply_cache(const void* cond, const float* input, float* output, size_t size) {
    void apply_cache(const void* cond,
                     const sd::Tensor<float>& input,
                     sd::Tensor<float>* output) {
        auto it = cache_diffs.find(cond);
        if (it == cache_diffs.end() || it->second.diff.empty())
            return;
        if (it->second.diff.size() != size)
            return;

        for (size_t i = 0; i < size; i++) {
            output[i] = input[i] + it->second.diff[i];
        }
        sd::apply_condition_cache_diff(it->second.diff, input, output);
    }

    bool before_condition(const void* cond, struct ggml_tensor* input, struct ggml_tensor* output, float sigma, int step_index) {
    bool before_condition(const void* cond, const sd::Tensor<float>& input, sd::Tensor<float>* output, float sigma, int step_index) {
        if (!enabled() || step_index < 0)
            return false;

@ -900,8 +823,7 @@ struct CacheDitConditionState {

        if (skip_current_step) {
            if (has_cache(cond)) {
                apply_cache(cond, (float*)input->data, (float*)output->data,
                            static_cast<size_t>(ggml_nelements(output)));
                apply_cache(cond, input, output);
                return true;
            }
            return false;
@ -914,11 +836,11 @@ struct CacheDitConditionState {
        if (it == cache_diffs.end() || !it->second.has_prev)
            return false;

        size_t ne = static_cast<size_t>(ggml_nelements(input));
        size_t ne = static_cast<size_t>(input.numel());
        if (it->second.prev_input.size() != ne)
            return false;

        float* input_data = (float*)input->data;
        const float* input_data = input.data();
        float diff = CacheDitState::calculate_residual_diff(
            it->second.prev_input.data(), input_data, ne);

@ -940,7 +862,7 @@ struct CacheDitConditionState {
            cached_steps.push_back(current_step_index);
            continuous_cached_steps++;
            accumulated_residual_diff += diff;
            apply_cache(cond, input_data, (float*)output->data, ne);
            apply_cache(cond, input, output);
            return true;
        }

@ -948,15 +870,14 @@ struct CacheDitConditionState {
        return false;
    }

    void after_condition(const void* cond, struct ggml_tensor* input, struct ggml_tensor* output) {
    void after_condition(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
        if (!step_is_active())
            return;

        size_t ne = static_cast<size_t>(ggml_nelements(output));
        update_cache(cond, (float*)input->data, (float*)output->data, ne);
        update_cache(cond, input, output);

        if (cond == anchor_condition && taylor_config.enabled) {
            taylor_state.update_derivatives((float*)output->data, ne, current_step_index);
            taylor_state.update_derivatives(output.data(), static_cast<size_t>(output.numel()), current_step_index);
        }
    }

93
src/clip.hpp
@ -473,7 +473,7 @@ public:
        }
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
        // x: [N, n_token, d_model]
        auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]);
        auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
@ -511,7 +511,7 @@ public:
        blocks["mlp"] = std::shared_ptr<GGMLBlock>(new CLIPMLP(d_model, intermediate_size));
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* mask = nullptr) {
    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, ggml_tensor* mask = nullptr) {
        // x: [N, n_token, d_model]
        auto self_attn = std::dynamic_pointer_cast<MultiheadAttention>(blocks["self_attn"]);
        auto layer_norm1 = std::dynamic_pointer_cast<LayerNorm>(blocks["layer_norm1"]);
@ -541,9 +541,9 @@ public:
        }
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
                                struct ggml_tensor* x,
                                struct ggml_tensor* mask = nullptr,
    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* x,
                         ggml_tensor* mask = nullptr,
                         int clip_skip = -1) {
        // x: [N, n_token, d_model]
        int layer_idx = n_layer - 1;
@ -573,7 +573,7 @@ protected:
    int64_t num_positions;
    bool force_clip_f32;

    void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
    void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
        enum ggml_type token_wtype = GGML_TYPE_F32;
        if (!force_clip_f32) {
            token_wtype = get_type(prefix + "token_embedding.weight", tensor_storage_map, GGML_TYPE_F32);
@ -597,13 +597,13 @@ public:
          force_clip_f32(force_clip_f32) {
    }

    struct ggml_tensor* get_token_embed_weight() {
    ggml_tensor* get_token_embed_weight() {
        return params["token_embedding.weight"];
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
                                struct ggml_tensor* input_ids,
                                struct ggml_tensor* custom_embed_weight) {
    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* input_ids,
                         ggml_tensor* custom_embed_weight) {
        // input_ids: [N, n_token]
        auto token_embed_weight = params["token_embedding.weight"];
        auto position_embed_weight = params["position_embedding.weight"];
@ -630,7 +630,7 @@ protected:
    int num_patches;
    int64_t num_positions;

    void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
    void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
        enum ggml_type patch_wtype = GGML_TYPE_F16;
        enum ggml_type class_wtype = GGML_TYPE_F32;
        enum ggml_type position_wtype = GGML_TYPE_F32;
@ -653,7 +653,7 @@ public:
        num_positions = num_patches + 1;
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* pixel_values) {
    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* pixel_values) {
        // pixel_values: [N, num_channels, image_size, image_size]
        // return: [N, num_positions, embed_dim]
        GGML_ASSERT(pixel_values->ne[0] == image_size && pixel_values->ne[1] == image_size && pixel_values->ne[2] == num_channels);
@ -663,18 +663,18 @@ public:
        auto position_embed_weight = params["position_embedding.weight"];

        // concat(patch_embedding, class_embedding) + position_embedding
        struct ggml_tensor* patch_embedding;
        ggml_tensor* patch_embedding;
        int64_t N = pixel_values->ne[3];
        patch_embedding = ggml_ext_conv_2d(ctx->ggml_ctx, pixel_values, patch_embed_weight, nullptr, patch_size, patch_size);  // [N, embed_dim, image_size // patch_size, image_size // patch_size]
        patch_embedding = ggml_reshape_3d(ctx->ggml_ctx, patch_embedding, num_patches, embed_dim, N);                          // [N, embed_dim, num_patches]
        patch_embedding = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, patch_embedding, 1, 0, 2, 3));                  // [N, num_patches, embed_dim]
        patch_embedding = ggml_reshape_4d(ctx->ggml_ctx, patch_embedding, 1, embed_dim, num_patches, N);                       // [N, num_patches, embed_dim, 1]

        struct ggml_tensor* class_embedding = ggml_new_tensor_2d(ctx->ggml_ctx, GGML_TYPE_F32, embed_dim, N);
        ggml_tensor* class_embedding = ggml_new_tensor_2d(ctx->ggml_ctx, GGML_TYPE_F32, embed_dim, N);
        class_embedding = ggml_repeat(ctx->ggml_ctx, class_embed_weight, class_embedding);     // [N, embed_dim]
        class_embedding = ggml_reshape_4d(ctx->ggml_ctx, class_embedding, 1, embed_dim, 1, N); // [N, 1, embed_dim, 1]

        struct ggml_tensor* x = ggml_concat(ctx->ggml_ctx, class_embedding, patch_embedding, 2);  // [N, num_positions, embed_dim, 1]
        ggml_tensor* x = ggml_concat(ctx->ggml_ctx, class_embedding, patch_embedding, 2);  // [N, num_positions, embed_dim, 1]
        x = ggml_reshape_3d(ctx->ggml_ctx, x, embed_dim, num_positions, N);  // [N, num_positions, embed_dim]
        x = ggml_add(ctx->ggml_ctx, x, position_embed_weight);
        return x;  // [N, num_positions, embed_dim]
@ -693,7 +693,7 @@ enum CLIPVersion {

class CLIPTextModel : public GGMLBlock {
protected:
    void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
    void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
        if (version == OPEN_CLIP_VIT_BIGG_14) {
            enum ggml_type wtype = GGML_TYPE_F32;
            params["text_projection"] = ggml_new_tensor_2d(ctx, wtype, projection_dim, hidden_size);
@ -734,15 +734,15 @@ public:
        blocks["final_layer_norm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size));
    }

    struct ggml_tensor* get_token_embed_weight() {
    ggml_tensor* get_token_embed_weight() {
        auto embeddings = std::dynamic_pointer_cast<CLIPEmbeddings>(blocks["embeddings"]);
        return embeddings->get_token_embed_weight();
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
                                struct ggml_tensor* input_ids,
                                struct ggml_tensor* tkn_embeddings,
                                struct ggml_tensor* mask = nullptr,
    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* input_ids,
                         ggml_tensor* tkn_embeddings,
                         ggml_tensor* mask = nullptr,
                         size_t max_token_idx = 0,
                         bool return_pooled = false,
                         int clip_skip = -1) {
@ -804,8 +804,8 @@ public:
        blocks["post_layernorm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(hidden_size));
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
                                struct ggml_tensor* pixel_values,
    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* pixel_values,
                         bool return_pooled = true,
                         int clip_skip = -1) {
        // pixel_values: [N, num_channels, image_size, image_size]
@ -839,7 +839,7 @@ protected:
    int64_t out_features;
    bool transpose_weight;

    void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
    void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
        enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
        if (transpose_weight) {
            params["weight"] = ggml_new_tensor_2d(ctx, wtype, out_features, in_features);
@ -856,8 +856,8 @@ public:
          out_features(out_features),
          transpose_weight(transpose_weight) {}

    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
        struct ggml_tensor* w = params["weight"];
    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
        ggml_tensor* w = params["weight"];
        if (transpose_weight) {
            w = ggml_cont(ctx->ggml_ctx, ggml_transpose(ctx->ggml_ctx, w));
        }
@ -886,8 +886,8 @@ public:
        blocks["visual_projection"] = std::shared_ptr<GGMLBlock>(new CLIPProjection(hidden_size, projection_dim, transpose_proj_w));
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
                                struct ggml_tensor* pixel_values,
    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* pixel_values,
                         bool return_pooled = true,
                         int clip_skip = -1) {
        // pixel_values: [N, num_channels, image_size, image_size]
@ -936,14 +936,14 @@ struct CLIPTextModelRunner : public GGMLRunner {
        return "clip";
    }

    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
        model.get_param_tensors(tensors, prefix);
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
                                struct ggml_tensor* input_ids,
                                struct ggml_tensor* embeddings,
                                struct ggml_tensor* mask,
    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* input_ids,
                         ggml_tensor* embeddings,
                         ggml_tensor* mask,
                         size_t max_token_idx = 0,
                         bool return_pooled = false,
                         int clip_skip = -1) {
@ -957,17 +957,16 @@ struct CLIPTextModelRunner : public GGMLRunner {
        return model.forward(ctx, input_ids, embeddings, mask, max_token_idx, return_pooled, clip_skip);
    }

    struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids,
    ggml_cgraph* build_graph(const sd::Tensor<int32_t>& input_ids_tensor,
                             int num_custom_embeddings = 0,
                             void* custom_embeddings_data = nullptr,
                             size_t max_token_idx = 0,
                             bool return_pooled = false,
                             int clip_skip = -1) {
        struct ggml_cgraph* gf = new_graph_custom(2048);
        ggml_cgraph* gf = new_graph_custom(2048);
        ggml_tensor* input_ids = make_input(input_ids_tensor);

        input_ids = to_backend(input_ids);

        struct ggml_tensor* embeddings = nullptr;
        ggml_tensor* embeddings = nullptr;

        if (num_custom_embeddings > 0 && custom_embeddings_data != nullptr) {
            auto token_embed_weight = model.get_token_embed_weight();
@ -997,26 +996,28 @@ struct CLIPTextModelRunner : public GGMLRunner {

        auto runner_ctx = get_context();

        struct ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, embeddings, attention_mask, max_token_idx, return_pooled, clip_skip);
        ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, embeddings, attention_mask, max_token_idx, return_pooled, clip_skip);

        ggml_build_forward_expand(gf, hidden_states);

        return gf;
    }

    bool compute(const int n_threads,
                 struct ggml_tensor* input_ids,
    sd::Tensor<float> compute(const int n_threads,
                              const sd::Tensor<int32_t>& input_ids,
                 int num_custom_embeddings,
                 void* custom_embeddings_data,
                 size_t max_token_idx,
                 bool return_pooled,
                 int clip_skip,
                 ggml_tensor** output,
                 ggml_context* output_ctx = nullptr) {
        auto get_graph = [&]() -> struct ggml_cgraph* {
                 int clip_skip) {
        auto get_graph = [&]() -> ggml_cgraph* {
            return build_graph(input_ids, num_custom_embeddings, custom_embeddings_data, max_token_idx, return_pooled, clip_skip);
        };
        return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
        auto result = GGMLRunner::compute<float>(get_graph, n_threads, true);
        if (return_pooled) {
            return take_or_empty(std::move(result));
        }
        return restore_trailing_singleton_dims(std::move(result), 3);
    }
};

@ -23,7 +23,7 @@ public:
        }
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
        // x: [N, channels, h, w]
        if (vae_downsample) {
            auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]);
@ -52,7 +52,7 @@ public:
        blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
        // x: [N, channels, h, w]
        auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]);

@ -121,7 +121,7 @@ public:
        }
    }

    virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* emb = nullptr) {
    virtual ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, ggml_tensor* emb = nullptr) {
        // For dims==3, we reduce dimension from 5d to 4d by merging h and w, in order not to change ggml
        // [N, c, t, h, w] => [N, c, t, h * w]
        // x: [N, channels, h, w] if dims == 2 else [N, channels, t, h, w]
@ -188,7 +188,7 @@ public:
        blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out * 2));
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
        // x: [ne3, ne2, ne1, dim_in]
        // return: [ne3, ne2, ne1, dim_out]
        auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
@ -214,7 +214,7 @@ public:
        blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out, bias));
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
        // x: [ne3, ne2, ne1, dim_in]
        // return: [ne3, ne2, ne1, dim_out]
        auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
@ -258,7 +258,7 @@ public:
        blocks["net.2"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim_out, true, false, force_prec_f32, scale));
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
        // x: [ne3, ne2, ne1, dim]
        // return: [ne3, ne2, ne1, dim_out]

@ -297,9 +297,9 @@ public:
        // to_out_1 is nn.Dropout(), skip for inference
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
                                struct ggml_tensor* x,
                                struct ggml_tensor* context) {
    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* x,
                         ggml_tensor* context) {
        // x: [N, n_token, query_dim]
        // context: [N, n_context, context_dim]
        // return: [N, n_token, query_dim]
@ -355,9 +355,9 @@ public:
        }
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
                                struct ggml_tensor* x,
                                struct ggml_tensor* context) {
    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* x,
                         ggml_tensor* context) {
        // x: [N, n_token, query_dim]
        // context: [N, n_context, context_dim]
        // return: [N, n_token, query_dim]
@ -406,7 +406,7 @@ protected:
    int64_t context_dim = 768;  // hidden_size, 1024 for VERSION_SD2
    bool use_linear = false;

    void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") {
    void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") {
        auto iter = tensor_storage_map.find(prefix + "proj_out.weight");
        if (iter != tensor_storage_map.end()) {
            int64_t inner_dim = n_head * d_head;
@ -456,9 +456,9 @@ public:
        }
    }

    virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx,
                                        struct ggml_tensor* x,
                                        struct ggml_tensor* context) {
    virtual ggml_tensor* forward(GGMLRunnerContext* ctx,
                                 ggml_tensor* x,
                                 ggml_tensor* context) {
        // x: [N, in_channels, h, w]
        // context: [N, max_position(aka n_token), hidden_size(aka context_dim)]
        auto norm = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm"]);
@ -510,7 +510,7 @@ public:

class AlphaBlender : public GGMLBlock {
protected:
    void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, std::string prefix = "") override {
    void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, std::string prefix = "") override {
        // Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
        enum ggml_type wtype = GGML_TYPE_F32;
        params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
@ -530,9 +530,9 @@ public:
        // since mix_factor.shape is [1,], we don't need rearrange using rearrange_pattern
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
                                struct ggml_tensor* x_spatial,
                                struct ggml_tensor* x_temporal) {
    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* x_spatial,
                         ggml_tensor* x_temporal) {
        // image_only_indicator is always tensor([0.])
        float alpha = get_alpha();
        auto x = ggml_add(ctx->ggml_ctx,
@ -555,9 +555,9 @@ public:
        blocks["time_mixer"] = std::shared_ptr<GGMLBlock>(new AlphaBlender());
    }

    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
                                struct ggml_tensor* x,
                                struct ggml_tensor* emb,
    ggml_tensor* forward(GGMLRunnerContext* ctx,
                         ggml_tensor* x,
                         ggml_tensor* emb,
                         int num_video_frames) {
        // x: [N, channels, h, w] aka [b*t, channels, h, w]
        // emb: [N, emb_channels] aka [b*t, emb_channels]

@ -4,7 +4,7 @@
#include "ggml_extend.hpp"

namespace DiT {
    ggml_tensor* patchify(ggml_context* ctx,
    inline ggml_tensor* patchify(ggml_context* ctx,
                          ggml_tensor* x,
                          int pw,
                          int ph,
@ -33,7 +33,7 @@ namespace DiT {
        return x;
    }

    ggml_tensor* unpatchify(ggml_context* ctx,
    inline ggml_tensor* unpatchify(ggml_context* ctx,
                            ggml_tensor* x,
                            int64_t h,
                            int64_t w,
@ -64,7 +64,7 @@ namespace DiT {
        return x;
    }

    ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx,
    inline ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx,
                                   ggml_tensor* x,
                                   int ph,
                                   int pw) {
@ -77,7 +77,7 @@ namespace DiT {
        return x;
    }

    ggml_tensor* pad_and_patchify(GGMLRunnerContext* ctx,
    inline ggml_tensor* pad_and_patchify(GGMLRunnerContext* ctx,
                                  ggml_tensor* x,
                                  int ph,
                                  int pw,
@ -87,7 +87,7 @@ namespace DiT {
        return x;
    }

    ggml_tensor* unpatchify_and_crop(ggml_context* ctx,
    inline ggml_tensor* unpatchify_and_crop(ggml_context* ctx,
                                     ggml_tensor* x,
                                     int64_t H,
                                     int64_t W,

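The only change to the DiT helpers above is the added inline. Because this header is included from several translation units, non-inline function definitions at namespace scope would each emit a strong symbol and the link would fail with duplicate definitions; inline permits the repeated definitions under the one-definition rule. A minimal illustration of the pattern (file and function names are made up):

// helpers.hpp -- included from multiple .cpp files
#pragma once

namespace demo {
    // Without `inline`, every .cpp that includes this header would emit its
    // own strong definition of scale() and linking would fail.
    inline float scale(float x) {
        return 2.0f * x;
    }
}  // namespace demo
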
64
src/condition_cache_utils.hpp
Normal file
@ -0,0 +1,64 @@
#ifndef __CONDITION_CACHE_UTILS_HPP__
#define __CONDITION_CACHE_UTILS_HPP__

#include <vector>

#include "tensor.hpp"

namespace sd {

inline bool store_condition_cache_diff(std::vector<float>* diff,
                                       const sd::Tensor<float>& input,
                                       const sd::Tensor<float>& output) {
    if (diff == nullptr || input.empty() || output.empty()) {
        return false;
    }

    size_t input_size = static_cast<size_t>(input.numel());
    size_t output_size = static_cast<size_t>(output.numel());
    if (input_size == 0 || input_size != output_size) {
        diff->clear();
        return false;
    }

    const float* input_data = input.data();
    const float* output_data = output.data();
    if (input_data == nullptr || output_data == nullptr) {
        diff->clear();
        return false;
    }

    diff->resize(output_size);
    for (size_t i = 0; i < output_size; ++i) {
        (*diff)[i] = output_data[i] - input_data[i];
    }
    return true;
}

inline bool apply_condition_cache_diff(const std::vector<float>& diff,
                                       const sd::Tensor<float>& input,
                                       sd::Tensor<float>* output) {
    if (output == nullptr || input.empty() || diff.empty()) {
        return false;
    }

    size_t input_size = static_cast<size_t>(input.numel());
    if (input_size == 0 || diff.size() != input_size) {
        return false;
    }

    *output = input;
    float* output_data = output->data();
    if (output_data == nullptr) {
        return false;
    }

    for (size_t i = 0; i < input_size; ++i) {
        output_data[i] += diff[i];
    }
    return true;
}

} // namespace sd

#endif // __CONDITION_CACHE_UTILS_HPP__

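These two helpers factor out the residual-cache pattern used by CacheDitConditionState above: record diff = output - input once from a full evaluation, then approximate later outputs as input + diff. A small usage sketch (the surrounding function and tensor arguments are illustrative):

#include <vector>
#include "condition_cache_utils.hpp"

void demo(const sd::Tensor<float>& in0, const sd::Tensor<float>& out0,
          const sd::Tensor<float>& in1, sd::Tensor<float>* out1) {
    std::vector<float> diff;
    // the expensive step ran once: remember its residual
    if (sd::store_condition_cache_diff(&diff, in0, out0)) {
        // a later step is skipped: reuse the residual on the new input
        sd::apply_condition_cache_diff(diff, in1, out1);  // *out1 = in1 + diff
    }
}
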
File diff suppressed because it is too large
136
src/control.hpp
@ -164,26 +164,26 @@ public:
|
||||
blocks["middle_block_out.0"] = std::shared_ptr<GGMLBlock>(make_zero_conv(ch));
|
||||
}
|
||||
|
||||
struct ggml_tensor* resblock_forward(std::string name,
|
||||
ggml_tensor* resblock_forward(std::string name,
|
||||
GGMLRunnerContext* ctx,
|
||||
struct ggml_tensor* x,
|
||||
struct ggml_tensor* emb) {
|
||||
ggml_tensor* x,
|
||||
ggml_tensor* emb) {
|
||||
auto block = std::dynamic_pointer_cast<ResBlock>(blocks[name]);
|
||||
return block->forward(ctx, x, emb);
|
||||
}
|
||||
|
||||
struct ggml_tensor* attention_layer_forward(std::string name,
|
||||
ggml_tensor* attention_layer_forward(std::string name,
|
||||
GGMLRunnerContext* ctx,
|
||||
struct ggml_tensor* x,
|
||||
struct ggml_tensor* context) {
|
||||
ggml_tensor* x,
|
||||
ggml_tensor* context) {
|
||||
auto block = std::dynamic_pointer_cast<SpatialTransformer>(blocks[name]);
|
||||
return block->forward(ctx, x, context);
|
||||
}
|
||||
|
||||
struct ggml_tensor* input_hint_block_forward(GGMLRunnerContext* ctx,
|
||||
struct ggml_tensor* hint,
|
||||
struct ggml_tensor* emb,
|
||||
struct ggml_tensor* context) {
|
||||
ggml_tensor* input_hint_block_forward(GGMLRunnerContext* ctx,
|
||||
ggml_tensor* hint,
|
||||
ggml_tensor* emb,
|
||||
ggml_tensor* context) {
|
||||
int num_input_blocks = 15;
|
||||
auto h = hint;
|
||||
for (int i = 0; i < num_input_blocks; i++) {
|
||||
@ -198,13 +198,13 @@ public:
|
||||
return h;
|
||||
}
|
||||
|
||||
std::vector<struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
||||
struct ggml_tensor* x,
|
||||
struct ggml_tensor* hint,
|
||||
struct ggml_tensor* guided_hint,
|
||||
struct ggml_tensor* timesteps,
|
||||
struct ggml_tensor* context,
|
||||
struct ggml_tensor* y = nullptr) {
|
||||
std::vector<ggml_tensor*> forward(GGMLRunnerContext* ctx,
|
||||
ggml_tensor* x,
|
||||
ggml_tensor* hint,
|
||||
ggml_tensor* guided_hint,
|
||||
ggml_tensor* timesteps,
|
||||
ggml_tensor* context,
|
||||
ggml_tensor* y = nullptr) {
|
||||
// x: [N, in_channels, h, w] or [N, in_channels/2, h, w]
|
||||
// timesteps: [N,]
|
||||
// context: [N, max_position, hidden_size] or [1, max_position, hidden_size]. for example, [N, 77, 768]
|
||||
@ -246,7 +246,7 @@ public:
|
||||
emb = ggml_add(ctx->ggml_ctx, emb, label_emb); // [N, time_embed_dim]
|
||||
}
|
||||
|
||||
std::vector<struct ggml_tensor*> outs;
|
||||
std::vector<ggml_tensor*> outs;
|
||||
|
||||
if (guided_hint == nullptr) {
|
||||
guided_hint = input_hint_block_forward(ctx, hint, emb, context);
|
||||
@ -310,10 +310,12 @@ struct ControlNet : public GGMLRunner {
|
||||
SDVersion version = VERSION_SD1;
|
||||
ControlNetBlock control_net;
|
||||
|
||||
ggml_backend_buffer_t control_buffer = nullptr; // keep control output tensors in backend memory
|
||||
ggml_backend_buffer_t control_buffer = nullptr;
|
||||
ggml_context* control_ctx = nullptr;
|
||||
std::vector<struct ggml_tensor*> controls; // (12 input block outputs, 1 middle block output) SD 1.5
|
||||
struct ggml_tensor* guided_hint = nullptr; // guided_hint cache, for faster inference
|
||||
std::vector<ggml_tensor*> control_outputs_ggml;
|
||||
ggml_tensor* guided_hint_output_ggml = nullptr;
|
||||
std::vector<sd::Tensor<float>> controls;
|
||||
sd::Tensor<float> guided_hint;
|
||||
bool guided_hint_cached = false;
|
||||
|
||||
ControlNet(ggml_backend_t backend,
|
||||
@ -328,23 +330,23 @@ struct ControlNet : public GGMLRunner {
|
||||
free_control_ctx();
|
||||
}
|
||||
|
||||
void alloc_control_ctx(std::vector<struct ggml_tensor*> outs) {
|
||||
struct ggml_init_params params;
|
||||
void alloc_control_ctx(std::vector<ggml_tensor*> outs) {
|
||||
ggml_init_params params;
|
||||
params.mem_size = static_cast<size_t>(outs.size() * ggml_tensor_overhead()) + 1024 * 1024;
|
||||
params.mem_buffer = nullptr;
|
||||
params.no_alloc = true;
|
||||
control_ctx = ggml_init(params);
|
||||
|
||||
controls.resize(outs.size() - 1);
|
||||
control_outputs_ggml.resize(outs.size() - 1);
|
||||
|
||||
size_t control_buffer_size = 0;
|
||||
|
||||
guided_hint = ggml_dup_tensor(control_ctx, outs[0]);
|
||||
control_buffer_size += ggml_nbytes(guided_hint);
|
||||
guided_hint_output_ggml = ggml_dup_tensor(control_ctx, outs[0]);
|
||||
control_buffer_size += ggml_nbytes(guided_hint_output_ggml);
|
||||
|
||||
for (int i = 0; i < outs.size() - 1; i++) {
|
||||
controls[i] = ggml_dup_tensor(control_ctx, outs[i + 1]);
|
||||
control_buffer_size += ggml_nbytes(controls[i]);
|
||||
control_outputs_ggml[i] = ggml_dup_tensor(control_ctx, outs[i + 1]);
|
||||
control_buffer_size += ggml_nbytes(control_outputs_ggml[i]);
|
||||
}
|
||||
|
||||
control_buffer = ggml_backend_alloc_ctx_tensors(control_ctx, runtime_backend);
|
||||
@ -361,8 +363,10 @@ struct ControlNet : public GGMLRunner {
|
||||
ggml_free(control_ctx);
|
control_ctx = nullptr;
}
guided_hint = nullptr;
guided_hint_output_ggml = nullptr;
guided_hint_cached = false;
guided_hint = {};
control_outputs_ggml.clear();
controls.clear();
}

@@ -370,33 +374,37 @@ struct ControlNet : public GGMLRunner {
return "control_net";
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
control_net.get_param_tensors(tensors, prefix);
}

struct ggml_cgraph* build_graph(struct ggml_tensor* x,
struct ggml_tensor* hint,
struct ggml_tensor* timesteps,
struct ggml_tensor* context,
struct ggml_tensor* y = nullptr) {
struct ggml_cgraph* gf = new_graph_custom(CONTROL_NET_GRAPH_SIZE);
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& hint_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor = {},
const sd::Tensor<float>& y_tensor = {}) {
ggml_cgraph* gf = new_graph_custom(CONTROL_NET_GRAPH_SIZE);

x = to_backend(x);
if (guided_hint_cached) {
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* hint = nullptr;
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* y = make_optional_input(y_tensor);

ggml_tensor* guided_hint_input = nullptr;
if (guided_hint_cached && !guided_hint.empty()) {
guided_hint_input = make_input(guided_hint);
hint = nullptr;
} else {
hint = to_backend(hint);
hint = make_input(hint_tensor);
}
context = to_backend(context);
y = to_backend(y);
timesteps = to_backend(timesteps);

auto runner_ctx = get_context();

auto outs = control_net.forward(&runner_ctx,
x,
hint,
guided_hint_cached ? guided_hint : nullptr,
guided_hint_input,
timesteps,
context,
y);
@@ -405,36 +413,46 @@ struct ControlNet : public GGMLRunner {
alloc_control_ctx(outs);
}

ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[0], guided_hint));
ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[0], guided_hint_output_ggml));
for (int i = 0; i < outs.size() - 1; i++) {
ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[i + 1], controls[i]));
ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[i + 1], control_outputs_ggml[i]));
}

return gf;
}

bool compute(int n_threads,
struct ggml_tensor* x,
struct ggml_tensor* hint,
struct ggml_tensor* timesteps,
struct ggml_tensor* context,
struct ggml_tensor* y,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) {
std::optional<std::vector<sd::Tensor<float>>> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& hint,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context = {},
const sd::Tensor<float>& y = {}) {
// x: [N, in_channels, h, w]
// timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
// y: [N, adm_in_channels] or [1, adm_in_channels]
auto get_graph = [&]() -> struct ggml_cgraph* {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, hint, timesteps, context, y);
};

bool res = GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
if (res) {
// cache guided_hint
guided_hint_cached = true;
auto compute_result = GGMLRunner::compute<float>(get_graph, n_threads, false);
if (!compute_result.has_value()) {
return std::nullopt;
}
return res;

if (guided_hint_output_ggml != nullptr) {
guided_hint = restore_trailing_singleton_dims(sd::make_sd_tensor_from_ggml<float>(guided_hint_output_ggml),
4);
}
controls.clear();
controls.reserve(control_outputs_ggml.size());
for (ggml_tensor* control : control_outputs_ggml) {
auto control_host = restore_trailing_singleton_dims(sd::make_sd_tensor_from_ggml<float>(control), 4);
GGML_ASSERT(!control_host.empty());
controls.push_back(std::move(control_host));
}
guided_hint_cached = true;
return controls;
}
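The reworked ControlNet::compute above now returns the control feature maps as host-side tensors instead of writing through ggml output pointers. A minimal caller-side sketch, not part of the patch, assuming only the signature shown above (`cn`, `x`, `hint`, `timesteps`, and `n_threads` are illustrative names):

// Sketch: drive the new ControlNet::compute and consume its result.
// The first call uploads `hint` and computes the guided hint; afterwards
// guided_hint_cached is set, so later calls reuse the cached host tensor.
auto controls_opt = cn.compute(n_threads, x, hint, timesteps);
if (!controls_opt.has_value()) {
    return;  // graph build or execution failed
}
const std::vector<sd::Tensor<float>>& controls = *controls_opt;
// controls[i] can then be fed to the diffusion model as per-block residuals.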
bool load_from_file(const std::string& file_path, int n_threads) {

1199 src/denoiser.hpp
File diff suppressed because it is too large
@@ -1,41 +1,49 @@
#ifndef __DIFFUSION_MODEL_H__
#define __DIFFUSION_MODEL_H__

#include <optional>
#include "anima.hpp"
#include "flux.hpp"
#include "mmdit.hpp"
#include "qwen_image.hpp"
#include "tensor_ggml.hpp"
#include "unet.hpp"
#include "wan.hpp"
#include "z_image.hpp"

struct DiffusionParams {
struct ggml_tensor* x = nullptr;
struct ggml_tensor* timesteps = nullptr;
struct ggml_tensor* context = nullptr;
struct ggml_tensor* c_concat = nullptr;
struct ggml_tensor* y = nullptr;
struct ggml_tensor* guidance = nullptr;
std::vector<ggml_tensor*> ref_latents = {};
const sd::Tensor<float>* x = nullptr;
const sd::Tensor<float>* timesteps = nullptr;
const sd::Tensor<float>* context = nullptr;
const sd::Tensor<float>* c_concat = nullptr;
const sd::Tensor<float>* y = nullptr;
const sd::Tensor<int32_t>* t5_ids = nullptr;
const sd::Tensor<float>* t5_weights = nullptr;
const sd::Tensor<float>* guidance = nullptr;
const std::vector<sd::Tensor<float>>* ref_latents = nullptr;
bool increase_ref_index = false;
int num_video_frames = -1;
std::vector<struct ggml_tensor*> controls = {};
const std::vector<sd::Tensor<float>>* controls = nullptr;
float control_strength = 0.f;
struct ggml_tensor* vace_context = nullptr;
const sd::Tensor<float>* vace_context = nullptr;
float vace_strength = 1.f;
std::vector<int> skip_layers = {};
const std::vector<int>* skip_layers = nullptr;
};

template <typename T>
static inline const sd::Tensor<T>& tensor_or_empty(const sd::Tensor<T>* tensor) {
static const sd::Tensor<T> kEmpty;
return tensor != nullptr ? *tensor : kEmpty;
}
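DiffusionParams now carries non-owning pointers, so a caller fills in only the fields it has and tensor_or_empty() above bridges the optional ones. A sketch of the intended call pattern (illustrative names such as `model` and `n_threads` are not from the patch):

// Sketch: populate the pointer-based DiffusionParams and run a model.
sd::Tensor<float> x;          // noisy latents, filled elsewhere
sd::Tensor<float> timesteps;  // one timestep per batch item, filled elsewhere
DiffusionParams p;
p.x         = &x;             // non-owning: the caller keeps the tensors alive
p.timesteps = &timesteps;     // required; compute() asserts on nullptr
// optional fields (context, y, controls, ...) stay nullptr and are turned
// into empty tensors by tensor_or_empty() inside each model's compute()
sd::Tensor<float> denoised = model->compute(n_threads, p);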
struct DiffusionModel {
virtual std::string get_desc() = 0;
virtual bool compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) = 0;
virtual sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) = 0;
virtual void alloc_params_buffer() = 0;
virtual void free_params_buffer() = 0;
virtual void free_compute_buffer() = 0;
virtual void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) = 0;
virtual void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) = 0;
virtual size_t get_params_buffer_size() = 0;
virtual void set_weight_adapter(const std::shared_ptr<WeightAdapter>& adapter){};
virtual int64_t get_adm_in_channels() = 0;
@@ -69,7 +77,7 @@ struct UNetModel : public DiffusionModel {
unet.free_compute_buffer();
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
unet.get_param_tensors(tensors, "model.diffusion_model");
}

@@ -93,19 +101,20 @@ struct UNetModel : public DiffusionModel {
unet.set_circular_axes(circular_x, circular_y);
}

bool compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<sd::Tensor<float>> empty_controls;
return unet.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.c_concat,
diffusion_params.y,
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
tensor_or_empty(diffusion_params.c_concat),
tensor_or_empty(diffusion_params.y),
diffusion_params.num_video_frames,
diffusion_params.controls,
diffusion_params.control_strength, output, output_ctx);
diffusion_params.controls ? *diffusion_params.controls : empty_controls,
diffusion_params.control_strength);
}
};

@@ -134,7 +143,7 @@ struct MMDiTModel : public DiffusionModel {
mmdit.free_compute_buffer();
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
mmdit.get_param_tensors(tensors, "model.diffusion_model");
}

@@ -158,18 +167,17 @@ struct MMDiTModel : public DiffusionModel {
mmdit.set_circular_axes(circular_x, circular_y);
}

bool compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<int> empty_skip_layers;
return mmdit.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.y,
output,
output_ctx,
diffusion_params.skip_layers);
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
tensor_or_empty(diffusion_params.y),
diffusion_params.skip_layers ? *diffusion_params.skip_layers : empty_skip_layers);
}
};

@@ -200,7 +208,7 @@ struct FluxModel : public DiffusionModel {
flux.free_compute_buffer();
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
flux.get_param_tensors(tensors, "model.diffusion_model");
}

@@ -224,22 +232,22 @@ struct FluxModel : public DiffusionModel {
flux.set_circular_axes(circular_x, circular_y);
}

bool compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<sd::Tensor<float>> empty_ref_latents;
static const std::vector<int> empty_skip_layers;
return flux.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.c_concat,
diffusion_params.y,
diffusion_params.guidance,
diffusion_params.ref_latents,
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
tensor_or_empty(diffusion_params.c_concat),
tensor_or_empty(diffusion_params.y),
tensor_or_empty(diffusion_params.guidance),
diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_ref_latents,
diffusion_params.increase_ref_index,
output,
output_ctx,
diffusion_params.skip_layers);
diffusion_params.skip_layers ? *diffusion_params.skip_layers : empty_skip_layers);
}
};

@@ -270,7 +278,7 @@ struct AnimaModel : public DiffusionModel {
anima.free_compute_buffer();
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
anima.get_param_tensors(tensors, prefix);
}

@@ -294,18 +302,16 @@ struct AnimaModel : public DiffusionModel {
anima.set_circular_axes(circular_x, circular_y);
}

bool compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
return anima.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.c_concat,
diffusion_params.y,
output,
output_ctx);
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
tensor_or_empty(diffusion_params.t5_ids),
tensor_or_empty(diffusion_params.t5_weights));
}
};

@@ -337,7 +343,7 @@ struct WanModel : public DiffusionModel {
wan.free_compute_buffer();
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
wan.get_param_tensors(tensors, prefix);
}

@@ -361,21 +367,19 @@ struct WanModel : public DiffusionModel {
wan.set_circular_axes(circular_x, circular_y);
}

bool compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
return wan.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.y,
diffusion_params.c_concat,
nullptr,
diffusion_params.vace_context,
diffusion_params.vace_strength,
output,
output_ctx);
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
tensor_or_empty(diffusion_params.y),
tensor_or_empty(diffusion_params.c_concat),
sd::Tensor<float>(),
tensor_or_empty(diffusion_params.vace_context),
diffusion_params.vace_strength);
}
};

@@ -408,7 +412,7 @@ struct QwenImageModel : public DiffusionModel {
qwen_image.free_compute_buffer();
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
qwen_image.get_param_tensors(tensors, prefix);
}

@@ -432,18 +436,17 @@ struct QwenImageModel : public DiffusionModel {
qwen_image.set_circular_axes(circular_x, circular_y);
}

bool compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<sd::Tensor<float>> empty_ref_latents;
return qwen_image.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.ref_latents,
true, // increase_ref_index
output,
output_ctx);
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_ref_latents,
true);
}
};

@@ -475,7 +478,7 @@ struct ZImageModel : public DiffusionModel {
z_image.free_compute_buffer();
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
z_image.get_param_tensors(tensors, prefix);
}

@@ -499,18 +502,17 @@ struct ZImageModel : public DiffusionModel {
z_image.set_circular_axes(circular_x, circular_y);
}

bool compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<sd::Tensor<float>> empty_ref_latents;
return z_image.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.ref_latents,
true, // increase_ref_index
output,
output_ctx);
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_ref_latents,
true);
}
};
@@ -1,10 +1,15 @@
#ifndef __EASYCACHE_HPP__
#define __EASYCACHE_HPP__

#include <cmath>
#include <limits>
#include <unordered_map>
#include <vector>

#include "condition_cache_utils.hpp"
#include "denoiser.hpp"
#include "ggml_extend.hpp"
#include "tensor.hpp"

struct EasyCacheConfig {
bool enabled = false;
@@ -26,8 +31,8 @@ struct EasyCacheState {
bool initial_step = true;
bool skip_current_step = false;
bool step_active = false;
const SDCondition* anchor_condition = nullptr;
std::unordered_map<const SDCondition*, EasyCacheCacheEntry> cache_diffs;
const void* anchor_condition = nullptr;
std::unordered_map<const void*, EasyCacheCacheEntry> cache_diffs;
std::vector<float> prev_input;
std::vector<float> prev_output;
float output_prev_norm = 0.0f;
@@ -120,41 +125,30 @@ struct EasyCacheState {
return enabled() && step_active && skip_current_step;
}

bool has_cache(const SDCondition* cond) const {
bool has_cache(const void* cond) const {
auto it = cache_diffs.find(cond);
return it != cache_diffs.end() && !it->second.diff.empty();
}

void update_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
void update_cache(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
EasyCacheCacheEntry& entry = cache_diffs[cond];
size_t ne = static_cast<size_t>(ggml_nelements(output));
entry.diff.resize(ne);
float* out_data = (float*)output->data;
float* in_data = (float*)input->data;
for (size_t i = 0; i < ne; ++i) {
entry.diff[i] = out_data[i] - in_data[i];
}
sd::store_condition_cache_diff(&entry.diff, input, output);
}

void apply_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
void apply_cache(const void* cond, const sd::Tensor<float>& input, sd::Tensor<float>* output) {
auto it = cache_diffs.find(cond);
if (it == cache_diffs.end() || it->second.diff.empty()) {
return;
}
copy_ggml_tensor(output, input);
float* out_data = (float*)output->data;
const std::vector<float>& diff = it->second.diff;
for (size_t i = 0; i < diff.size(); ++i) {
out_data[i] += diff[i];
}
sd::apply_condition_cache_diff(it->second.diff, input, output);
}

bool before_condition(const SDCondition* cond,
ggml_tensor* input,
ggml_tensor* output,
bool before_condition(const void* cond,
const sd::Tensor<float>& input,
sd::Tensor<float>* output,
float sigma,
int step_index) {
if (!enabled() || step_index < 0) {
if (!enabled() || step_index < 0 || output == nullptr) {
return false;
}
if (step_index != current_step_index) {
@@ -181,11 +175,11 @@ struct EasyCacheState {
if (!has_prev_input || !has_prev_output || !has_cache(cond)) {
return false;
}
size_t ne = static_cast<size_t>(ggml_nelements(input));
size_t ne = static_cast<size_t>(input.numel());
if (prev_input.size() != ne) {
return false;
}
float* input_data = (float*)input->data;
const float* input_data = input.data();
last_input_change = 0.0f;
for (size_t i = 0; i < ne; ++i) {
last_input_change += std::fabs(input_data[i] - prev_input[i]);
@@ -211,7 +205,7 @@ struct EasyCacheState {
return false;
}

void after_condition(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
void after_condition(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
if (!step_is_active()) {
return;
}
@@ -220,15 +214,15 @@ struct EasyCacheState {
return;
}

size_t ne = static_cast<size_t>(ggml_nelements(input));
float* in_data = (float*)input->data;
size_t ne = static_cast<size_t>(input.numel());
const float* in_data = input.data();
prev_input.resize(ne);
for (size_t i = 0; i < ne; ++i) {
prev_input[i] = in_data[i];
}
has_prev_input = true;

float* out_data = (float*)output->data;
const float* out_data = output.data();
float output_change = 0.0f;
if (has_prev_output && prev_output.size() == ne) {
for (size_t i = 0; i < ne; ++i) {
@@ -263,3 +257,5 @@ struct EasyCacheState {
has_last_input_change = false;
}
};

#endif
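The per-condition cache above stores the residual of one full model call and replays it on skipped steps; the diff shows the element-wise loops moving into sd::store_condition_cache_diff / sd::apply_condition_cache_diff. The underlying arithmetic, isolated as a sketch (plain vectors instead of sd::Tensor; `store_diff` and `apply_diff` are hypothetical names, not the actual helpers in condition_cache_utils.hpp):

#include <cstddef>
#include <vector>

// store: diff = model_output - model_input, one value per element
static void store_diff(std::vector<float>& diff, const float* in, const float* out, size_t n) {
    diff.resize(n);
    for (size_t i = 0; i < n; ++i)
        diff[i] = out[i] - in[i];
}

// apply: on a skipped step, approximate output = input + cached diff
static void apply_diff(const std::vector<float>& diff, const float* in, float* out) {
    for (size_t i = 0; i < diff.size(); ++i)
        out[i] = in[i] + diff[i];
}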
@@ -27,11 +27,11 @@ public:
blocks["conv5"] = std::shared_ptr<GGMLBlock>(new Conv2d(num_feat + 4 * num_grow_ch, num_feat, {3, 3}, {1, 1}, {1, 1}));
}

struct ggml_tensor* lrelu(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* lrelu(GGMLRunnerContext* ctx, ggml_tensor* x) {
return ggml_leaky_relu(ctx->ggml_ctx, x, 0.2f, true);
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [n, num_feat, h, w]
// return: [n, num_feat, h, w]

@@ -64,7 +64,7 @@ public:
blocks["rdb3"] = std::shared_ptr<GGMLBlock>(new ResidualDenseBlock(num_feat, num_grow_ch));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [n, num_feat, h, w]
// return: [n, num_feat, h, w]

@@ -112,11 +112,11 @@ public:
int get_scale() { return scale; }
int get_num_block() { return num_block; }

struct ggml_tensor* lrelu(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* lrelu(GGMLRunnerContext* ctx, ggml_tensor* x) {
return ggml_leaky_relu(ctx->ggml_ctx, x, 0.2f, true);
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [n, num_in_ch, h, w]
// return: [n, num_out_ch, h*scale, w*scale]
auto conv_first = std::dynamic_pointer_cast<Conv2d>(blocks["conv_first"]);
@@ -341,27 +341,24 @@ struct ESRGAN : public GGMLRunner {
return success;
}

struct ggml_cgraph* build_graph(struct ggml_tensor* x) {
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor) {
if (!rrdb_net)
return nullptr;
constexpr int kGraphNodes = 1 << 16; // 65k
struct ggml_cgraph* gf = new_graph_custom(kGraphNodes);
x = to_backend(x);
ggml_cgraph* gf = new_graph_custom(kGraphNodes);
ggml_tensor* x = make_input(x_tensor);

auto runner_ctx = get_context();
struct ggml_tensor* out = rrdb_net->forward(&runner_ctx, x);
ggml_tensor* out = rrdb_net->forward(&runner_ctx, x);
ggml_build_forward_expand(gf, out);
return gf;
}

bool compute(const int n_threads,
struct ggml_tensor* x,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(x);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
sd::Tensor<float> compute(const int n_threads,
const sd::Tensor<float>& x) {
auto get_graph = [&]() -> ggml_cgraph* { return build_graph(x); };
auto result = restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
return result;
}
};
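ESRGAN::compute now returns the upscaled image as a host tensor rather than filling an output pointer. A hedged usage sketch (`esrgan`, `image`, and `n_threads` are illustrative; that a failed run yields an empty tensor is an assumption, suggested by build_graph returning nullptr when rrdb_net is missing):

sd::Tensor<float> image;  // [w, h, c, 1] input, filled elsewhere
sd::Tensor<float> upscaled = esrgan.compute(n_threads, image);
if (upscaled.empty()) {
    // presumably no result, e.g. rrdb_net was never loaded
}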
311 src/flux.hpp
@@ -19,7 +19,7 @@ namespace Flux {
blocks["out_layer"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_dim, hidden_dim, bias));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
// x: [..., in_dim]
// return: [..., hidden_dim]
auto in_layer = std::dynamic_pointer_cast<Linear>(blocks["in_layer"]);
@@ -37,7 +37,7 @@ namespace Flux {
int64_t hidden_size;
float eps;

void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
ggml_type wtype = GGML_TYPE_F32;
params["scale"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
}
@@ -48,8 +48,8 @@ namespace Flux {
: hidden_size(hidden_size),
eps(eps) {}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
struct ggml_tensor* w = params["scale"];
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
ggml_tensor* w = params["scale"];
x = ggml_rms_norm(ctx->ggml_ctx, x, eps);
x = ggml_mul(ctx->ggml_ctx, x, w);
return x;
@@ -63,7 +63,7 @@ namespace Flux {
blocks["key_norm"] = std::shared_ptr<GGMLBlock>(new RMSNorm(dim));
}

struct ggml_tensor* query_norm(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* query_norm(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [..., dim]
// return: [..., dim]
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["query_norm"]);
@@ -72,7 +72,7 @@ namespace Flux {
return x;
}

struct ggml_tensor* key_norm(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* key_norm(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [..., dim]
// return: [..., dim]
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["key_norm"]);
@@ -98,7 +98,7 @@ namespace Flux {
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim, proj_bias));
}

std::vector<struct ggml_tensor*> pre_attention(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
std::vector<ggml_tensor*> pre_attention(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto qkv_proj = std::dynamic_pointer_cast<Linear>(blocks["qkv"]);
auto norm = std::dynamic_pointer_cast<QKNorm>(blocks["norm"]);

@@ -115,17 +115,17 @@ namespace Flux {
return {q, k, v};
}

struct ggml_tensor* post_attention(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* post_attention(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);

x = proj->forward(ctx, x); // [N, n_token, dim]
return x;
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* pe,
struct ggml_tensor* mask) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* pe,
ggml_tensor* mask) {
// x: [N, n_token, dim]
// pe: [n_token, d_head/2, 2, 2]
// return [N, n_token, dim]
@@ -147,7 +147,7 @@ namespace Flux {
blocks["2"] = std::make_shared<Linear>(intermediate_size, hidden_size, bias);
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["0"]);
auto mlp_2 = std::dynamic_pointer_cast<Linear>(blocks["2"]);

@@ -170,7 +170,7 @@ namespace Flux {
blocks["down_proj"] = std::make_shared<Linear>(intermediate_size, hidden_size, bias);
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto gate_proj = std::dynamic_pointer_cast<Linear>(blocks["gate_proj"]);
auto up_proj = std::dynamic_pointer_cast<Linear>(blocks["up_proj"]);
auto down_proj = std::dynamic_pointer_cast<Linear>(blocks["down_proj"]);
@@ -212,7 +212,7 @@ namespace Flux {
blocks["lin"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim * multiplier, bias));
}

std::vector<ModulationOut> forward(GGMLRunnerContext* ctx, struct ggml_tensor* vec) {
std::vector<ModulationOut> forward(GGMLRunnerContext* ctx, ggml_tensor* vec) {
// x: [N, dim]
// return: [ModulationOut, ModulationOut]
auto lin = std::dynamic_pointer_cast<Linear>(blocks["lin"]);
@@ -232,10 +232,10 @@ namespace Flux {
}
};

__STATIC_INLINE__ struct ggml_tensor* modulate(struct ggml_context* ctx,
struct ggml_tensor* x,
struct ggml_tensor* shift,
struct ggml_tensor* scale,
__STATIC_INLINE__ ggml_tensor* modulate(ggml_context* ctx,
ggml_tensor* x,
ggml_tensor* shift,
ggml_tensor* scale,
bool skip_reshape = false) {
// x: [N, L, C]
// scale: [N, C]
@@ -294,7 +294,7 @@ namespace Flux {
}
}

std::vector<ModulationOut> get_distil_img_mod(GGMLRunnerContext* ctx, struct ggml_tensor* vec) {
std::vector<ModulationOut> get_distil_img_mod(GGMLRunnerContext* ctx, ggml_tensor* vec) {
// TODO: not hardcoded?
const int single_blocks_count = 38;
const int double_blocks_count = 19;
@@ -303,7 +303,7 @@ namespace Flux {
return {ModulationOut(ctx, vec, offset), ModulationOut(ctx, vec, offset + 3)};
}

std::vector<ModulationOut> get_distil_txt_mod(GGMLRunnerContext* ctx, struct ggml_tensor* vec) {
std::vector<ModulationOut> get_distil_txt_mod(GGMLRunnerContext* ctx, ggml_tensor* vec) {
// TODO: not hardcoded?
const int single_blocks_count = 38;
const int double_blocks_count = 19;
@@ -312,12 +312,12 @@ namespace Flux {
return {ModulationOut(ctx, vec, offset), ModulationOut(ctx, vec, offset + 3)};
}

std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
struct ggml_tensor* img,
struct ggml_tensor* txt,
struct ggml_tensor* vec,
struct ggml_tensor* pe,
struct ggml_tensor* mask = nullptr,
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
ggml_tensor* img,
ggml_tensor* txt,
ggml_tensor* vec,
ggml_tensor* pe,
ggml_tensor* mask = nullptr,
std::vector<ModulationOut> img_mods = {},
std::vector<ModulationOut> txt_mods = {}) {
// img: [N, n_img_token, hidden_size]
@@ -457,16 +457,16 @@ namespace Flux {
}
}

ModulationOut get_distil_mod(GGMLRunnerContext* ctx, struct ggml_tensor* vec) {
ModulationOut get_distil_mod(GGMLRunnerContext* ctx, ggml_tensor* vec) {
int64_t offset = 3 * idx;
return ModulationOut(ctx, vec, offset);
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* vec,
struct ggml_tensor* pe,
struct ggml_tensor* mask = nullptr,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* vec,
ggml_tensor* pe,
ggml_tensor* mask = nullptr,
std::vector<ModulationOut> mods = {}) {
// x: [N, n_token, hidden_size]
// pe: [n_token, d_head/2, 2, 2]
@@ -539,7 +539,7 @@ namespace Flux {
}
}

ModulationOut get_distil_mod(GGMLRunnerContext* ctx, struct ggml_tensor* vec) {
ModulationOut get_distil_mod(GGMLRunnerContext* ctx, ggml_tensor* vec) {
int64_t offset = vec->ne[2] - 2;
int64_t stride = vec->nb[1] * vec->ne[1];
auto shift = ggml_view_2d(ctx->ggml_ctx, vec, vec->ne[0], vec->ne[1], vec->nb[1], stride * (offset + 0)); // [N, dim]
@@ -548,15 +548,15 @@ namespace Flux {
return {shift, scale, nullptr};
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* c) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* c) {
// x: [N, n_token, hidden_size]
// c: [N, hidden_size]
// return: [N, n_token, patch_size * patch_size * out_channels]
auto norm_final = std::dynamic_pointer_cast<LayerNorm>(blocks["norm_final"]);
auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]);
struct ggml_tensor *shift, *scale;
ggml_tensor *shift, *scale;
if (prune_mod) {
auto mod = get_distil_mod(ctx, c);
shift = mod.shift;
@@ -589,7 +589,7 @@ namespace Flux {
blocks["out_proj"] = std::shared_ptr<GGMLBlock>(new Linear(inner_size, hidden_size, true));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto in_proj = std::dynamic_pointer_cast<Linear>(blocks["in_proj"]);
auto out_proj = std::dynamic_pointer_cast<Linear>(blocks["out_proj"]);

@@ -612,9 +612,9 @@ namespace Flux {
blocks["embedder.0"] = std::make_shared<Linear>(in_channels + max_freqs * max_freqs, hidden_size_input);
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* dct) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* dct) {
// x: (B, P^2, C)
// dct: (1, P^2, max_freqs^2)
// return: (B, P^2, hidden_size_input)
@@ -639,9 +639,9 @@ namespace Flux {
blocks["norm"] = std::make_shared<RMSNorm>(hidden_size_x);
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* s) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* s) {
// x: (batch_size, n_token, hidden_size_x)
// s: (batch_size, hidden_size_s)
// return: (batch_size, n_token, hidden_size_x)
@@ -689,8 +689,8 @@ namespace Flux {
blocks["linear"] = std::make_shared<Linear>(hidden_size, out_channels);
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x) {
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["norm"]);
auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]);

@@ -708,8 +708,8 @@ namespace Flux {
blocks["conv"] = std::make_shared<Conv2d>(hidden_size, out_channels, std::pair{3, 3}, std::pair{1, 1}, std::pair{1, 1});
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x) {
// x: [N, C, H, W]
auto norm = std::dynamic_pointer_cast<RMSNorm>(blocks["norm"]);
auto conv = std::dynamic_pointer_cast<Conv2d>(blocks["conv"]);
@@ -847,14 +847,14 @@ namespace Flux {
}
}

struct ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
struct ggml_tensor* img,
struct ggml_tensor* txt,
struct ggml_tensor* timesteps,
struct ggml_tensor* y,
struct ggml_tensor* guidance,
struct ggml_tensor* pe,
struct ggml_tensor* mod_index_arange = nullptr,
ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
ggml_tensor* img,
ggml_tensor* txt,
ggml_tensor* timesteps,
ggml_tensor* y,
ggml_tensor* guidance,
ggml_tensor* pe,
ggml_tensor* mod_index_arange = nullptr,
std::vector<int> skip_layers = {}) {
auto img_in = std::dynamic_pointer_cast<Linear>(blocks["img_in"]);
auto txt_in = std::dynamic_pointer_cast<Linear>(blocks["txt_in"]);
@@ -864,8 +864,8 @@ namespace Flux {
img = img_in->forward(ctx, img);
}

struct ggml_tensor* vec;
struct ggml_tensor* txt_img_mask = nullptr;
ggml_tensor* vec;
ggml_tensor* txt_img_mask = nullptr;
if (params.is_chroma) {
int64_t mod_index_length = 344;
auto approx = std::dynamic_pointer_cast<ChromaApproximator>(blocks["distilled_guidance_layer"]);
@@ -967,25 +967,25 @@ namespace Flux {
return img;
}

struct ggml_tensor* _apply_x0_residual(GGMLRunnerContext* ctx,
struct ggml_tensor* predicted,
struct ggml_tensor* noisy,
struct ggml_tensor* timesteps) {
ggml_tensor* _apply_x0_residual(GGMLRunnerContext* ctx,
ggml_tensor* predicted,
ggml_tensor* noisy,
ggml_tensor* timesteps) {
auto x = ggml_sub(ctx->ggml_ctx, noisy, predicted);
x = ggml_div(ctx->ggml_ctx, x, timesteps);
return x;
}

struct ggml_tensor* forward_chroma_radiance(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* timestep,
struct ggml_tensor* context,
struct ggml_tensor* c_concat,
struct ggml_tensor* y,
struct ggml_tensor* guidance,
struct ggml_tensor* pe,
struct ggml_tensor* mod_index_arange = nullptr,
struct ggml_tensor* dct = nullptr,
ggml_tensor* forward_chroma_radiance(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* timestep,
ggml_tensor* context,
ggml_tensor* c_concat,
ggml_tensor* y,
ggml_tensor* guidance,
ggml_tensor* pe,
ggml_tensor* mod_index_arange = nullptr,
ggml_tensor* dct = nullptr,
std::vector<ggml_tensor*> ref_latents = {},
std::vector<int> skip_layers = {}) {
GGML_ASSERT(x->ne[3] == 1);
@@ -1050,16 +1050,16 @@ namespace Flux {
return out;
}

struct ggml_tensor* forward_flux_chroma(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* timestep,
struct ggml_tensor* context,
struct ggml_tensor* c_concat,
struct ggml_tensor* y,
struct ggml_tensor* guidance,
struct ggml_tensor* pe,
struct ggml_tensor* mod_index_arange = nullptr,
struct ggml_tensor* dct = nullptr,
ggml_tensor* forward_flux_chroma(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* timestep,
ggml_tensor* context,
ggml_tensor* c_concat,
ggml_tensor* y,
ggml_tensor* guidance,
ggml_tensor* pe,
ggml_tensor* mod_index_arange = nullptr,
ggml_tensor* dct = nullptr,
std::vector<ggml_tensor*> ref_latents = {},
std::vector<int> skip_layers = {}) {
GGML_ASSERT(x->ne[3] == 1);
@@ -1119,16 +1119,16 @@ namespace Flux {
return out;
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* timestep,
struct ggml_tensor* context,
struct ggml_tensor* c_concat,
struct ggml_tensor* y,
struct ggml_tensor* guidance,
struct ggml_tensor* pe,
struct ggml_tensor* mod_index_arange = nullptr,
struct ggml_tensor* dct = nullptr,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* timestep,
ggml_tensor* context,
ggml_tensor* c_concat,
ggml_tensor* y,
ggml_tensor* guidance,
ggml_tensor* pe,
ggml_tensor* mod_index_arange = nullptr,
ggml_tensor* dct = nullptr,
std::vector<ggml_tensor*> ref_latents = {},
std::vector<int> skip_layers = {}) {
// Forward pass of DiT.
@@ -1178,6 +1178,7 @@ namespace Flux {
std::vector<float> pe_vec;
std::vector<float> mod_index_arange_vec;
std::vector<float> dct_vec;
sd::Tensor<float> guidance_tensor;
SDVersion version;
bool use_mask = false;

@@ -1299,7 +1300,7 @@ namespace Flux {
return "flux";
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
flux.get_param_tensors(tensors, prefix);
}

@@ -1353,29 +1354,42 @@ namespace Flux {
return dct;
}

struct ggml_cgraph* build_graph(struct ggml_tensor* x,
struct ggml_tensor* timesteps,
struct ggml_tensor* context,
struct ggml_tensor* c_concat,
struct ggml_tensor* y,
struct ggml_tensor* guidance,
std::vector<ggml_tensor*> ref_latents = {},
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor = {},
const sd::Tensor<float>& c_concat_tensor = {},
const sd::Tensor<float>& y_tensor = {},
const sd::Tensor<float>& guidance_tensor = {},
const std::vector<sd::Tensor<float>>& ref_latents_tensor = {},
bool increase_ref_index = false,
std::vector<int> skip_layers = {}) {
GGML_ASSERT(x->ne[3] == 1);
struct ggml_cgraph* gf = new_graph_custom(FLUX_GRAPH_SIZE);

struct ggml_tensor* mod_index_arange = nullptr;
struct ggml_tensor* dct = nullptr; // for chroma radiance

x = to_backend(x);
context = to_backend(context);
if (c_concat != nullptr) {
c_concat = to_backend(c_concat);
}
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* c_concat = make_optional_input(c_concat_tensor);
ggml_tensor* y = make_optional_input(y_tensor);
if (flux_params.guidance_embed || flux_params.is_chroma) {
if (!guidance_tensor.empty()) {
this->guidance_tensor = guidance_tensor;
if (flux_params.is_chroma) {
guidance = ggml_set_f32(guidance, 0);
this->guidance_tensor.fill_(0.f);
}
}
}
ggml_tensor* guidance = make_optional_input(this->guidance_tensor);
std::vector<ggml_tensor*> ref_latents;
ref_latents.reserve(ref_latents_tensor.size());
for (const auto& ref_latent_tensor : ref_latents_tensor) {
ref_latents.push_back(make_input(ref_latent_tensor));
}

GGML_ASSERT(x->ne[3] == 1);
ggml_cgraph* gf = new_graph_custom(FLUX_GRAPH_SIZE);

ggml_tensor* mod_index_arange = nullptr;
ggml_tensor* dct = nullptr; // for chroma radiance

if (flux_params.is_chroma) {
if (!use_mask) {
y = nullptr;
}
@@ -1385,16 +1399,6 @@ namespace Flux {
mod_index_arange = ggml_new_tensor_1d(compute_ctx, GGML_TYPE_F32, mod_index_arange_vec.size());
set_backend_tensor_data(mod_index_arange, mod_index_arange_vec.data());
}
y = to_backend(y);

timesteps = to_backend(timesteps);
if (flux_params.guidance_embed || flux_params.is_chroma) {
guidance = to_backend(guidance);
}
for (int i = 0; i < ref_latents.size(); i++) {
ref_latents[i] = to_backend(ref_latents[i]);
}

std::set<int> txt_arange_dims;
if (sd_version_is_flux2(version)) {
txt_arange_dims = {3};
@@ -1437,7 +1441,7 @@ namespace Flux {

auto runner_ctx = get_context();

struct ggml_tensor* out = flux.forward(&runner_ctx,
ggml_tensor* out = flux.forward(&runner_ctx,
x,
timesteps,
context,
@@ -1455,71 +1459,80 @@ namespace Flux {
return gf;
}

bool compute(int n_threads,
struct ggml_tensor* x,
struct ggml_tensor* timesteps,
struct ggml_tensor* context,
struct ggml_tensor* c_concat,
struct ggml_tensor* y,
struct ggml_tensor* guidance,
std::vector<ggml_tensor*> ref_latents = {},
sd::Tensor<float> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context = {},
const sd::Tensor<float>& c_concat = {},
const sd::Tensor<float>& y = {},
const sd::Tensor<float>& guidance = {},
const std::vector<sd::Tensor<float>>& ref_latents = {},
bool increase_ref_index = false,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr,
std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, in_channels, h, w]
// timesteps: [N, ]
// context: [N, max_position, hidden_size]
// y: [N, adm_in_channels] or [1, adm_in_channels]
// guidance: [N, ]
auto get_graph = [&]() -> struct ggml_cgraph* {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, c_concat, y, guidance, ref_latents, increase_ref_index, skip_layers);
};

return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
auto result = restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
return result;
}

void test() {
struct ggml_init_params params;
ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
params.mem_buffer = nullptr;
params.no_alloc = false;

struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);

{
// cpu f16:
// cuda f16: nan
// cuda q8_0: pass
auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 128, 1);
sd::Tensor<float> x({16, 16, 128, 1});
// ggml_set_f32(x, 0.01f);
// auto x = load_tensor_from_file(work_ctx, "chroma_x.bin");
// auto x = load_tensor_from_file(ctx, "chroma_x.bin");
// print_ggml_tensor(x);

std::vector<float> timesteps_vec(1, 1.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);

std::vector<float> guidance_vec(1, 0.f);
auto guidance = vector_to_ggml_tensor(work_ctx, guidance_vec);
auto guidance = sd::Tensor<float>::from_vector(guidance_vec);

auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 15360, 256, 1);
sd::Tensor<float> context({15360, 256, 1});
// ggml_set_f32(context, 0.01f);
// auto context = load_tensor_from_file(work_ctx, "chroma_context.bin");
// auto context = load_tensor_from_file(ctx, "chroma_context.bin");
// print_ggml_tensor(context);

// auto y = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 768, 1);
// auto y = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 768, 1);
// ggml_set_f32(y, 0.01f);
auto y = nullptr;
// print_ggml_tensor(y);

struct ggml_tensor* out = nullptr;
sd::Tensor<float> out;

int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, nullptr, y, guidance, {}, false, &out, work_ctx);
auto out_opt = compute(8,
x,
timesteps,
context,
{},
{},
guidance,
{},
false);
int64_t t1 = ggml_time_ms();

print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("flux test done in %lldms", t1 - t0);
}
}
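The Flux runner follows the same host-tensor pattern: inputs are sd::Tensor<float>, optional conditions default to {}, and the denoised latents come back by value, as exercised by the test() harness above. A condensed sketch of a call (shapes taken from test(); `runner` is an illustrative name):

sd::Tensor<float> x({16, 16, 128, 1});  // latents
auto timesteps = sd::Tensor<float>::from_vector(std::vector<float>(1, 1.f));
sd::Tensor<float> context({15360, 256, 1});
sd::Tensor<float> out = runner.compute(8, x, timesteps, context);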
File diff suppressed because it is too large
@@ -1,6 +1,8 @@
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include "ggml.h"
#include "tensor.hpp"

const float wan_21_latent_rgb_proj[16][3] = {
{0.015123f, -0.148418f, 0.479828f},
@@ -163,7 +165,7 @@ const float sd_latent_rgb_proj[4][3] = {
{-0.178022f, -0.200862f, -0.678514f}};
float sd_latent_rgb_bias[3] = {-0.017478f, -0.055834f, -0.105825f};

void preview_latent_video(uint8_t* buffer, struct ggml_tensor* latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) {
void preview_latent_video(uint8_t* buffer, ggml_tensor* latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) {
size_t buffer_head = 0;

uint32_t latent_width = static_cast<uint32_t>(latents->ne[0]);
@@ -232,3 +234,67 @@ void preview_latent_video(uint8_t* buffer, struct ggml_tensor* latents, const fl
}
}
}

static inline bool preview_latent_tensor_is_video(const sd::Tensor<float>& latents) {
return latents.dim() == 5;
}

void preview_latent_video(uint8_t* buffer, const sd::Tensor<float>& latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) {
uint32_t latent_width = static_cast<uint32_t>(latents.shape()[0]);
uint32_t latent_height = static_cast<uint32_t>(latents.shape()[1]);
bool is_video = preview_latent_tensor_is_video(latents);
uint32_t frames = is_video ? static_cast<uint32_t>(latents.shape()[2]) : 1;
uint32_t dim = is_video ? static_cast<uint32_t>(latents.shape()[3]) : static_cast<uint32_t>(latents.shape()[2]);

uint32_t rgb_width = latent_width * patch_size;
uint32_t rgb_height = latent_height * patch_size;
uint32_t unpatched_dim = dim / (patch_size * patch_size);

for (uint32_t k = 0; k < frames; k++) {
for (uint32_t rgb_x = 0; rgb_x < rgb_width; rgb_x++) {
for (uint32_t rgb_y = 0; rgb_y < rgb_height; rgb_y++) {
uint32_t latent_x = rgb_x / patch_size;
uint32_t latent_y = rgb_y / patch_size;

uint32_t channel_offset = 0;
if (patch_size > 1) {
channel_offset = ((rgb_y % patch_size) * patch_size + (rgb_x % patch_size));
}

size_t pixel_id = k * rgb_width * rgb_height + rgb_y * rgb_width + rgb_x;
auto latent_value = [&](uint32_t latent_channel) -> float {
return is_video
? latents.values()[latent_x + latent_width * (latent_y + latent_height * (k + frames * latent_channel))]
: latents.values()[latent_x + latent_width * (latent_y + latent_height * latent_channel)];
};

float r = 0.f, g = 0.f, b = 0.f;
if (latent_rgb_proj != nullptr) {
for (uint32_t d = 0; d < unpatched_dim; d++) {
uint32_t latent_channel = d * patch_size * patch_size + channel_offset;
float value = latent_value(latent_channel);
r += value * latent_rgb_proj[d][0];
g += value * latent_rgb_proj[d][1];
b += value * latent_rgb_proj[d][2];
}
} else {
r = latent_value(0);
g = latent_value(1);
b = latent_value(2);
}
if (latent_rgb_bias != nullptr) {
r += latent_rgb_bias[0];
g += latent_rgb_bias[1];
b += latent_rgb_bias[2];
}
r = std::min(1.0f, std::max(0.0f, r * .5f + .5f));
g = std::min(1.0f, std::max(0.0f, g * .5f + .5f));
b = std::min(1.0f, std::max(0.0f, b * .5f + .5f));

buffer[pixel_id * 3 + 0] = (uint8_t)(r * 255);
buffer[pixel_id * 3 + 1] = (uint8_t)(g * 255);
buffer[pixel_id * 3 + 2] = (uint8_t)(b * 255);
}
}
}
}
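Each preview pixel above is a linear projection of the latent channels to RGB plus a bias, mapped from roughly [-1, 1] into [0, 255]. The per-pixel math, isolated as a sketch (patch_size == 1 for clarity; `project_pixel` is a hypothetical helper, not part of the patch):

#include <algorithm>
#include <cstdint>

static void project_pixel(const float* latent, int channels,
                          const float (*proj)[3], const float bias[3],
                          uint8_t out[3]) {
    float rgb[3] = {0.f, 0.f, 0.f};
    for (int d = 0; d < channels; d++)
        for (int c = 0; c < 3; c++)
            rgb[c] += latent[d] * proj[d][c];  // latent -> RGB projection
    for (int c = 0; c < 3; c++) {
        float v = rgb[c] + (bias != nullptr ? bias[c] : 0.f);
        v = std::min(1.0f, std::max(0.0f, v * 0.5f + 0.5f));  // [-1,1] -> [0,1]
        out[c] = (uint8_t)(v * 255);
    }
}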
246 src/llm.hpp
@@ -194,6 +194,7 @@ namespace LLM {
bool padding = false) {
if (add_bos_token) {
tokens.insert(tokens.begin(), BOS_TOKEN_ID);
weights.insert(weights.begin(), 1.f);
}
if (max_length > 0 && padding) {
size_t n = static_cast<size_t>(std::ceil(tokens.size() * 1.f / max_length));
@@ -522,7 +523,7 @@ namespace LLM {
blocks["down_proj"] = std::shared_ptr<GGMLBlock>(new Linear(intermediate_size, hidden_size, bias));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, n_token, hidden_size]
auto gate_proj = std::dynamic_pointer_cast<Linear>(blocks["gate_proj"]);
auto up_proj = std::dynamic_pointer_cast<Linear>(blocks["up_proj"]);
@@ -582,7 +583,7 @@ namespace LLM {
}
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N*grid_t*grid_h*grid_w, in_channels, temporal_patch_size*patch_size*patch_size]
// return: [N*grid_t*grid_h*grid_w, embed_dim]
x = ggml_reshape_4d(ctx->ggml_ctx,
@@ -631,7 +632,7 @@ namespace LLM {
blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, dim));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto ln_q = std::dynamic_pointer_cast<RMSNorm>(blocks["ln_q"]);
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
auto mlp_2 = std::dynamic_pointer_cast<Linear>(blocks["mlp.2"]);
@@ -668,10 +669,10 @@ namespace LLM {
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, hidden_size));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* pe,
struct ggml_tensor* mask = nullptr) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* pe,
ggml_tensor* mask = nullptr) {
// x: [N, n_token, hidden_size]
int64_t n_token = x->ne[1];
int64_t N = x->ne[2];
@@ -718,10 +719,10 @@ namespace LLM {
blocks["norm2"] = std::shared_ptr<GGMLBlock>(new RMSNorm(hidden_size, eps));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* pe,
struct ggml_tensor* mask = nullptr) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* pe,
ggml_tensor* mask = nullptr) {
// x: [N, n_token, hidden_size]
auto attn = std::dynamic_pointer_cast<VisionAttention>(blocks["attn"]);
auto mlp = std::dynamic_pointer_cast<MLP>(blocks["mlp"]);
@@ -778,12 +779,12 @@ namespace LLM {
blocks["merger"] = std::shared_ptr<GGMLBlock>(new PatchMerger(out_hidden_size, hidden_size, spatial_merge_size));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* pixel_values,
struct ggml_tensor* pe,
struct ggml_tensor* window_index,
struct ggml_tensor* window_inverse_index,
struct ggml_tensor* window_mask) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* pixel_values,
ggml_tensor* pe,
ggml_tensor* window_index,
ggml_tensor* window_inverse_index,
ggml_tensor* window_mask) {
// pixel_values: [grid_t*(H/mh/ph)*(W/mw/pw)*mh*mw, C*pt*ph*pw]
// window_index: [grid_t*(H/mh/ph)*(W/mw/pw)]
// window_inverse_index: [grid_t*(H/mh/ph)*(W/mw/pw)]
@@ -836,10 +837,10 @@ namespace LLM {
}
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* input_pos,
struct ggml_tensor* attention_mask = nullptr) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* input_pos,
ggml_tensor* attention_mask = nullptr) {
// x: [N, n_token, hidden_size]
int64_t n_token = x->ne[1];
int64_t N = x->ne[2];
@@ -898,10 +899,10 @@ namespace LLM {
blocks["post_attention_layernorm"] = std::make_shared<RMSNorm>(params.hidden_size, params.rms_norm_eps);
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* input_pos,
struct ggml_tensor* attention_mask = nullptr) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* input_pos,
ggml_tensor* attention_mask = nullptr) {
// x: [N, n_token, hidden_size]
auto self_attn = std::dynamic_pointer_cast<Attention>(blocks["self_attn"]);
auto mlp = std::dynamic_pointer_cast<MLP>(blocks["mlp"]);
@@ -936,10 +937,10 @@ namespace LLM {
blocks["norm"] = std::shared_ptr<GGMLBlock>(new RMSNorm(params.hidden_size, params.rms_norm_eps));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* input_ids,
struct ggml_tensor* input_pos,
struct ggml_tensor* attention_mask,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* input_ids,
ggml_tensor* input_pos,
ggml_tensor* attention_mask,
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
std::set<int> out_layers) {
// input_ids: [N, n_token]
@@ -1037,10 +1038,10 @@ namespace LLM {
}
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* input_ids,
struct ggml_tensor* input_pos,
struct ggml_tensor* attention_mask,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* input_ids,
ggml_tensor* input_pos,
ggml_tensor* attention_mask,
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
std::set<int> out_layers) {
// input_ids: [N, n_token]
@@ -1050,12 +1051,12 @@ namespace LLM {
return x;
}

struct ggml_tensor* vision_forward(GGMLRunnerContext* ctx,
struct ggml_tensor* pixel_values,
struct ggml_tensor* pe,
struct ggml_tensor* window_index,
struct ggml_tensor* window_inverse_index,
struct ggml_tensor* window_mask) {
ggml_tensor* vision_forward(GGMLRunnerContext* ctx,
ggml_tensor* pixel_values,
ggml_tensor* pe,
ggml_tensor* window_index,
ggml_tensor* window_inverse_index,
ggml_tensor* window_mask) {
GGML_ASSERT(enable_vision);
auto vision_model = std::dynamic_pointer_cast<VisionModel>(blocks["visual"]);
return vision_model->forward(ctx, pixel_values, pe, window_index, window_inverse_index, window_mask);
@@ -1156,40 +1157,41 @@ namespace LLM {
return llm_arch_to_str[static_cast<int>(params.arch)];
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
model.get_param_tensors(tensors, prefix);
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* input_ids,
struct ggml_tensor* input_pos,
struct ggml_tensor* attention_mask,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* input_ids,
ggml_tensor* input_pos,
ggml_tensor* attention_mask,
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
std::set<int> out_layers) {
auto hidden_states = model.forward(ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers); // [N, n_token, hidden_size]
return hidden_states;
}

struct ggml_tensor* vision_forward(GGMLRunnerContext* ctx,
struct ggml_tensor* pixel_values,
struct ggml_tensor* input_pos,
struct ggml_tensor* window_index,
struct ggml_tensor* window_inverse_index,
struct ggml_tensor* window_mask) {
ggml_tensor* vision_forward(GGMLRunnerContext* ctx,
ggml_tensor* pixel_values,
ggml_tensor* input_pos,
ggml_tensor* window_index,
ggml_tensor* window_inverse_index,
ggml_tensor* window_mask) {
auto hidden_states = model.vision_forward(ctx, pixel_values, input_pos, window_index, window_inverse_index, window_mask);
return hidden_states;
}

struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids,
struct ggml_tensor* attention_mask,
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
ggml_cgraph* build_graph(const sd::Tensor<int32_t>& input_ids_tensor,
const sd::Tensor<float>& attention_mask_tensor,
const std::vector<std::pair<int, sd::Tensor<float>>>& image_embeds_tensor,
std::set<int> out_layers) {
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);

input_ids = to_backend(input_ids);

for (auto& image_embed : image_embeds) {
|
||||
image_embed.second = to_backend(image_embed.second);
|
||||
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
||||
ggml_tensor* input_ids = make_input(input_ids_tensor);
|
||||
std::vector<std::pair<int, ggml_tensor*>> image_embeds;
|
||||
image_embeds.reserve(image_embeds_tensor.size());
|
||||
for (const auto& [idx, embed_tensor] : image_embeds_tensor) {
|
||||
ggml_tensor* embed = make_input(embed_tensor);
|
||||
image_embeds.emplace_back(idx, embed);
|
||||
}
|
||||
|
||||
int64_t n_tokens = input_ids->ne[0];
|
||||
@ -1213,8 +1215,9 @@ namespace LLM {
|
||||
input_pos_vec.size());
|
||||
set_backend_tensor_data(input_pos, input_pos_vec.data());
|
||||
|
||||
if (attention_mask != nullptr) {
|
||||
attention_mask = to_backend(attention_mask);
|
||||
ggml_tensor* attention_mask = nullptr;
|
||||
if (!attention_mask_tensor.empty()) {
|
||||
attention_mask = make_input(attention_mask_tensor);
|
||||
} else {
|
||||
attention_mask_vec.resize(n_tokens * n_tokens);
|
||||
for (int i0 = 0; i0 < n_tokens; i0++) {
|
||||
@ -1232,24 +1235,22 @@ namespace LLM {
|
||||
|
||||
auto runner_ctx = get_context();
|
||||
|
||||
struct ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers);
|
||||
ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, input_pos, attention_mask, image_embeds, out_layers);
|
||||
|
||||
ggml_build_forward_expand(gf, hidden_states);
|
||||
|
||||
return gf;
|
||||
}
|
||||
|
||||
bool compute(const int n_threads,
|
||||
struct ggml_tensor* input_ids,
|
||||
struct ggml_tensor* attention_mask,
|
||||
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
|
||||
std::set<int> out_layers,
|
||||
ggml_tensor** output,
|
||||
ggml_context* output_ctx = nullptr) {
|
||||
auto get_graph = [&]() -> struct ggml_cgraph* {
|
||||
sd::Tensor<float> compute(const int n_threads,
|
||||
const sd::Tensor<int32_t>& input_ids,
|
||||
const sd::Tensor<float>& attention_mask,
|
||||
const std::vector<std::pair<int, sd::Tensor<float>>>& image_embeds,
|
||||
std::set<int> out_layers) {
|
||||
auto get_graph = [&]() -> ggml_cgraph* {
|
||||
return build_graph(input_ids, attention_mask, image_embeds, out_layers);
|
||||
};
|
||||
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
|
||||
return take_or_empty(GGMLRunner::compute<float>(get_graph, n_threads, true));
|
||||
}
|
||||
|
||||
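Note: `compute` now returns the hidden states by value as an `sd::Tensor<float>` instead of filling a caller-supplied `ggml_tensor**`; judging by `take_or_empty`, an empty tensor signals failure. A minimal caller sketch, assuming only the interface visible in this diff (`runner` and `input_ids` are illustrative stand-ins, not names from this file):

    // Hedged sketch of calling the new value-returning API.
    sd::Tensor<float> hidden = runner.compute(/*n_threads=*/8,
                                              input_ids,
                                              /*attention_mask=*/sd::Tensor<float>(),
                                              /*image_embeds=*/{},
                                              /*out_layers=*/{});
    if (hidden.empty()) {
        LOG_ERROR("llm compute failed");
    } else {
        // shape()[0] is the fastest-varying dim (hidden_size), following ggml's ne ordering.
        LOG_DEBUG("hidden_size: %lld", (long long)hidden.shape()[0]);
    }
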
int64_t get_num_image_tokens(int64_t t, int64_t h, int64_t w) {
@ -1261,7 +1262,7 @@ namespace LLM {
return grid_t * grid_h * grid_w;
}

struct ggml_tensor* process_image(struct ggml_context* ctx, struct ggml_tensor* image) {
ggml_tensor* process_image(ggml_context* ctx, ggml_tensor* image) {
// image: [C, H, W]
// return: [grid_t*(H/mh/ph)*(W/mw/pw)*mh*mw, C*pt*ph*pw], grid_t == 1
int64_t C = image->ne[2];
@ -1288,8 +1289,9 @@ namespace LLM {
return image;
}

struct ggml_cgraph* build_encode_image_graph(struct ggml_tensor* image) {
struct ggml_cgraph* gf = new_graph_custom(LLM_GRAPH_SIZE);
ggml_cgraph* build_encode_image_graph(const sd::Tensor<float>& image_tensor) {
ggml_cgraph* gf = new_graph_custom(LLM_GRAPH_SIZE);
ggml_tensor* image = make_input(image_tensor);

GGML_ASSERT(image->ne[1] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
GGML_ASSERT(image->ne[0] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
@ -1301,8 +1303,6 @@ namespace LLM {
int llm_grid_w = grid_w / params.vision.spatial_merge_size;
int vit_merger_window_size = params.vision.window_size / params.vision.patch_size / params.vision.spatial_merge_size;

image = to_backend(image);

auto pixel_values = process_image(compute_ctx, image);

// window index
@ -1400,7 +1400,7 @@ namespace LLM {
set_backend_tensor_data(pe, pe_vec.data());

auto runner_ctx = get_context();
struct ggml_tensor* hidden_states = vision_forward(&runner_ctx,
ggml_tensor* hidden_states = vision_forward(&runner_ctx,
pixel_values,
pe,
window_index,
@ -1411,14 +1411,12 @@ namespace LLM {
return gf;
}

void encode_image(const int n_threads,
struct ggml_tensor* image,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* {
sd::Tensor<float> encode_image(const int n_threads,
const sd::Tensor<float>& image) {
auto get_graph = [&]() -> ggml_cgraph* {
return build_encode_image_graph(image);
};
GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return take_or_empty(GGMLRunner::compute<float>(get_graph, n_threads, false));
}
};

@ -1440,7 +1438,7 @@ namespace LLM {
}
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
model.get_param_tensors(tensors, prefix);
}

@ -1492,44 +1490,46 @@ namespace LLM {
}

void test() {
struct ggml_init_params params;
ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
params.mem_buffer = nullptr;
params.no_alloc = false;

struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);
bool test_mistral = false;
bool test_qwen3 = true;
bool test_vit = false;
bool test_decoder_with_vit = false;

if (test_decoder_with_vit) {
ggml_tensor* image_embed = nullptr;
sd::Tensor<float> image_embed;
{
auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin");
print_ggml_tensor(image, false, "image");
struct ggml_tensor* out = nullptr;
auto image = sd::load_tensor_from_file_as_tensor<float>("qwen2vl_normalized.bin");
print_sd_tensor(image, false, "image");
sd::Tensor<float> out;

int64_t t0 = ggml_time_ms();
model.encode_image(8, image, &out, work_ctx);
auto out_opt = model.encode_image(8, image);
int64_t t1 = ggml_time_ms();

print_ggml_tensor(out, false, "image_embed");
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out, false, "image_embed");
image_embed = out;
LOG_DEBUG("llm encode_image test done in %lldms", t1 - t0);
}

std::string placeholder = "<|image_pad|>";
std::string img_prompt = "Picture 1: <|vision_start|>"; // [24669, 220, 16, 25, 220, 151652]
int64_t num_image_tokens = image_embed->ne[1];
int64_t num_image_tokens = image_embed.shape()[1];
img_prompt.reserve(num_image_tokens * placeholder.size());
for (int i = 0; i < num_image_tokens; i++) {
img_prompt += placeholder;
}
img_prompt += "<|vision_end|>";

std::vector<std::pair<int, ggml_tensor*>> image_embeds;
std::vector<std::pair<int, sd::Tensor<float>>> image_embeds;
image_embeds.emplace_back(64, image_embed);

std::pair<int, int> prompt_attn_range;
@ -1547,29 +1547,33 @@ namespace LLM {
printf("%d ", token);
}
printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
struct ggml_tensor* out = nullptr;
auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
sd::Tensor<float> out;

int64_t t0 = ggml_time_ms();
model.compute(8, input_ids, nullptr, image_embeds, {}, &out, work_ctx);
auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), image_embeds, {});
int64_t t1 = ggml_time_ms();

print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0);
} else if (test_vit) {
// auto image = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 280, 280, 3);
// auto image = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 280, 280, 3);
// ggml_set_f32(image, 0.f);
auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin");
print_ggml_tensor(image, false, "image");
struct ggml_tensor* out = nullptr;
auto image = sd::load_tensor_from_file_as_tensor<float>("qwen2vl_normalized.bin");
print_sd_tensor(image, false, "image");
sd::Tensor<float> out;

int64_t t0 = ggml_time_ms();
model.encode_image(8, image, &out, work_ctx);
auto out_opt = model.encode_image(8, image);
int64_t t1 = ggml_time_ms();

print_ggml_tensor(out, false, "out");
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out, false, "out");

// auto ref_out = load_tensor_from_file(work_ctx, "qwen2vl.bin");
// auto ref_out = load_tensor_from_file(ctx, "qwen2vl.bin");
// ggml_ext_tensor_diff(ref_out, out, 0.01f);

LOG_DEBUG("llm test done in %lldms", t1 - t0);
@ -1587,14 +1591,16 @@ namespace LLM {
printf("%d ", token);
}
printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
struct ggml_tensor* out = nullptr;
auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
sd::Tensor<float> out;

int64_t t0 = ggml_time_ms();
model.compute(8, input_ids, nullptr, {}, {10, 20, 30}, &out, work_ctx);
auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), {}, {10, 20, 30});
int64_t t1 = ggml_time_ms();

print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0);
} else if (test_qwen3) {
std::pair<int, int> prompt_attn_range;
@ -1610,14 +1616,16 @@ namespace LLM {
printf("%d ", token);
}
printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
struct ggml_tensor* out = nullptr;
auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
sd::Tensor<float> out;

int64_t t0 = ggml_time_ms();
model.compute(8, input_ids, nullptr, {}, {35}, &out, work_ctx);
auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), {}, {35});
int64_t t1 = ggml_time_ms();

print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0);
} else {
std::pair<int, int> prompt_attn_range;
@ -1633,14 +1641,16 @@ namespace LLM {
printf("%d ", token);
}
printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
struct ggml_tensor* out = nullptr;
auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
sd::Tensor<float> out;

int64_t t0 = ggml_time_ms();
model.compute(8, input_ids, nullptr, {}, {}, &out, work_ctx);
auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), {}, {});
int64_t t1 = ggml_time_ms();

print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0);
}
}

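The tests above exercise the new `sd::Tensor` value type throughout. Its definition is not part of this diff; the following is an orientation-only sketch of the surface the diff implies (shape-vector construction, `from_vector`, `empty()`, `shape()`, `values()`, `fill_()`, `dim()`), and the real class elsewhere in the repository may differ:

    // Orientation-only sketch of the sd::Tensor<T> interface implied by this diff.
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    namespace sd_sketch {
    template <typename T>
    class Tensor {
    public:
        Tensor() = default;  // default-constructed tensor is empty()
        explicit Tensor(std::vector<int64_t> shape) : shape_(std::move(shape)) {
            int64_t n = 1;
            for (int64_t d : shape_) n *= d;
            values_.resize(static_cast<size_t>(n));
        }
        static Tensor from_vector(const std::vector<T>& v) {  // 1-D tensor over v
            Tensor t({static_cast<int64_t>(v.size())});
            t.values_ = v;
            return t;
        }
        bool empty() const { return values_.empty(); }
        size_t dim() const { return shape_.size(); }
        const std::vector<int64_t>& shape() const { return shape_; }
        std::vector<T>& values() { return values_; }
        const std::vector<T>& values() const { return values_; }
        void fill_(T v) { std::fill(values_.begin(), values_.end(), v); }
    private:
        std::vector<int64_t> shape_;
        std::vector<T> values_;
    };
    }  // namespace sd_sketch
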
18
src/lora.hpp
@ -9,7 +9,7 @@
struct LoraModel : public GGMLRunner {
std::string lora_id;
float multiplier = 1.0f;
std::unordered_map<std::string, struct ggml_tensor*> lora_tensors;
std::unordered_map<std::string, ggml_tensor*> lora_tensors;
std::map<ggml_tensor*, ggml_tensor*> original_tensor_to_final_tensor;
std::set<std::string> applied_lora_tensors;
std::string file_path;
@ -78,7 +78,7 @@ struct LoraModel : public GGMLRunner {
for (const auto& pair : tensors_to_create) {
const auto& name = pair.first;
const auto& ts = pair.second;
struct ggml_tensor* real = ggml_new_tensor(params_ctx,
ggml_tensor* real = ggml_new_tensor(params_ctx,
ts.type,
ts.n_dims,
ts.ne);
@ -337,8 +337,8 @@ struct LoraModel : public GGMLRunner {
}
scale_value *= multiplier;

struct ggml_tensor* updown_1 = ggml_ext_merge_lora(ctx, hada_1_down, hada_1_up, hada_1_mid);
struct ggml_tensor* updown_2 = ggml_ext_merge_lora(ctx, hada_2_down, hada_2_up, hada_2_mid);
ggml_tensor* updown_1 = ggml_ext_merge_lora(ctx, hada_1_down, hada_1_up, hada_1_mid);
ggml_tensor* updown_2 = ggml_ext_merge_lora(ctx, hada_2_down, hada_2_up, hada_2_mid);
auto curr_updown = ggml_mul_inplace(ctx, updown_1, updown_2);
curr_updown = ggml_ext_scale(ctx, curr_updown, scale_value, true);
if (updown == nullptr) {
@ -747,9 +747,9 @@ struct LoraModel : public GGMLRunner {
return out_diff;
}

struct ggml_cgraph* build_lora_graph(const std::map<std::string, ggml_tensor*>& model_tensors, SDVersion version) {
ggml_cgraph* build_lora_graph(const std::map<std::string, ggml_tensor*>& model_tensors, SDVersion version) {
size_t lora_graph_size = LORA_GRAPH_BASE_SIZE + lora_tensors.size() * 10;
struct ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, lora_graph_size, false);
ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, lora_graph_size, false);

preprocess_lora_tensors(model_tensors);

@ -788,11 +788,11 @@ struct LoraModel : public GGMLRunner {
return gf;
}

void apply(std::map<std::string, struct ggml_tensor*> model_tensors, SDVersion version, int n_threads) {
auto get_graph = [&]() -> struct ggml_cgraph* {
void apply(std::map<std::string, ggml_tensor*> model_tensors, SDVersion version, int n_threads) {
auto get_graph = [&]() -> ggml_cgraph* {
return build_lora_graph(model_tensors, version);
};
GGMLRunner::compute(get_graph, n_threads, false);
GGMLRunner::compute<float>(get_graph, n_threads, false, true);
stat();
for (auto item : original_tensor_to_final_tensor) {
ggml_tensor* original_tensor = item.first;

@ -26,8 +26,8 @@ namespace LTXV {
bias));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
bool causal = true) {
// x: [N*IC, ID, IH, IW]
// result: [N*OC, OD, OH, OW]

190
src/mmdit.hpp
@ -27,7 +27,7 @@ public:
blocks["fc2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_features, out_features, bias));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, n_token, in_features]
auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]);
auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
@ -72,7 +72,7 @@ public:
bias));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, C, H, W]
// return: [N, H*W, embed_dim]
auto proj = std::dynamic_pointer_cast<Conv2d>(blocks["proj"]);
@ -111,7 +111,7 @@ public:
blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, out_channels, true, true));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* t) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* t) {
// t: [N, ]
// return: [N, hidden_size]
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
@ -135,7 +135,7 @@ public:
blocks["mlp.2"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, hidden_size, true, true));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
// x: [N, input_dim]
// return: [N, hidden_size]
auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
@ -175,7 +175,7 @@ public:
}
}

std::vector<struct ggml_tensor*> pre_attention(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
std::vector<ggml_tensor*> pre_attention(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto qkv_proj = std::dynamic_pointer_cast<Linear>(blocks["qkv"]);

auto qkv = qkv_proj->forward(ctx, x);
@ -198,7 +198,7 @@ public:
return {q, k, v};
}

struct ggml_tensor* post_attention(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* post_attention(GGMLRunnerContext* ctx, ggml_tensor* x) {
GGML_ASSERT(!pre_only);

auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
@ -208,8 +208,8 @@ public:
}

// x: [N, n_token, dim]
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x) {
auto qkv = pre_attention(ctx, x);
x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, ctx->flash_attn_enabled); // [N, n_token, dim]
x = post_attention(ctx, x); // [N, n_token, dim]
@ -217,10 +217,10 @@ public:
}
};

__STATIC_INLINE__ struct ggml_tensor* modulate(struct ggml_context* ctx,
struct ggml_tensor* x,
struct ggml_tensor* shift,
struct ggml_tensor* scale) {
__STATIC_INLINE__ ggml_tensor* modulate(ggml_context* ctx,
ggml_tensor* x,
ggml_tensor* shift,
ggml_tensor* scale) {
// x: [N, L, C]
// scale: [N, C]
// shift: [N, C]
@ -274,8 +274,8 @@ public:
}

std::tuple<std::vector<ggml_tensor*>, std::vector<ggml_tensor*>, std::vector<ggml_tensor*>> pre_attention_x(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* c) {
ggml_tensor* x,
ggml_tensor* c) {
GGML_ASSERT(self_attn);
// x: [N, n_token, hidden_size]
// c: [N, hidden_size]
@ -309,9 +309,9 @@ public:
return {qkv, qkv2, {x, gate_msa, shift_mlp, scale_mlp, gate_mlp, gate_msa2}};
}

std::pair<std::vector<struct ggml_tensor*>, std::vector<struct ggml_tensor*>> pre_attention(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* c) {
std::pair<std::vector<ggml_tensor*>, std::vector<ggml_tensor*>> pre_attention(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* c) {
// x: [N, n_token, hidden_size]
// c: [N, hidden_size]
auto norm1 = std::dynamic_pointer_cast<LayerNorm>(blocks["norm1"]);
@ -346,15 +346,15 @@ public:
}
}

struct ggml_tensor* post_attention_x(GGMLRunnerContext* ctx,
struct ggml_tensor* attn_out,
struct ggml_tensor* attn2_out,
struct ggml_tensor* x,
struct ggml_tensor* gate_msa,
struct ggml_tensor* shift_mlp,
struct ggml_tensor* scale_mlp,
struct ggml_tensor* gate_mlp,
struct ggml_tensor* gate_msa2) {
ggml_tensor* post_attention_x(GGMLRunnerContext* ctx,
ggml_tensor* attn_out,
ggml_tensor* attn2_out,
ggml_tensor* x,
ggml_tensor* gate_msa,
ggml_tensor* shift_mlp,
ggml_tensor* scale_mlp,
ggml_tensor* gate_mlp,
ggml_tensor* gate_msa2) {
// attn_out: [N, n_token, hidden_size]
// x: [N, n_token, hidden_size]
// gate_msa: [N, hidden_size]
@ -384,13 +384,13 @@ public:
return x;
}

struct ggml_tensor* post_attention(GGMLRunnerContext* ctx,
struct ggml_tensor* attn_out,
struct ggml_tensor* x,
struct ggml_tensor* gate_msa,
struct ggml_tensor* shift_mlp,
struct ggml_tensor* scale_mlp,
struct ggml_tensor* gate_mlp) {
ggml_tensor* post_attention(GGMLRunnerContext* ctx,
ggml_tensor* attn_out,
ggml_tensor* x,
ggml_tensor* gate_msa,
ggml_tensor* shift_mlp,
ggml_tensor* scale_mlp,
ggml_tensor* gate_mlp) {
// attn_out: [N, n_token, hidden_size]
// x: [N, n_token, hidden_size]
// gate_msa: [N, hidden_size]
@ -416,9 +416,9 @@ public:
return x;
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* c) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* c) {
// x: [N, n_token, hidden_size]
// c: [N, hidden_size]
// return: [N, n_token, hidden_size]
@ -463,11 +463,11 @@ public:
}
};

__STATIC_INLINE__ std::pair<struct ggml_tensor*, struct ggml_tensor*>
__STATIC_INLINE__ std::pair<ggml_tensor*, ggml_tensor*>
block_mixing(GGMLRunnerContext* ctx,
struct ggml_tensor* context,
struct ggml_tensor* x,
struct ggml_tensor* c,
ggml_tensor* context,
ggml_tensor* x,
ggml_tensor* c,
std::shared_ptr<DismantledBlock> context_block,
std::shared_ptr<DismantledBlock> x_block) {
// context: [N, n_context, hidden_size]
@ -489,7 +489,7 @@ block_mixing(GGMLRunnerContext* ctx,
x_qkv = x_qkv_intermediates.first;
x_intermediates = x_qkv_intermediates.second;
}
std::vector<struct ggml_tensor*> qkv;
std::vector<ggml_tensor*> qkv;
for (int i = 0; i < 3; i++) {
qkv.push_back(ggml_concat(ctx->ggml_ctx, context_qkv[i], x_qkv[i], 1));
}
@ -563,10 +563,10 @@ public:
blocks["x_block"] = std::shared_ptr<GGMLBlock>(new DismantledBlock(hidden_size, num_heads, mlp_ratio, qk_norm, qkv_bias, false, self_attn_x));
}

std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
struct ggml_tensor* context,
struct ggml_tensor* x,
struct ggml_tensor* c) {
std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
ggml_tensor* context,
ggml_tensor* x,
ggml_tensor* c) {
auto context_block = std::dynamic_pointer_cast<DismantledBlock>(blocks["context_block"]);
auto x_block = std::dynamic_pointer_cast<DismantledBlock>(blocks["x_block"]);

@ -586,9 +586,9 @@ public:
blocks["adaLN_modulation.1"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_size, 2 * hidden_size));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* c) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* c) {
// x: [N, n_token, hidden_size]
// c: [N, hidden_size]
// return: [N, n_token, patch_size * patch_size * out_channels]
@ -626,7 +626,7 @@ protected:
int64_t hidden_size;
std::string qk_norm;

void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, std::string prefix = "") override {
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, std::string prefix = "") override {
enum ggml_type wtype = GGML_TYPE_F32;
params["pos_embed"] = ggml_new_tensor_3d(ctx, wtype, hidden_size, num_patchs, 1);
}
@ -705,8 +705,8 @@ public:
blocks["final_layer"] = std::shared_ptr<GGMLBlock>(new FinalLayer(hidden_size, patch_size, out_channels));
}

struct ggml_tensor*
cropped_pos_embed(struct ggml_context* ctx,
ggml_tensor*
cropped_pos_embed(ggml_context* ctx,
int64_t h,
int64_t w) {
auto pos_embed = params["pos_embed"];
@ -745,10 +745,10 @@ public:
return spatial_pos_embed;
}

struct ggml_tensor* forward_core_with_concat(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* c_mod,
struct ggml_tensor* context,
ggml_tensor* forward_core_with_concat(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* c_mod,
ggml_tensor* context,
std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, H*W, hidden_size]
// context: [N, n_context, d_context]
@ -774,11 +774,11 @@ public:
return x;
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* t,
struct ggml_tensor* y = nullptr,
struct ggml_tensor* context = nullptr,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* t,
ggml_tensor* y = nullptr,
ggml_tensor* context = nullptr,
std::vector<int> skip_layers = std::vector<int>()) {
// Forward pass of DiT.
// x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
@ -832,24 +832,24 @@ struct MMDiTRunner : public GGMLRunner {
return "mmdit";
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
mmdit.get_param_tensors(tensors, prefix);
}

struct ggml_cgraph* build_graph(struct ggml_tensor* x,
struct ggml_tensor* timesteps,
struct ggml_tensor* context,
struct ggml_tensor* y,
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor = {},
const sd::Tensor<float>& y_tensor = {},
std::vector<int> skip_layers = std::vector<int>()) {
struct ggml_cgraph* gf = new_graph_custom(MMDIT_GRAPH_SIZE);
ggml_cgraph* gf = new_graph_custom(MMDIT_GRAPH_SIZE);

x = to_backend(x);
context = to_backend(context);
y = to_backend(y);
timesteps = to_backend(timesteps);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* y = make_optional_input(y_tensor);

auto runner_ctx = get_context();
struct ggml_tensor* out = mmdit.forward(&runner_ctx,
ggml_tensor* out = mmdit.forward(&runner_ctx,
x,
timesteps,
y,
@ -861,60 +861,64 @@ struct MMDiTRunner : public GGMLRunner {
return gf;
}

bool compute(int n_threads,
struct ggml_tensor* x,
struct ggml_tensor* timesteps,
struct ggml_tensor* context,
struct ggml_tensor* y,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr,
sd::Tensor<float> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context = {},
const sd::Tensor<float>& y = {},
std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, in_channels, h, w]
// timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 154, 4096]) or [1, max_position, hidden_size]
// y: [N, adm_in_channels] or [1, adm_in_channels]
auto get_graph = [&]() -> struct ggml_cgraph* {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, y, skip_layers);
};

return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
}
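One behavioral note on the new return path: `GGMLRunner::compute<float>` evidently returns a tensor whose trailing size-1 dimensions may have been dropped, and `restore_trailing_singleton_dims` pads the shape back out to `x.dim()` so callers see the rank they passed in. A minimal sketch of that assumed shape-padding behavior only; the repository's real helper may differ:

    #include <cstdint>
    #include <vector>

    // Assumed behavior: append trailing 1s until the shape has `rank` dimensions.
    static std::vector<int64_t> pad_trailing_singleton_dims(std::vector<int64_t> shape, size_t rank) {
        while (shape.size() < rank) {
            shape.push_back(1);  // a size-1 dim carries no data, only rank
        }
        return shape;
    }
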

void test() {
struct ggml_init_params params;
ggml_init_params params;
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
params.mem_buffer = nullptr;
params.no_alloc = false;

struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);

{
// cpu f16: pass
// cpu f32: pass
// cuda f16: pass
// cuda f32: pass
auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 128, 128, 16, 1);
sd::Tensor<float> x({128, 128, 16, 1});
std::vector<float> timesteps_vec(1, 999.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
ggml_set_f32(x, 0.01f);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
x.fill_(0.01f);
// print_ggml_tensor(x);

auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 4096, 154, 1);
ggml_set_f32(context, 0.01f);
sd::Tensor<float> context({4096, 154, 1});
context.fill_(0.01f);
// print_ggml_tensor(context);

auto y = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 2048, 1);
ggml_set_f32(y, 0.01f);
sd::Tensor<float> y({2048, 1});
y.fill_(0.01f);
// print_ggml_tensor(y);

struct ggml_tensor* out = nullptr;
sd::Tensor<float> out;

int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, y, &out, work_ctx);
auto out_opt = compute(8,
x,
timesteps,
context,
y);
int64_t t1 = ggml_time_ms();

print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("mmdit test done in %lldms", t1 - t0);
}
}

@ -162,43 +162,7 @@ uint16_t f8_e4m3_to_f16(uint8_t f8) {
}

uint16_t f8_e5m2_to_f16(uint8_t fp8) {
uint8_t sign = (fp8 >> 7) & 0x1;
uint8_t exponent = (fp8 >> 2) & 0x1F;
uint8_t mantissa = fp8 & 0x3;

uint16_t fp16_sign = sign << 15;
uint16_t fp16_exponent;
uint16_t fp16_mantissa;

if (exponent == 0 && mantissa == 0) { // zero
return fp16_sign;
}

if (exponent == 0x1F) { // NAN and INF
fp16_exponent = 0x1F;
fp16_mantissa = mantissa ? (mantissa << 8) : 0;
return fp16_sign | (fp16_exponent << 10) | fp16_mantissa;
}

if (exponent == 0) { // subnormal numbers
fp16_mantissa = (mantissa << 8);
return fp16_sign | fp16_mantissa;
}

// normal numbers
int16_t true_exponent = (int16_t)exponent - 15 + 15;
if (true_exponent <= 0) {
fp16_exponent = 0;
fp16_mantissa = (mantissa << 8);
} else if (true_exponent >= 0x1F) {
fp16_exponent = 0x1F;
fp16_mantissa = 0;
} else {
fp16_exponent = (uint16_t)true_exponent;
fp16_mantissa = mantissa << 8;
}

return fp16_sign | (fp16_exponent << 10) | fp16_mantissa;
return static_cast<uint16_t>(fp8) << 8;
}
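The entire branchy conversion above collapses to one shift because FP8 E5M2 (1 sign, 5 exponent, 2 mantissa bits) uses the same exponent width and bias (15) as FP16: shifting left by 8 keeps the sign and exponent bits in place and moves the 2 mantissa bits into the top of FP16's 10-bit mantissa. That reproduces every case the removed code handled, including zero, subnormals, Inf, and NaN. A standalone exhaustive check of the equivalence:

    #include <cassert>
    #include <cstdint>

    // Bit-level E5M2 -> FP16 widening reference, checked against the one-line
    // shift for all 256 possible inputs. Not part of the build.
    static uint16_t e5m2_to_f16_ref(uint8_t f8) {
        uint16_t sign = (f8 >> 7) & 0x1;
        uint16_t exp  = (f8 >> 2) & 0x1F;  // same width and bias (15) as FP16
        uint16_t man  = f8 & 0x3;          // 2 bits -> top of FP16's 10-bit mantissa
        return (sign << 15) | (exp << 10) | (man << 8);
    }

    int main() {
        for (int v = 0; v < 256; ++v) {
            assert(e5m2_to_f16_ref(uint8_t(v)) == uint16_t(uint16_t(v) << 8));
        }
        return 0;
    }
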

void f8_e4m3_to_f16_vec(uint8_t* src, uint16_t* dst, int64_t n) {
@ -287,7 +251,7 @@ void ModelLoader::add_tensor_storage(const TensorStorage& tensor_storage) {
}

bool is_zip_file(const std::string& file_path) {
struct zip_t* zip = zip_open(file_path.c_str(), 0, 'r');
zip_t* zip = zip_open(file_path.c_str(), 0, 'r');
if (zip == nullptr) {
return false;
}
@ -454,7 +418,7 @@ bool ModelLoader::init_from_gguf_file(const std::string& file_path, const std::s
size_t data_offset = gguf_get_data_offset(ctx_gguf_);
for (int i = 0; i < n_tensors; i++) {
std::string name = gguf_get_tensor_name(ctx_gguf_, i);
struct ggml_tensor* dummy = ggml_get_tensor(ctx_meta_, name.c_str());
ggml_tensor* dummy = ggml_get_tensor(ctx_meta_, name.c_str());
size_t offset = data_offset + gguf_get_tensor_offset(ctx_gguf_, i);

// LOG_DEBUG("%s", name.c_str());
@ -812,7 +776,7 @@ struct PickleTensorReader {
}
}

void read_string(const std::string& str, struct zip_t* zip, std::string dir) {
void read_string(const std::string& str, zip_t* zip, std::string dir) {
if (str == "storage") {
read_global_type = true;
} else if (str != "state_dict") {
@ -995,7 +959,7 @@ bool ModelLoader::init_from_ckpt_file(const std::string& file_path, const std::s
file_paths_.push_back(file_path);
size_t file_index = file_paths_.size() - 1;

struct zip_t* zip = zip_open(file_path.c_str(), 0, 'r');
zip_t* zip = zip_open(file_path.c_str(), 0, 'r');
if (zip == nullptr) {
LOG_ERROR("failed to open '%s'", file_path.c_str());
return false;
@ -1104,10 +1068,12 @@ SDVersion ModelLoader::get_sd_version() {
tensor_storage.name.find("unet.mid_block.resnets.1.") != std::string::npos) {
has_middle_block_1 = true;
}
if (tensor_storage.name.find("model.diffusion_model.output_blocks.3.1.transformer_blocks.1") != std::string::npos) {
if (tensor_storage.name.find("model.diffusion_model.output_blocks.3.1.transformer_blocks.1") != std::string::npos ||
tensor_storage.name.find("unet.up_blocks.1.attentions.0.transformer_blocks.1") != std::string::npos) {
has_output_block_311 = true;
}
if (tensor_storage.name.find("model.diffusion_model.output_blocks.7.1") != std::string::npos) {
if (tensor_storage.name.find("model.diffusion_model.output_blocks.7.1") != std::string::npos ||
tensor_storage.name.find("unet.up_blocks.2.attentions.1") != std::string::npos) {
has_output_block_71 = true;
}
if (tensor_storage.name == "cond_stage_model.transformer.text_model.embeddings.token_embedding.weight" ||
@ -1411,7 +1377,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
for (int i = 0; i < n_threads; ++i) {
workers.emplace_back([&, file_path, is_zip]() {
std::ifstream file;
struct zip_t* zip = nullptr;
zip_t* zip = nullptr;
if (is_zip) {
zip = zip_open(file_path.c_str(), 0, 'r');
if (zip == nullptr) {
@ -1599,7 +1565,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
return success;
}

bool ModelLoader::load_tensors(std::map<std::string, struct ggml_tensor*>& tensors,
bool ModelLoader::load_tensors(std::map<std::string, ggml_tensor*>& tensors,
std::set<std::string> ignore_tensors,
int n_threads,
bool enable_mmap) {
@ -1613,7 +1579,7 @@ bool ModelLoader::load_tensors(std::map<std::string, struct ggml_tensor*>& tenso
tensor_names_in_file.insert(name);
}

struct ggml_tensor* real;
ggml_tensor* real;
if (tensors.find(name) != tensors.end()) {
real = tensors[name];
} else {

@ -323,7 +323,7 @@ public:
String2TensorStorage& get_tensor_storage_map() { return tensor_storage_map; }
void set_wtype_override(ggml_type wtype, std::string tensor_type_rules = "");
bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads = 0, bool use_mmap = false);
bool load_tensors(std::map<std::string, struct ggml_tensor*>& tensors,
bool load_tensors(std::map<std::string, ggml_tensor*>& tensors,
std::set<std::string> ignore_tensors = {},
int n_threads = 0,
bool use_mmap = false);

@ -1120,7 +1120,11 @@ std::string convert_tensor_name(std::string name, SDVersion version) {
for (const auto& prefix : first_stage_model_prefix_vec) {
if (starts_with(name, prefix)) {
name = convert_first_stage_model_name(name.substr(prefix.size()), prefix);
if (version == VERSION_SDXS) {
name = "tae." + name;
} else {
name = prefix + name;
}
break;
}
}

173
src/pmid.hpp
@ -21,14 +21,14 @@ public:
|
||||
blocks["layernorm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(in_dim));
|
||||
}
|
||||
|
||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
|
||||
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
|
||||
// x: [N, channels, h, w]
|
||||
|
||||
auto fc1 = std::dynamic_pointer_cast<Linear>(blocks["fc1"]);
|
||||
auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
|
||||
auto layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["layernorm"]);
|
||||
|
||||
struct ggml_tensor* r = x;
|
||||
ggml_tensor* r = x;
|
||||
// x = ggml_ext_layer_norm(ctx, x, ln_w, ln_b);
|
||||
x = layer_norm->forward(ctx, x);
|
||||
// x = ggml_add(ctx, ggml_mul_mat(ctx, fc1_w, x), fc1_b);
|
||||
@ -54,8 +54,8 @@ public:
|
||||
blocks["1"] = std::shared_ptr<GGMLBlock>(new Mlp(dim, inner_dim, dim, false));
|
||||
}
|
||||
|
||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
struct ggml_tensor* x) {
|
||||
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
ggml_tensor* x) {
|
||||
auto norm = std::dynamic_pointer_cast<LayerNorm>(blocks["0"]);
|
||||
auto ff = std::dynamic_pointer_cast<Mlp>(blocks["1"]);
|
||||
|
||||
@ -81,8 +81,8 @@ public:
|
||||
blocks["to_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, dim, false));
|
||||
}
|
||||
|
||||
struct ggml_tensor* reshape_tensor(struct ggml_context* ctx,
|
||||
struct ggml_tensor* x,
|
||||
ggml_tensor* reshape_tensor(ggml_context* ctx,
|
||||
ggml_tensor* x,
|
||||
int heads) {
|
||||
int64_t ne[4];
|
||||
for (int i = 0; i < 4; ++i)
|
||||
@ -92,17 +92,17 @@ public:
|
||||
return x;
|
||||
}
|
||||
|
||||
std::vector<struct ggml_tensor*> chunk_half(struct ggml_context* ctx,
|
||||
struct ggml_tensor* x) {
|
||||
std::vector<ggml_tensor*> chunk_half(ggml_context* ctx,
|
||||
ggml_tensor* x) {
|
||||
auto tlo = ggml_view_4d(ctx, x, x->ne[0] / 2, x->ne[1], x->ne[2], x->ne[3], x->nb[1], x->nb[2], x->nb[3], 0);
|
||||
auto tli = ggml_view_4d(ctx, x, x->ne[0] / 2, x->ne[1], x->ne[2], x->ne[3], x->nb[1], x->nb[2], x->nb[3], x->nb[0] * x->ne[0] / 2);
|
||||
return {ggml_cont(ctx, tlo),
|
||||
ggml_cont(ctx, tli)};
|
||||
}
|
||||
|
||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
struct ggml_tensor* x,
|
||||
struct ggml_tensor* latents) {
|
||||
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
ggml_tensor* x,
|
||||
ggml_tensor* latents) {
|
||||
// x (torch.Tensor): image features
|
||||
// shape (b, n1, D)
|
||||
// latent (torch.Tensor): latent features
|
||||
@ -176,9 +176,9 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
struct ggml_tensor* latents,
|
||||
struct ggml_tensor* x) {
|
||||
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
ggml_tensor* latents,
|
||||
ggml_tensor* x) {
|
||||
// x: [N, channels, h, w]
|
||||
auto proj_in = std::dynamic_pointer_cast<Linear>(blocks["proj_in"]);
|
||||
auto proj_out = std::dynamic_pointer_cast<Linear>(blocks["proj_out"]);
|
||||
@ -225,9 +225,9 @@ public:
|
||||
4));
|
||||
}
|
||||
|
||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
struct ggml_tensor* x,
|
||||
struct ggml_tensor* last_hidden_state) {
|
||||
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
ggml_tensor* x,
|
||||
ggml_tensor* last_hidden_state) {
|
||||
// x: [N, channels, h, w]
|
||||
auto token_proj = std::dynamic_pointer_cast<Mlp>(blocks["token_proj"]);
|
||||
auto token_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["token_norm"]);
|
||||
@ -237,7 +237,7 @@ public:
|
||||
int64_t nel = ggml_nelements(x);
|
||||
x = ggml_reshape_3d(ctx->ggml_ctx, x, cross_attention_dim, num_tokens, nel / (cross_attention_dim * num_tokens));
|
||||
x = token_norm->forward(ctx, x);
|
||||
struct ggml_tensor* out = perceiver_resampler->forward(ctx, x, last_hidden_state);
|
||||
ggml_tensor* out = perceiver_resampler->forward(ctx, x, last_hidden_state);
|
||||
if (use_residul)
|
||||
out = ggml_add(ctx->ggml_ctx, x, out);
|
||||
return out;
|
||||
@ -256,9 +256,9 @@ public:
|
||||
blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new LayerNorm(embed_dim));
|
||||
}
|
||||
|
||||
struct ggml_tensor* fuse_fn(GGMLRunnerContext* ctx,
|
||||
struct ggml_tensor* prompt_embeds,
|
||||
struct ggml_tensor* id_embeds) {
|
||||
ggml_tensor* fuse_fn(GGMLRunnerContext* ctx,
|
||||
ggml_tensor* prompt_embeds,
|
||||
ggml_tensor* id_embeds) {
|
||||
auto mlp1 = std::dynamic_pointer_cast<FuseBlock>(blocks["mlp1"]);
|
||||
auto mlp2 = std::dynamic_pointer_cast<FuseBlock>(blocks["mlp2"]);
|
||||
auto layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["layer_norm"]);
|
||||
@ -273,24 +273,24 @@ public:
|
||||
return stacked_id_embeds;
|
||||
}
|
||||
|
||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
struct ggml_tensor* prompt_embeds,
|
||||
struct ggml_tensor* id_embeds,
|
||||
struct ggml_tensor* class_tokens_mask,
|
||||
struct ggml_tensor* class_tokens_mask_pos,
|
||||
struct ggml_tensor* left,
|
||||
struct ggml_tensor* right) {
|
||||
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
ggml_tensor* prompt_embeds,
|
||||
ggml_tensor* id_embeds,
|
||||
ggml_tensor* class_tokens_mask,
|
||||
ggml_tensor* class_tokens_mask_pos,
|
||||
ggml_tensor* left,
|
||||
ggml_tensor* right) {
|
||||
// x: [N, channels, h, w]
|
||||
|
||||
struct ggml_tensor* valid_id_embeds = id_embeds;
|
||||
ggml_tensor* valid_id_embeds = id_embeds;
|
||||
// # slice out the image token embeddings
|
||||
ggml_set_name(class_tokens_mask_pos, "class_tokens_mask_pos");
|
||||
ggml_set_name(prompt_embeds, "prompt_embeds");
|
||||
struct ggml_tensor* image_token_embeds = ggml_get_rows(ctx->ggml_ctx, prompt_embeds, class_tokens_mask_pos);
|
||||
ggml_tensor* image_token_embeds = ggml_get_rows(ctx->ggml_ctx, prompt_embeds, class_tokens_mask_pos);
|
||||
ggml_set_name(image_token_embeds, "image_token_embeds");
|
||||
valid_id_embeds = ggml_reshape_2d(ctx->ggml_ctx, valid_id_embeds, valid_id_embeds->ne[0],
|
||||
ggml_nelements(valid_id_embeds) / valid_id_embeds->ne[0]);
|
||||
struct ggml_tensor* stacked_id_embeds = fuse_fn(ctx, image_token_embeds, valid_id_embeds);
|
||||
ggml_tensor* stacked_id_embeds = fuse_fn(ctx, image_token_embeds, valid_id_embeds);
|
||||
|
||||
if (left && right) {
|
||||
stacked_id_embeds = ggml_concat(ctx->ggml_ctx, left, stacked_id_embeds, 1);
|
||||
@ -304,7 +304,7 @@ public:
|
||||
class_tokens_mask = ggml_cont(ctx->ggml_ctx, ggml_transpose(ctx->ggml_ctx, class_tokens_mask));
|
||||
class_tokens_mask = ggml_repeat(ctx->ggml_ctx, class_tokens_mask, prompt_embeds);
|
||||
prompt_embeds = ggml_mul(ctx->ggml_ctx, prompt_embeds, class_tokens_mask);
|
||||
struct ggml_tensor* updated_prompt_embeds = ggml_add(ctx->ggml_ctx, prompt_embeds, stacked_id_embeds);
|
||||
ggml_tensor* updated_prompt_embeds = ggml_add(ctx->ggml_ctx, prompt_embeds, stacked_id_embeds);
|
||||
ggml_set_name(updated_prompt_embeds, "updated_prompt_embeds");
|
||||
return updated_prompt_embeds;
|
||||
}
|
||||
@ -317,22 +317,22 @@ struct PhotoMakerIDEncoderBlock : public CLIPVisionModelProjection {
|
||||
blocks["fuse_module"] = std::shared_ptr<GGMLBlock>(new FuseModule(2048));
|
||||
}
|
||||
|
||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
struct ggml_tensor* id_pixel_values,
|
||||
struct ggml_tensor* prompt_embeds,
|
||||
struct ggml_tensor* class_tokens_mask,
|
||||
struct ggml_tensor* class_tokens_mask_pos,
|
||||
struct ggml_tensor* left,
|
||||
struct ggml_tensor* right) {
|
||||
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
ggml_tensor* id_pixel_values,
|
||||
ggml_tensor* prompt_embeds,
|
||||
ggml_tensor* class_tokens_mask,
|
||||
ggml_tensor* class_tokens_mask_pos,
|
||||
ggml_tensor* left,
|
||||
ggml_tensor* right) {
|
||||
// x: [N, channels, h, w]
|
||||
auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]);
|
||||
auto visual_projection = std::dynamic_pointer_cast<CLIPProjection>(blocks["visual_projection"]);
|
||||
auto visual_projection_2 = std::dynamic_pointer_cast<Linear>(blocks["visual_projection_2"]);
|
||||
auto fuse_module = std::dynamic_pointer_cast<FuseModule>(blocks["fuse_module"]);
|
||||
|
||||
struct ggml_tensor* shared_id_embeds = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size]
|
||||
struct ggml_tensor* id_embeds = visual_projection->forward(ctx, shared_id_embeds); // [N, proj_dim(768)]
|
||||
struct ggml_tensor* id_embeds_2 = visual_projection_2->forward(ctx, shared_id_embeds); // [N, 1280]
|
||||
ggml_tensor* shared_id_embeds = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size]
|
||||
ggml_tensor* id_embeds = visual_projection->forward(ctx, shared_id_embeds); // [N, proj_dim(768)]
|
||||
ggml_tensor* id_embeds_2 = visual_projection_2->forward(ctx, shared_id_embeds); // [N, 1280]
|
||||
|
||||
id_embeds = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds, 2, 0, 1, 3));
|
||||
id_embeds_2 = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds_2, 2, 0, 1, 3));
|
||||
@ -340,7 +340,7 @@ struct PhotoMakerIDEncoderBlock : public CLIPVisionModelProjection {
|
||||
id_embeds = ggml_concat(ctx->ggml_ctx, id_embeds, id_embeds_2, 2); // [batch_size, seq_length, 1, 2048] check whether concat at dim 2 is right
|
||||
id_embeds = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, id_embeds, 1, 2, 0, 3));
|
||||
|
||||
struct ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx,
|
||||
ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx,
|
||||
prompt_embeds,
|
||||
id_embeds,
|
||||
class_tokens_mask,
|
||||
@ -365,24 +365,24 @@ struct PhotoMakerIDEncoder_CLIPInsightfaceExtendtokenBlock : public CLIPVisionMo
|
||||
num_tokens));
|
||||
}
|
||||
|
||||
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
struct ggml_tensor* id_pixel_values,
|
||||
struct ggml_tensor* prompt_embeds,
|
||||
struct ggml_tensor* class_tokens_mask,
|
||||
struct ggml_tensor* class_tokens_mask_pos,
|
||||
struct ggml_tensor* id_embeds,
|
||||
struct ggml_tensor* left,
|
||||
struct ggml_tensor* right) {
|
||||
ggml_tensor* forward(GGMLRunnerContext* ctx,
|
||||
ggml_tensor* id_pixel_values,
|
||||
ggml_tensor* prompt_embeds,
|
||||
ggml_tensor* class_tokens_mask,
|
||||
ggml_tensor* class_tokens_mask_pos,
|
||||
ggml_tensor* id_embeds,
|
||||
ggml_tensor* left,
|
||||
ggml_tensor* right) {
|
||||
// x: [N, channels, h, w]
|
||||
auto vision_model = std::dynamic_pointer_cast<CLIPVisionModel>(blocks["vision_model"]);
|
||||
auto fuse_module = std::dynamic_pointer_cast<FuseModule>(blocks["fuse_module"]);
|
||||
auto qformer_perceiver = std::dynamic_pointer_cast<QFormerPerceiver>(blocks["qformer_perceiver"]);
|
||||
|
||||
// struct ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size]
|
||||
struct ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values, false); // [N, hidden_size]
|
||||
// ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values); // [N, hidden_size]
|
||||
ggml_tensor* last_hidden_state = vision_model->forward(ctx, id_pixel_values, false); // [N, hidden_size]
|
||||
id_embeds = qformer_perceiver->forward(ctx, id_embeds, last_hidden_state);
|
||||
|
||||
struct ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx,
|
||||
ggml_tensor* updated_prompt_embeds = fuse_module->forward(ctx,
|
||||
prompt_embeds,
|
||||
id_embeds,
|
||||
class_tokens_mask,
|
||||
@ -436,18 +436,17 @@ public:
|
||||
return pm_version;
|
||||
}
|
||||
|
||||
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
|
||||
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
|
||||
if (pm_version == PM_VERSION_1)
|
||||
id_encoder.get_param_tensors(tensors, prefix);
|
||||
else if (pm_version == PM_VERSION_2)
|
||||
id_encoder2.get_param_tensors(tensors, prefix);
|
||||
}
|
||||
|
||||
struct ggml_cgraph* build_graph( // struct ggml_allocr* allocr,
|
||||
struct ggml_tensor* id_pixel_values,
|
||||
struct ggml_tensor* prompt_embeds,
|
||||
ggml_cgraph* build_graph(const sd::Tensor<float>& id_pixel_values_tensor,
|
||||
const sd::Tensor<float>& prompt_embeds_tensor,
|
||||
std::vector<bool>& class_tokens_mask,
|
||||
struct ggml_tensor* id_embeds) {
|
||||
const sd::Tensor<float>& id_embeds_tensor = {}) {
|
||||
ctm.clear();
|
||||
ctmf16.clear();
|
||||
ctmpos.clear();
|
||||
@ -458,20 +457,20 @@ public:
|
||||
|
||||
auto runner_ctx = get_context();
|
||||
|
||||
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
||||
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
|
||||
|
||||
ggml_tensor* id_pixel_values = make_input(id_pixel_values_tensor);
|
||||
ggml_tensor* prompt_embeds = make_input(prompt_embeds_tensor);
|
||||
ggml_tensor* id_embeds = make_optional_input(id_embeds_tensor);
|

int64_t hidden_size = prompt_embeds->ne[0];
int64_t seq_length = prompt_embeds->ne[1];
ggml_type type = GGML_TYPE_F32;

struct ggml_tensor* class_tokens_mask_d = ggml_new_tensor_1d(runner_ctx.ggml_ctx, type, class_tokens_mask.size());
ggml_tensor* class_tokens_mask_d = ggml_new_tensor_1d(runner_ctx.ggml_ctx, type, class_tokens_mask.size());

struct ggml_tensor* id_pixel_values_d = to_backend(id_pixel_values);
struct ggml_tensor* prompt_embeds_d = to_backend(prompt_embeds);
struct ggml_tensor* id_embeds_d = to_backend(id_embeds);

struct ggml_tensor* left = nullptr;
struct ggml_tensor* right = nullptr;
ggml_tensor* left = nullptr;
ggml_tensor* right = nullptr;
for (int i = 0; i < class_tokens_mask.size(); i++) {
if (class_tokens_mask[i]) {
// printf(" 1,");
@@ -495,7 +494,7 @@ public:
right = ggml_new_tensor_3d(runner_ctx.ggml_ctx, type,
hidden_size, seq_length - ctmpos[ctmpos.size() - 1] - 1, 1);
}
struct ggml_tensor* class_tokens_mask_pos = ggml_new_tensor_1d(runner_ctx.ggml_ctx, GGML_TYPE_I32, ctmpos.size());
ggml_tensor* class_tokens_mask_pos = ggml_new_tensor_1d(runner_ctx.ggml_ctx, GGML_TYPE_I32, ctmpos.size());

{
if (type == GGML_TYPE_F16)
@@ -526,21 +525,21 @@ public:
}
}
}
struct ggml_tensor* updated_prompt_embeds = nullptr;
ggml_tensor* updated_prompt_embeds = nullptr;
if (pm_version == PM_VERSION_1)
updated_prompt_embeds = id_encoder.forward(&runner_ctx,
id_pixel_values_d,
prompt_embeds_d,
id_pixel_values,
prompt_embeds,
class_tokens_mask_d,
class_tokens_mask_pos,
left, right);
else if (pm_version == PM_VERSION_2)
updated_prompt_embeds = id_encoder2.forward(&runner_ctx,
id_pixel_values_d,
prompt_embeds_d,
id_pixel_values,
prompt_embeds,
class_tokens_mask_d,
class_tokens_mask_pos,
id_embeds_d,
id_embeds,
left, right);

ggml_build_forward_expand(gf, updated_prompt_embeds);
@@ -548,25 +547,21 @@ public:
return gf;
}

bool compute(const int n_threads,
struct ggml_tensor* id_pixel_values,
struct ggml_tensor* prompt_embeds,
struct ggml_tensor* id_embeds,
std::vector<bool>& class_tokens_mask,
struct ggml_tensor** updated_prompt_embeds,
ggml_context* output_ctx) {
auto get_graph = [&]() -> struct ggml_cgraph* {
// return build_graph(compute_allocr, id_pixel_values, prompt_embeds, class_tokens_mask);
sd::Tensor<float> compute(const int n_threads,
const sd::Tensor<float>& id_pixel_values,
const sd::Tensor<float>& prompt_embeds,
const sd::Tensor<float>& id_embeds,
std::vector<bool>& class_tokens_mask) {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(id_pixel_values, prompt_embeds, class_tokens_mask, id_embeds);
};

// GGMLRunner::compute(get_graph, n_threads, updated_prompt_embeds);
return GGMLRunner::compute(get_graph, n_threads, true, updated_prompt_embeds, output_ctx);
return take_or_empty(GGMLRunner::compute<float>(get_graph, n_threads, true));
}
};

struct PhotoMakerIDEmbed : public GGMLRunner {
std::map<std::string, struct ggml_tensor*> tensors;
std::map<std::string, ggml_tensor*> tensors;
std::string file_path;
ModelLoader* model_loader;
bool load_failed = false;
@@ -606,7 +601,7 @@ struct PhotoMakerIDEmbed : public GGMLRunner {
}
if (dry_run) {
std::lock_guard<std::mutex> lock(tensor_mutex);
struct ggml_tensor* real = ggml_new_tensor(params_ctx,
ggml_tensor* real = ggml_new_tensor(params_ctx,
tensor_storage.type,
tensor_storage.n_dims,
tensor_storage.ne);
@@ -629,8 +624,8 @@ struct PhotoMakerIDEmbed : public GGMLRunner {
return true;
}

struct ggml_tensor* get() {
std::map<std::string, struct ggml_tensor*>::iterator pos;
ggml_tensor* get() {
std::map<std::string, ggml_tensor*>::iterator pos;
pos = tensors.find("pmid.id_embeds");
if (pos != tensors.end())
return pos->second;

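Editor's note: a minimal, hypothetical call-site sketch for the reworked tensor-returning compute() above; `runner`, the input tensors, and `make_mask()` are illustrative assumptions, not part of the diff:

    // Sketch only: the new compute() returns an sd::Tensor<float> directly
    // (take_or_empty() yields an empty tensor when the graph fails to run).
    std::vector<bool> class_tokens_mask = make_mask();  // hypothetical helper
    sd::Tensor<float> updated = runner.compute(n_threads,
                                               id_pixel_values,  // stacked ID images
                                               prompt_embeds,    // text embeddings
                                               id_embeds,
                                               class_tokens_mask);
    if (updated.empty()) { /* graph did not run; handle the failure */ }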
@@ -1,179 +1,241 @@
#ifndef __PREPROCESSING_HPP__
#define __PREPROCESSING_HPP__

#include <cmath>
#include <limits>

#include "ggml_extend.hpp"

#define M_PI_ 3.14159265358979323846f

void convolve(struct ggml_tensor* input, struct ggml_tensor* output, struct ggml_tensor* kernel, int padding) {
struct ggml_init_params params;
params.mem_size = 80 * input->ne[0] * input->ne[1]; // 20M for 512x512
params.mem_buffer = nullptr;
params.no_alloc = false;
struct ggml_context* ctx0 = ggml_init(params);
struct ggml_tensor* kernel_fp16 = ggml_new_tensor_4d(ctx0, GGML_TYPE_F16, kernel->ne[0], kernel->ne[1], 1, 1);
ggml_fp32_to_fp16_row((float*)kernel->data, (ggml_fp16_t*)kernel_fp16->data, ggml_nelements(kernel));
ggml_tensor* h = ggml_conv_2d(ctx0, kernel_fp16, input, 1, 1, padding, padding, 1, 1);
ggml_cgraph* gf = ggml_new_graph(ctx0);
ggml_build_forward_expand(gf, ggml_cpy(ctx0, h, output));
ggml_graph_compute_with_ctx(ctx0, gf, 1);
ggml_free(ctx0);
static inline int64_t preprocessing_offset_4d(const sd::Tensor<float>& tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
const auto& shape = tensor.shape();
int64_t n0 = shape.size() > 0 ? shape[0] : 1;
int64_t n1 = shape.size() > 1 ? shape[1] : 1;
int64_t n2 = shape.size() > 2 ? shape[2] : 1;
return ((i3 * n2 + i2) * n1 + i1) * n0 + i0;
}

void gaussian_kernel(struct ggml_tensor* kernel) {
int ks_mid = static_cast<int>(kernel->ne[0] / 2);
static inline float preprocessing_get_4d(const sd::Tensor<float>& tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
return tensor.values()[static_cast<size_t>(preprocessing_offset_4d(tensor, i0, i1, i2, i3))];
}

static inline void preprocessing_set_4d(sd::Tensor<float>& tensor, float value, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
tensor.values()[static_cast<size_t>(preprocessing_offset_4d(tensor, i0, i1, i2, i3))] = value;
}

static inline sd::Tensor<float> sd_image_to_preprocessing_tensor(sd_image_t image) {
sd::Tensor<float> tensor({static_cast<int64_t>(image.width), static_cast<int64_t>(image.height), static_cast<int64_t>(image.channel), 1});
for (uint32_t y = 0; y < image.height; ++y) {
for (uint32_t x = 0; x < image.width; ++x) {
for (uint32_t c = 0; c < image.channel; ++c) {
preprocessing_set_4d(tensor, sd_image_get_f32(image, x, y, c), x, y, c, 0);
}
}
}
return tensor;
}

static inline void preprocessing_tensor_to_sd_image(const sd::Tensor<float>& tensor, uint8_t* image_data) {
GGML_ASSERT(tensor.dim() == 4);
GGML_ASSERT(tensor.shape()[3] == 1);
GGML_ASSERT(image_data != nullptr);

int width = static_cast<int>(tensor.shape()[0]);
int height = static_cast<int>(tensor.shape()[1]);
int channel = static_cast<int>(tensor.shape()[2]);
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
for (int c = 0; c < channel; ++c) {
float value = preprocessing_get_4d(tensor, x, y, c, 0);
value = std::min(1.0f, std::max(0.0f, value));
image_data[(y * width + x) * channel + c] = static_cast<uint8_t>(std::round(value * 255.0f));
}
}
}
}

static inline sd::Tensor<float> gaussian_kernel_tensor(int kernel_size) {
sd::Tensor<float> kernel({kernel_size, kernel_size, 1, 1});
int ks_mid = kernel_size / 2;
float sigma = 1.4f;
float normal = 1.f / (2.0f * M_PI_ * powf(sigma, 2.0f));
for (int y = 0; y < kernel->ne[0]; y++) {
float normal = 1.f / (2.0f * M_PI_ * std::pow(sigma, 2.0f));
for (int y = 0; y < kernel_size; ++y) {
float gx = static_cast<float>(-ks_mid + y);
for (int x = 0; x < kernel->ne[1]; x++) {
for (int x = 0; x < kernel_size; ++x) {
float gy = static_cast<float>(-ks_mid + x);
float k_ = expf(-((gx * gx + gy * gy) / (2.0f * powf(sigma, 2.0f)))) * normal;
ggml_ext_tensor_set_f32(kernel, k_, x, y);
float k = std::exp(-((gx * gx + gy * gy) / (2.0f * std::pow(sigma, 2.0f)))) * normal;
preprocessing_set_4d(kernel, k, x, y, 0, 0);
}
}
return kernel;
}

void grayscale(struct ggml_tensor* rgb_img, struct ggml_tensor* grayscale) {
for (int iy = 0; iy < rgb_img->ne[1]; iy++) {
for (int ix = 0; ix < rgb_img->ne[0]; ix++) {
float r = ggml_ext_tensor_get_f32(rgb_img, ix, iy);
float g = ggml_ext_tensor_get_f32(rgb_img, ix, iy, 1);
float b = ggml_ext_tensor_get_f32(rgb_img, ix, iy, 2);
static inline sd::Tensor<float> convolve_tensor(const sd::Tensor<float>& input, const sd::Tensor<float>& kernel, int padding) {
GGML_ASSERT(input.dim() == 4);
GGML_ASSERT(kernel.dim() == 4);
GGML_ASSERT(input.shape()[3] == 1);
GGML_ASSERT(kernel.shape()[2] == 1);
GGML_ASSERT(kernel.shape()[3] == 1);

sd::Tensor<float> output(input.shape());
int64_t width = input.shape()[0];
int64_t height = input.shape()[1];
int64_t channels = input.shape()[2];
int64_t kernel_w = kernel.shape()[0];
int64_t kernel_h = kernel.shape()[1];

for (int64_t c = 0; c < channels; ++c) {
for (int64_t y = 0; y < height; ++y) {
for (int64_t x = 0; x < width; ++x) {
float sum = 0.0f;
for (int64_t ky = 0; ky < kernel_h; ++ky) {
int64_t iy = y + ky - padding;
if (iy < 0 || iy >= height) {
continue;
}
for (int64_t kx = 0; kx < kernel_w; ++kx) {
int64_t ix = x + kx - padding;
if (ix < 0 || ix >= width) {
continue;
}
sum += preprocessing_get_4d(input, ix, iy, c, 0) * preprocessing_get_4d(kernel, kx, ky, 0, 0);
}
}
preprocessing_set_4d(output, sum, x, y, c, 0);
}
}
}
return output;
}

static inline sd::Tensor<float> grayscale_tensor(const sd::Tensor<float>& rgb_img) {
GGML_ASSERT(rgb_img.dim() == 4);
GGML_ASSERT(rgb_img.shape()[2] >= 3);
sd::Tensor<float> grayscale({rgb_img.shape()[0], rgb_img.shape()[1], 1, rgb_img.shape()[3]});
for (int64_t iy = 0; iy < rgb_img.shape()[1]; ++iy) {
for (int64_t ix = 0; ix < rgb_img.shape()[0]; ++ix) {
float r = preprocessing_get_4d(rgb_img, ix, iy, 0, 0);
float g = preprocessing_get_4d(rgb_img, ix, iy, 1, 0);
float b = preprocessing_get_4d(rgb_img, ix, iy, 2, 0);
float gray = 0.2989f * r + 0.5870f * g + 0.1140f * b;
ggml_ext_tensor_set_f32(grayscale, gray, ix, iy);
preprocessing_set_4d(grayscale, gray, ix, iy, 0, 0);
}
}
return grayscale;
}

void prop_hypot(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor* h) {
int n_elements = static_cast<int>(ggml_nelements(h));
float* dx = (float*)x->data;
float* dy = (float*)y->data;
float* dh = (float*)h->data;
for (int i = 0; i < n_elements; i++) {
dh[i] = sqrtf(dx[i] * dx[i] + dy[i] * dy[i]);
static inline sd::Tensor<float> tensor_hypot(const sd::Tensor<float>& x, const sd::Tensor<float>& y) {
sd::tensor_check_same_shape(x, y);
sd::Tensor<float> out(x.shape());
for (int64_t i = 0; i < out.numel(); ++i) {
out[i] = std::sqrt(x[i] * x[i] + y[i] * y[i]);
}
return out;
}

void prop_arctan2(struct ggml_tensor* x, struct ggml_tensor* y, struct ggml_tensor* h) {
int n_elements = static_cast<int>(ggml_nelements(h));
float* dx = (float*)x->data;
float* dy = (float*)y->data;
float* dh = (float*)h->data;
for (int i = 0; i < n_elements; i++) {
dh[i] = atan2f(dy[i], dx[i]);
static inline sd::Tensor<float> tensor_arctan2(const sd::Tensor<float>& x, const sd::Tensor<float>& y) {
sd::tensor_check_same_shape(x, y);
sd::Tensor<float> out(x.shape());
for (int64_t i = 0; i < out.numel(); ++i) {
out[i] = std::atan2(y[i], x[i]);
}
return out;
}

void normalize_tensor(struct ggml_tensor* g) {
int n_elements = static_cast<int>(ggml_nelements(g));
float* dg = (float*)g->data;
float max = -INFINITY;
for (int i = 0; i < n_elements; i++) {
max = dg[i] > max ? dg[i] : max;
static inline void normalize_tensor(sd::Tensor<float>* g) {
GGML_ASSERT(g != nullptr);
if (g->empty()) {
return;
}
max = 1.0f / max;
for (int i = 0; i < n_elements; i++) {
dg[i] *= max;
float max_value = -std::numeric_limits<float>::infinity();
for (int64_t i = 0; i < g->numel(); ++i) {
max_value = std::max(max_value, (*g)[i]);
}
if (max_value == 0.0f || !std::isfinite(max_value)) {
return;
}
*g *= (1.0f / max_value);
}

void non_max_supression(struct ggml_tensor* result, struct ggml_tensor* G, struct ggml_tensor* D) {
for (int iy = 1; iy < result->ne[1] - 1; iy++) {
for (int ix = 1; ix < result->ne[0] - 1; ix++) {
float angle = ggml_ext_tensor_get_f32(D, ix, iy) * 180.0f / M_PI_;
angle = angle < 0.0f ? angle += 180.0f : angle;
static inline sd::Tensor<float> non_max_supression(const sd::Tensor<float>& G, const sd::Tensor<float>& D) {
GGML_ASSERT(G.shape() == D.shape());
sd::Tensor<float> result = sd::Tensor<float>::zeros(G.shape());
for (int64_t iy = 1; iy < result.shape()[1] - 1; ++iy) {
for (int64_t ix = 1; ix < result.shape()[0] - 1; ++ix) {
float angle = preprocessing_get_4d(D, ix, iy, 0, 0) * 180.0f / M_PI_;
angle = angle < 0.0f ? angle + 180.0f : angle;
float q = 1.0f;
float r = 1.0f;

// angle 0
if ((0 >= angle && angle < 22.5f) || (157.5f >= angle && angle <= 180)) {
q = ggml_ext_tensor_get_f32(G, ix, iy + 1);
r = ggml_ext_tensor_get_f32(G, ix, iy - 1);
}
// angle 45
else if (22.5f >= angle && angle < 67.5f) {
q = ggml_ext_tensor_get_f32(G, ix + 1, iy - 1);
r = ggml_ext_tensor_get_f32(G, ix - 1, iy + 1);
}
// angle 90
else if (67.5f >= angle && angle < 112.5) {
q = ggml_ext_tensor_get_f32(G, ix + 1, iy);
r = ggml_ext_tensor_get_f32(G, ix - 1, iy);
}
// angle 135
else if (112.5 >= angle && angle < 157.5f) {
q = ggml_ext_tensor_get_f32(G, ix - 1, iy - 1);
r = ggml_ext_tensor_get_f32(G, ix + 1, iy + 1);
if ((0 >= angle && angle < 22.5f) || (157.5f >= angle && angle <= 180.0f)) {
q = preprocessing_get_4d(G, ix, iy + 1, 0, 0);
r = preprocessing_get_4d(G, ix, iy - 1, 0, 0);
} else if (22.5f >= angle && angle < 67.5f) {
q = preprocessing_get_4d(G, ix + 1, iy - 1, 0, 0);
r = preprocessing_get_4d(G, ix - 1, iy + 1, 0, 0);
} else if (67.5f >= angle && angle < 112.5f) {
q = preprocessing_get_4d(G, ix + 1, iy, 0, 0);
r = preprocessing_get_4d(G, ix - 1, iy, 0, 0);
} else if (112.5f >= angle && angle < 157.5f) {
q = preprocessing_get_4d(G, ix - 1, iy - 1, 0, 0);
r = preprocessing_get_4d(G, ix + 1, iy + 1, 0, 0);
}

float cur = ggml_ext_tensor_get_f32(G, ix, iy);
if ((cur >= q) && (cur >= r)) {
ggml_ext_tensor_set_f32(result, cur, ix, iy);
} else {
ggml_ext_tensor_set_f32(result, 0.0f, ix, iy);
}
float cur = preprocessing_get_4d(G, ix, iy, 0, 0);
preprocessing_set_4d(result, (cur >= q && cur >= r) ? cur : 0.0f, ix, iy, 0, 0);
}
}
return result;
}

void threshold_hystersis(struct ggml_tensor* img, float high_threshold, float low_threshold, float weak, float strong) {
int n_elements = static_cast<int>(ggml_nelements(img));
float* imd = (float*)img->data;
float max = -INFINITY;
for (int i = 0; i < n_elements; i++) {
max = imd[i] > max ? imd[i] : max;
static inline void threshold_hystersis(sd::Tensor<float>* img, float high_threshold, float low_threshold, float weak, float strong) {
GGML_ASSERT(img != nullptr);
if (img->empty()) {
return;
}
float ht = max * high_threshold;
float max_value = -std::numeric_limits<float>::infinity();
for (int64_t i = 0; i < img->numel(); ++i) {
max_value = std::max(max_value, (*img)[i]);
}

float ht = max_value * high_threshold;
float lt = ht * low_threshold;
for (int i = 0; i < n_elements; i++) {
float img_v = imd[i];
if (img_v >= ht) { // strong pixel
imd[i] = strong;
} else if (img_v <= ht && img_v >= lt) { // strong pixel
imd[i] = weak;
for (int64_t i = 0; i < img->numel(); ++i) {
float img_v = (*img)[i];
if (img_v >= ht) {
(*img)[i] = strong;
} else if (img_v <= ht && img_v >= lt) {
(*img)[i] = weak;
}
}

for (int iy = 0; iy < img->ne[1]; iy++) {
for (int ix = 0; ix < img->ne[0]; ix++) {
if (ix >= 3 && ix <= img->ne[0] - 3 && iy >= 3 && iy <= img->ne[1] - 3) {
ggml_ext_tensor_set_f32(img, ggml_ext_tensor_get_f32(img, ix, iy), ix, iy);
} else {
ggml_ext_tensor_set_f32(img, 0.0f, ix, iy);
for (int64_t iy = 0; iy < img->shape()[1]; ++iy) {
for (int64_t ix = 0; ix < img->shape()[0]; ++ix) {
if (!(ix >= 3 && ix <= img->shape()[0] - 3 && iy >= 3 && iy <= img->shape()[1] - 3)) {
preprocessing_set_4d(*img, 0.0f, ix, iy, 0, 0);
}
}
}

// hysteresis
for (int iy = 1; iy < img->ne[1] - 1; iy++) {
for (int ix = 1; ix < img->ne[0] - 1; ix++) {
float imd_v = ggml_ext_tensor_get_f32(img, ix, iy);
for (int64_t iy = 1; iy < img->shape()[1] - 1; ++iy) {
for (int64_t ix = 1; ix < img->shape()[0] - 1; ++ix) {
float imd_v = preprocessing_get_4d(*img, ix, iy, 0, 0);
if (imd_v == weak) {
if (ggml_ext_tensor_get_f32(img, ix + 1, iy - 1) == strong || ggml_ext_tensor_get_f32(img, ix + 1, iy) == strong ||
ggml_ext_tensor_get_f32(img, ix, iy - 1) == strong || ggml_ext_tensor_get_f32(img, ix, iy + 1) == strong ||
ggml_ext_tensor_get_f32(img, ix - 1, iy - 1) == strong || ggml_ext_tensor_get_f32(img, ix - 1, iy) == strong) {
ggml_ext_tensor_set_f32(img, strong, ix, iy);
} else {
ggml_ext_tensor_set_f32(img, 0.0f, ix, iy);
}
bool has_strong_neighbor =
preprocessing_get_4d(*img, ix + 1, iy - 1, 0, 0) == strong ||
preprocessing_get_4d(*img, ix + 1, iy, 0, 0) == strong ||
preprocessing_get_4d(*img, ix, iy - 1, 0, 0) == strong ||
preprocessing_get_4d(*img, ix, iy + 1, 0, 0) == strong ||
preprocessing_get_4d(*img, ix - 1, iy - 1, 0, 0) == strong ||
preprocessing_get_4d(*img, ix - 1, iy, 0, 0) == strong;
preprocessing_set_4d(*img, has_strong_neighbor ? strong : 0.0f, ix, iy, 0, 0);
}
}
}
}

bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold, float weak, float strong, bool inverse) {
struct ggml_init_params params;
params.mem_size = static_cast<size_t>(40 * img.width * img.height); // 10MB for 512x512
params.mem_buffer = nullptr;
params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params);

if (!work_ctx) {
LOG_ERROR("ggml_init() failed");
return false;
}

float kX[9] = {
-1, 0, 1,
-2, 0, 2,
@@ -184,42 +246,32 @@ bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold,
0, 0, 0,
-1, -2, -1};

// generate kernel
int kernel_size = 5;
struct ggml_tensor* gkernel = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, kernel_size, kernel_size, 1, 1);
struct ggml_tensor* sf_kx = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
memcpy(sf_kx->data, kX, ggml_nbytes(sf_kx));
struct ggml_tensor* sf_ky = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
memcpy(sf_ky->data, kY, ggml_nbytes(sf_ky));
gaussian_kernel(gkernel);
struct ggml_tensor* image = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 3, 1);
struct ggml_tensor* image_gray = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 1, 1);
struct ggml_tensor* iX = ggml_dup_tensor(work_ctx, image_gray);
struct ggml_tensor* iY = ggml_dup_tensor(work_ctx, image_gray);
struct ggml_tensor* G = ggml_dup_tensor(work_ctx, image_gray);
struct ggml_tensor* tetha = ggml_dup_tensor(work_ctx, image_gray);
sd_image_to_ggml_tensor(img, image);
grayscale(image, image_gray);
convolve(image_gray, image_gray, gkernel, 2);
convolve(image_gray, iX, sf_kx, 1);
convolve(image_gray, iY, sf_ky, 1);
prop_hypot(iX, iY, G);
normalize_tensor(G);
prop_arctan2(iX, iY, tetha);
non_max_supression(image_gray, G, tetha);
threshold_hystersis(image_gray, high_threshold, low_threshold, weak, strong);
// to RGB channels
for (uint32_t iy = 0; iy < img.height; iy++) {
for (uint32_t ix = 0; ix < img.width; ix++) {
float gray = ggml_ext_tensor_get_f32(image_gray, ix, iy);
sd::Tensor<float> gkernel = gaussian_kernel_tensor(5);
sd::Tensor<float> sf_kx({3, 3, 1, 1}, std::vector<float>(kX, kX + 9));
sd::Tensor<float> sf_ky({3, 3, 1, 1}, std::vector<float>(kY, kY + 9));

sd::Tensor<float> image = sd_image_to_preprocessing_tensor(img);
sd::Tensor<float> image_gray = grayscale_tensor(image);
image_gray = convolve_tensor(image_gray, gkernel, 2);
sd::Tensor<float> iX = convolve_tensor(image_gray, sf_kx, 1);
sd::Tensor<float> iY = convolve_tensor(image_gray, sf_ky, 1);
sd::Tensor<float> G = tensor_hypot(iX, iY);
normalize_tensor(&G);
sd::Tensor<float> theta = tensor_arctan2(iX, iY);
image_gray = non_max_supression(G, theta);
threshold_hystersis(&image_gray, high_threshold, low_threshold, weak, strong);

for (uint32_t iy = 0; iy < img.height; ++iy) {
for (uint32_t ix = 0; ix < img.width; ++ix) {
float gray = preprocessing_get_4d(image_gray, ix, iy, 0, 0);
gray = inverse ? 1.0f - gray : gray;
ggml_ext_tensor_set_f32(image, gray, ix, iy);
ggml_ext_tensor_set_f32(image, gray, ix, iy, 1);
ggml_ext_tensor_set_f32(image, gray, ix, iy, 2);
for (uint32_t c = 0; c < img.channel; ++c) {
preprocessing_set_4d(image, gray, ix, iy, c, 0);
}
}
ggml_tensor_to_sd_image(image, img.data);
ggml_free(work_ctx);
}

preprocessing_tensor_to_sd_image(image, img.data);
return true;
}

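Editor's note: a hypothetical invocation of the rewritten in-place Canny preprocessor above; the image loader and the parameter values are illustrative assumptions, not from the diff. Per the code, high_threshold is a fraction of the maximum gradient (ht = max * high_threshold) and low_threshold is then a fraction of that high threshold (lt = ht * low_threshold):

    sd_image_t img = load_rgb_image("input.png");  // assumed helper
    bool ok = preprocess_canny(img,
                               0.08f,  // high_threshold: fraction of max gradient
                               0.08f,  // low_threshold: fraction of the high threshold
                               0.8f,   // value written to weak edges
                               1.0f,   // value written to strong edges
                               false); // inverse: flip black/white
    // On success, img.data now holds the edge map replicated across all channels.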
@@ -26,9 +26,9 @@ namespace Qwen {
blocks["linear_2"] = std::shared_ptr<GGMLBlock>(new Linear(time_embed_dim, out_dim, sample_proj_bias));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* sample,
struct ggml_tensor* condition = nullptr) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* sample,
ggml_tensor* condition = nullptr) {
if (condition != nullptr) {
auto cond_proj = std::dynamic_pointer_cast<Linear>(blocks["cond_proj"]);
sample = ggml_add(ctx->ggml_ctx, sample, cond_proj->forward(ctx, condition));
@@ -49,8 +49,8 @@ namespace Qwen {
blocks["timestep_embedder"] = std::shared_ptr<GGMLBlock>(new TimestepEmbedding(256, embedding_dim));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* timesteps) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* timesteps) {
// timesteps: [N,]
// return: [N, embedding_dim]
auto timestep_embedder = std::dynamic_pointer_cast<TimestepEmbedding>(blocks["timestep_embedder"]);
@@ -107,10 +107,10 @@ namespace Qwen {
}

std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
struct ggml_tensor* img,
struct ggml_tensor* txt,
struct ggml_tensor* pe,
struct ggml_tensor* mask = nullptr) {
ggml_tensor* img,
ggml_tensor* txt,
ggml_tensor* pe,
ggml_tensor* mask = nullptr) {
// img: [N, n_img_token, hidden_size]
// txt: [N, n_txt_token, hidden_size]
// pe: [n_img_token + n_txt_token, d_head/2, 2, 2]
@@ -249,11 +249,11 @@ namespace Qwen {
}

virtual std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
struct ggml_tensor* img,
struct ggml_tensor* txt,
struct ggml_tensor* t_emb,
struct ggml_tensor* pe,
struct ggml_tensor* modulate_index = nullptr) {
ggml_tensor* img,
ggml_tensor* txt,
ggml_tensor* t_emb,
ggml_tensor* pe,
ggml_tensor* modulate_index = nullptr) {
// img: [N, n_img_token, hidden_size]
// txt: [N, n_txt_token, hidden_size]
// pe: [n_img_token + n_txt_token, d_head/2, 2, 2]
@@ -325,9 +325,9 @@ namespace Qwen {
blocks["linear"] = std::shared_ptr<GGMLBlock>(new Linear(conditioning_embedding_dim, embedding_dim * 2, bias));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* c) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* c) {
// x: [N, n_token, hidden_size]
// c: [N, hidden_size]
// return: [N, n_token, patch_size * patch_size * out_channels]
@@ -389,12 +389,12 @@ namespace Qwen {
blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, params.patch_size * params.patch_size * params.out_channels));
}

struct ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* timestep,
struct ggml_tensor* context,
struct ggml_tensor* pe,
struct ggml_tensor* modulate_index = nullptr) {
ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* timestep,
ggml_tensor* context,
ggml_tensor* pe,
ggml_tensor* modulate_index = nullptr) {
auto time_text_embed = std::dynamic_pointer_cast<QwenTimestepProjEmbeddings>(blocks["time_text_embed"]);
auto txt_norm = std::dynamic_pointer_cast<RMSNorm>(blocks["txt_norm"]);
auto img_in = std::dynamic_pointer_cast<Linear>(blocks["img_in"]);
@@ -429,13 +429,13 @@ namespace Qwen {
return img;
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* timestep,
struct ggml_tensor* context,
struct ggml_tensor* pe,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* timestep,
ggml_tensor* context,
ggml_tensor* pe,
std::vector<ggml_tensor*> ref_latents = {},
struct ggml_tensor* modulate_index = nullptr) {
ggml_tensor* modulate_index = nullptr) {
// Forward pass of DiT.
// x: [N, C, H, W]
// timestep: [N,]
@@ -521,24 +521,25 @@ namespace Qwen {
return "qwen_image";
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
qwen_image.get_param_tensors(tensors, prefix);
}

struct ggml_cgraph* build_graph(struct ggml_tensor* x,
struct ggml_tensor* timesteps,
struct ggml_tensor* context,
std::vector<ggml_tensor*> ref_latents = {},
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor,
const std::vector<sd::Tensor<float>>& ref_latents_tensor = {},
bool increase_ref_index = false) {
ggml_cgraph* gf = new_graph_custom(QWEN_IMAGE_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
GGML_ASSERT(x->ne[3] == 1);
struct ggml_cgraph* gf = new_graph_custom(QWEN_IMAGE_GRAPH_SIZE);

x = to_backend(x);
context = to_backend(context);
timesteps = to_backend(timesteps);

for (int i = 0; i < ref_latents.size(); i++) {
ref_latents[i] = to_backend(ref_latents[i]);
GGML_ASSERT(!context_tensor.empty());
ggml_tensor* context = make_input(context_tensor);
std::vector<ggml_tensor*> ref_latents;
ref_latents.reserve(ref_latents_tensor.size());
for (const auto& ref_latent_tensor : ref_latents_tensor) {
ref_latents.push_back(make_input(ref_latent_tensor));
}

pe_vec = Rope::gen_qwen_image_pe(static_cast<int>(x->ne[1]),
@@ -587,7 +588,7 @@ namespace Qwen {

auto runner_ctx = get_context();

struct ggml_tensor* out = qwen_image.forward(&runner_ctx,
ggml_tensor* out = qwen_image.forward(&runner_ctx,
x,
timesteps,
context,
@@ -600,54 +601,59 @@ namespace Qwen {
return gf;
}

bool compute(int n_threads,
struct ggml_tensor* x,
struct ggml_tensor* timesteps,
struct ggml_tensor* context,
std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) {
sd::Tensor<float> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context,
const std::vector<sd::Tensor<float>>& ref_latents = {},
bool increase_ref_index = false) {
// x: [N, in_channels, h, w]
// timesteps: [N, ]
// context: [N, max_position, hidden_size]
auto get_graph = [&]() -> struct ggml_cgraph* {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, ref_latents, increase_ref_index);
};

return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
}

void test() {
struct ggml_init_params params;
ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
params.mem_buffer = nullptr;
params.no_alloc = false;

struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);

{
// auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 16, 1);
// auto x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 16, 16, 16, 1);
// ggml_set_f32(x, 0.01f);
auto x = load_tensor_from_file(work_ctx, "./qwen_image_x.bin");
print_ggml_tensor(x);
auto x = sd::load_tensor_from_file_as_tensor<float>("./qwen_image_x.bin");
print_sd_tensor(x);

std::vector<float> timesteps_vec(1, 1000.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);

// auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 3584, 256, 1);
// auto context = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 3584, 256, 1);
// ggml_set_f32(context, 0.01f);
auto context = load_tensor_from_file(work_ctx, "./qwen_image_context.bin");
print_ggml_tensor(context);
auto context = sd::load_tensor_from_file_as_tensor<float>("./qwen_image_context.bin");
print_sd_tensor(context);

struct ggml_tensor* out = nullptr;
sd::Tensor<float> out;

int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, {}, false, &out, work_ctx);
auto out_opt = compute(8,
x,
timesteps,
context,
{},
false);
int64_t t1 = ggml_time_ms();

print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("qwen_image test done in %lldms", t1 - t0);
}
}

18 src/rope.hpp
@@ -600,9 +600,9 @@ namespace Rope {
return embed_nd(ids, bs, static_cast<float>(theta), axes_dim, wrap_dims);
}

__STATIC_INLINE__ struct ggml_tensor* apply_rope(struct ggml_context* ctx,
struct ggml_tensor* x,
struct ggml_tensor* pe,
__STATIC_INLINE__ ggml_tensor* apply_rope(ggml_context* ctx,
ggml_tensor* x,
ggml_tensor* pe,
bool rope_interleaved = true) {
// x: [N, L, n_head, d_head]
// pe: [L, d_head/2, 2, 2], [[cos, -sin], [sin, cos]]
@@ -641,12 +641,12 @@ namespace Rope {
return x_out;
}

__STATIC_INLINE__ struct ggml_tensor* attention(GGMLRunnerContext* ctx,
struct ggml_tensor* q,
struct ggml_tensor* k,
struct ggml_tensor* v,
struct ggml_tensor* pe,
struct ggml_tensor* mask,
__STATIC_INLINE__ ggml_tensor* attention(GGMLRunnerContext* ctx,
ggml_tensor* q,
ggml_tensor* k,
ggml_tensor* v,
ggml_tensor* pe,
ggml_tensor* mask,
float kv_scale = 1.0f,
bool rope_interleaved = true) {
// q,k,v: [N, L, n_head, d_head]

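Editor's note: the pe comment above describes per-position 2x2 rotation matrices [[cos, -sin], [sin, cos]] applied to each channel pair of x. A self-contained sketch of that arithmetic for a single pair, independent of ggml (the angle is an arbitrary example value):

    #include <cmath>
    #include <cstdio>

    int main() {
        float x0 = 1.0f, x1 = 0.0f;        // one (even, odd) channel pair
        float t  = 1.57079632f;            // example angle: pi/2
        float c = std::cos(t), s = std::sin(t);
        float y0 = c * x0 - s * x1;        // first row:  [cos, -sin]
        float y1 = s * x0 + c * x1;        // second row: [sin,  cos]
        printf("(%.3f, %.3f)\n", y0, y1);  // prints (0.000, 1.000): a quarter turn
        return 0;
    }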
361 src/sample-cache.cpp (new file)
@@ -0,0 +1,361 @@
#include "sample-cache.h"

namespace sd_sample {

static float get_cache_reuse_threshold(const sd_cache_params_t& params) {
float reuse_threshold = params.reuse_threshold;
if (reuse_threshold == INFINITY) {
if (params.mode == SD_CACHE_EASYCACHE) {
reuse_threshold = 0.2f;
} else if (params.mode == SD_CACHE_UCACHE) {
reuse_threshold = 1.0f;
}
}
return std::max(0.0f, reuse_threshold);
}

bool SampleCacheRuntime::easycache_enabled() const {
return mode == SampleCacheMode::EASYCACHE;
}

bool SampleCacheRuntime::ucache_enabled() const {
return mode == SampleCacheMode::UCACHE;
}

bool SampleCacheRuntime::cachedit_enabled() const {
return mode == SampleCacheMode::CACHEDIT;
}

static bool has_valid_cache_percent_range(const sd_cache_params_t& cache_params) {
if (cache_params.mode != SD_CACHE_EASYCACHE && cache_params.mode != SD_CACHE_UCACHE) {
return true;
}

return cache_params.start_percent >= 0.0f &&
cache_params.start_percent < 1.0f &&
cache_params.end_percent > 0.0f &&
cache_params.end_percent <= 1.0f &&
cache_params.start_percent < cache_params.end_percent;
}

static void init_easycache_runtime(SampleCacheRuntime& runtime,
SDVersion version,
const sd_cache_params_t& cache_params,
Denoiser* denoiser) {
if (!sd_version_is_dit(version)) {
LOG_WARN("EasyCache requested but not supported for this model type");
return;
}

EasyCacheConfig config;
config.enabled = true;
config.reuse_threshold = get_cache_reuse_threshold(cache_params);
config.start_percent = cache_params.start_percent;
config.end_percent = cache_params.end_percent;

runtime.easycache.init(config, denoiser);
if (!runtime.easycache.enabled()) {
LOG_WARN("EasyCache requested but could not be initialized for this run");
return;
}

runtime.mode = SampleCacheMode::EASYCACHE;
LOG_INFO("EasyCache enabled - threshold: %.3f, start: %.2f, end: %.2f",
config.reuse_threshold,
config.start_percent,
config.end_percent);
}

static void init_ucache_runtime(SampleCacheRuntime& runtime,
SDVersion version,
const sd_cache_params_t& cache_params,
Denoiser* denoiser,
const std::vector<float>& sigmas) {
if (!sd_version_is_unet(version)) {
LOG_WARN("UCache requested but not supported for this model type (only UNET models)");
return;
}

UCacheConfig config;
config.enabled = true;
config.reuse_threshold = get_cache_reuse_threshold(cache_params);
config.start_percent = cache_params.start_percent;
config.end_percent = cache_params.end_percent;
config.error_decay_rate = std::max(0.0f, std::min(1.0f, cache_params.error_decay_rate));
config.use_relative_threshold = cache_params.use_relative_threshold;
config.reset_error_on_compute = cache_params.reset_error_on_compute;

runtime.ucache.init(config, denoiser);
if (!runtime.ucache.enabled()) {
LOG_WARN("UCache requested but could not be initialized for this run");
return;
}

runtime.ucache.set_sigmas(sigmas);
runtime.mode = SampleCacheMode::UCACHE;
LOG_INFO("UCache enabled - threshold: %.3f, start: %.2f, end: %.2f, decay: %.2f, relative: %s, reset: %s",
config.reuse_threshold,
config.start_percent,
config.end_percent,
config.error_decay_rate,
config.use_relative_threshold ? "true" : "false",
config.reset_error_on_compute ? "true" : "false");
}

static void init_cachedit_runtime(SampleCacheRuntime& runtime,
SDVersion version,
const sd_cache_params_t& cache_params,
const std::vector<float>& sigmas) {
if (!sd_version_is_dit(version)) {
LOG_WARN("CacheDIT requested but not supported for this model type (only DiT models)");
return;
}

DBCacheConfig dbcfg;
dbcfg.enabled = (cache_params.mode == SD_CACHE_DBCACHE || cache_params.mode == SD_CACHE_CACHE_DIT);
dbcfg.Fn_compute_blocks = cache_params.Fn_compute_blocks;
dbcfg.Bn_compute_blocks = cache_params.Bn_compute_blocks;
dbcfg.residual_diff_threshold = cache_params.residual_diff_threshold;
dbcfg.max_warmup_steps = cache_params.max_warmup_steps;
dbcfg.max_cached_steps = cache_params.max_cached_steps;
dbcfg.max_continuous_cached_steps = cache_params.max_continuous_cached_steps;
if (cache_params.scm_mask != nullptr && strlen(cache_params.scm_mask) > 0) {
dbcfg.steps_computation_mask = parse_scm_mask(cache_params.scm_mask);
}
dbcfg.scm_policy_dynamic = cache_params.scm_policy_dynamic;

TaylorSeerConfig tcfg;
tcfg.enabled = (cache_params.mode == SD_CACHE_TAYLORSEER || cache_params.mode == SD_CACHE_CACHE_DIT);
tcfg.n_derivatives = cache_params.taylorseer_n_derivatives;
tcfg.skip_interval_steps = cache_params.taylorseer_skip_interval;

runtime.cachedit.init(dbcfg, tcfg);
if (!runtime.cachedit.enabled()) {
LOG_WARN("CacheDIT requested but could not be initialized for this run");
return;
}

runtime.cachedit.set_sigmas(sigmas);
runtime.mode = SampleCacheMode::CACHEDIT;
LOG_INFO("CacheDIT enabled - mode: %s, Fn: %d, Bn: %d, threshold: %.3f, warmup: %d",
cache_params.mode == SD_CACHE_CACHE_DIT ? "DBCache+TaylorSeer" : (cache_params.mode == SD_CACHE_DBCACHE ? "DBCache" : "TaylorSeer"),
dbcfg.Fn_compute_blocks,
dbcfg.Bn_compute_blocks,
dbcfg.residual_diff_threshold,
dbcfg.max_warmup_steps);
}

static void init_spectrum_runtime(SampleCacheRuntime& runtime,
SDVersion version,
const sd_cache_params_t& cache_params,
const std::vector<float>& sigmas) {
if (!sd_version_is_unet(version) && !sd_version_is_dit(version)) {
LOG_WARN("Spectrum requested but not supported for this model type (only UNET and DiT models)");
return;
}

SpectrumConfig config;
config.w = cache_params.spectrum_w;
config.m = cache_params.spectrum_m;
config.lam = cache_params.spectrum_lam;
config.window_size = cache_params.spectrum_window_size;
config.flex_window = cache_params.spectrum_flex_window;
config.warmup_steps = cache_params.spectrum_warmup_steps;
config.stop_percent = cache_params.spectrum_stop_percent;

size_t total_steps = sigmas.size() > 0 ? sigmas.size() - 1 : 0;
runtime.spectrum.init(config, total_steps);
runtime.spectrum_enabled = true;

LOG_INFO("Spectrum enabled - w: %.2f, m: %d, lam: %.2f, window: %d, flex: %.2f, warmup: %d, stop: %.0f%%",
config.w, config.m, config.lam,
config.window_size, config.flex_window,
config.warmup_steps, config.stop_percent * 100.0f);
}

SampleCacheRuntime init_sample_cache_runtime(SDVersion version,
const sd_cache_params_t* cache_params,
Denoiser* denoiser,
const std::vector<float>& sigmas) {
SampleCacheRuntime runtime;
if (cache_params == nullptr || cache_params->mode == SD_CACHE_DISABLED) {
return runtime;
}

if (!has_valid_cache_percent_range(*cache_params)) {
LOG_WARN("Cache disabled due to invalid percent range (start=%.3f, end=%.3f)",
cache_params->start_percent,
cache_params->end_percent);
return runtime;
}

switch (cache_params->mode) {
case SD_CACHE_EASYCACHE:
init_easycache_runtime(runtime, version, *cache_params, denoiser);
break;
case SD_CACHE_UCACHE:
init_ucache_runtime(runtime, version, *cache_params, denoiser, sigmas);
break;
case SD_CACHE_DBCACHE:
case SD_CACHE_TAYLORSEER:
case SD_CACHE_CACHE_DIT:
init_cachedit_runtime(runtime, version, *cache_params, sigmas);
break;
case SD_CACHE_SPECTRUM:
init_spectrum_runtime(runtime, version, *cache_params, sigmas);
break;
default:
break;
}

return runtime;
}

SampleStepCacheDispatcher::SampleStepCacheDispatcher(SampleCacheRuntime& runtime, int step, float sigma)
: runtime(runtime), step(step), sigma(sigma), step_index(step > 0 ? (step - 1) : -1) {
if (step_index < 0) {
return;
}

switch (runtime.mode) {
case SampleCacheMode::EASYCACHE:
runtime.easycache.begin_step(step_index, sigma);
break;
case SampleCacheMode::UCACHE:
runtime.ucache.begin_step(step_index, sigma);
break;
case SampleCacheMode::CACHEDIT:
runtime.cachedit.begin_step(step_index, sigma);
break;
case SampleCacheMode::NONE:
break;
}
}

bool SampleStepCacheDispatcher::before_condition(const void* condition,
const sd::Tensor<float>& input,
sd::Tensor<float>* output) {
if (step_index < 0 || condition == nullptr || output == nullptr) {
return false;
}

switch (runtime.mode) {
case SampleCacheMode::EASYCACHE:
return runtime.easycache.before_condition(condition, input, output, sigma, step_index);
case SampleCacheMode::UCACHE:
return runtime.ucache.before_condition(condition, input, output, sigma, step_index);
case SampleCacheMode::CACHEDIT:
return runtime.cachedit.before_condition(condition, input, output, sigma, step_index);
case SampleCacheMode::NONE:
return false;
}

return false;
}

void SampleStepCacheDispatcher::after_condition(const void* condition,
const sd::Tensor<float>& input,
const sd::Tensor<float>& output) {
if (step_index < 0 || condition == nullptr) {
return;
}

switch (runtime.mode) {
case SampleCacheMode::EASYCACHE:
runtime.easycache.after_condition(condition, input, output);
break;
case SampleCacheMode::UCACHE:
runtime.ucache.after_condition(condition, input, output);
break;
case SampleCacheMode::CACHEDIT:
runtime.cachedit.after_condition(condition, input, output);
break;
case SampleCacheMode::NONE:
break;
}
}

bool SampleStepCacheDispatcher::is_step_skipped() const {
switch (runtime.mode) {
case SampleCacheMode::EASYCACHE:
return runtime.easycache.is_step_skipped();
case SampleCacheMode::UCACHE:
return runtime.ucache.is_step_skipped();
case SampleCacheMode::CACHEDIT:
return runtime.cachedit.is_step_skipped();
case SampleCacheMode::NONE:
return false;
}

return false;
}

void log_sample_cache_summary(const SampleCacheRuntime& runtime, size_t total_steps) {
if (runtime.easycache_enabled()) {
if (runtime.easycache.total_steps_skipped > 0 && total_steps > 0) {
if (runtime.easycache.total_steps_skipped < static_cast<int>(total_steps)) {
double speedup = static_cast<double>(total_steps) /
static_cast<double>(total_steps - runtime.easycache.total_steps_skipped);
LOG_INFO("EasyCache skipped %d/%zu steps (%.2fx estimated speedup)",
runtime.easycache.total_steps_skipped,
total_steps,
speedup);
} else {
LOG_INFO("EasyCache skipped %d/%zu steps",
runtime.easycache.total_steps_skipped,
total_steps);
}
} else if (total_steps > 0) {
LOG_INFO("EasyCache completed without skipping steps");
}
}

if (runtime.ucache_enabled()) {
if (runtime.ucache.total_steps_skipped > 0 && total_steps > 0) {
if (runtime.ucache.total_steps_skipped < static_cast<int>(total_steps)) {
double speedup = static_cast<double>(total_steps) /
static_cast<double>(total_steps - runtime.ucache.total_steps_skipped);
LOG_INFO("UCache skipped %d/%zu steps (%.2fx estimated speedup)",
runtime.ucache.total_steps_skipped,
total_steps,
speedup);
} else {
LOG_INFO("UCache skipped %d/%zu steps",
runtime.ucache.total_steps_skipped,
total_steps);
}
} else if (total_steps > 0) {
LOG_INFO("UCache completed without skipping steps");
}
}

if (runtime.cachedit_enabled()) {
if (runtime.cachedit.total_steps_skipped > 0 && total_steps > 0) {
if (runtime.cachedit.total_steps_skipped < static_cast<int>(total_steps)) {
double speedup = static_cast<double>(total_steps) /
static_cast<double>(total_steps - runtime.cachedit.total_steps_skipped);
LOG_INFO("CacheDIT skipped %d/%zu steps (%.2fx estimated speedup)",
runtime.cachedit.total_steps_skipped,
total_steps,
speedup);
} else {
LOG_INFO("CacheDIT skipped %d/%zu steps",
runtime.cachedit.total_steps_skipped,
total_steps);
}
} else if (total_steps > 0) {
LOG_INFO("CacheDIT completed without skipping steps");
}
}

if (runtime.spectrum_enabled && runtime.spectrum.total_steps_skipped > 0 && total_steps > 0) {
double speedup = static_cast<double>(total_steps) /
static_cast<double>(total_steps - runtime.spectrum.total_steps_skipped);
LOG_INFO("Spectrum skipped %d/%zu steps (%.2fx estimated speedup)",
runtime.spectrum.total_steps_skipped,
total_steps,
speedup);
}
}

} // namespace sd_sample
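Editor's note: the estimated speedup logged by log_sample_cache_summary() above is simply total_steps / (total_steps - steps_skipped); for example, skipping 10 of 30 steps reports 30 / 20 = 1.50x. This is a gloss on the formula in the code, not a claim about measured wall-clock gains.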
61 src/sample-cache.h (new file)
@@ -0,0 +1,61 @@
#ifndef __SAMPLE_CACHE_H__
#define __SAMPLE_CACHE_H__

#include <vector>

#include "cache_dit.hpp"
#include "denoiser.hpp"
#include "easycache.hpp"
#include "model.h"
#include "spectrum.hpp"
#include "tensor.hpp"
#include "ucache.hpp"
#include "util.h"

namespace sd_sample {

enum class SampleCacheMode {
NONE,
EASYCACHE,
UCACHE,
CACHEDIT,
};

struct SampleCacheRuntime {
SampleCacheMode mode = SampleCacheMode::NONE;

EasyCacheState easycache;
UCacheState ucache;
CacheDitConditionState cachedit;
SpectrumState spectrum;

bool spectrum_enabled = false;

bool easycache_enabled() const;
bool ucache_enabled() const;
bool cachedit_enabled() const;
};

struct SampleStepCacheDispatcher {
SampleCacheRuntime& runtime;
int step;
float sigma;
int step_index;

SampleStepCacheDispatcher(SampleCacheRuntime& runtime, int step, float sigma);

bool before_condition(const void* condition, const sd::Tensor<float>& input, sd::Tensor<float>* output);
void after_condition(const void* condition, const sd::Tensor<float>& input, const sd::Tensor<float>& output);
bool is_step_skipped() const;
};

SampleCacheRuntime init_sample_cache_runtime(SDVersion version,
const sd_cache_params_t* cache_params,
Denoiser* denoiser,
const std::vector<float>& sigmas);

void log_sample_cache_summary(const SampleCacheRuntime& runtime, size_t total_steps);

} // namespace sd_sample

#endif // __SAMPLE_CACHE_H__
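Editor's note: a hypothetical wiring of the runtime and per-step dispatcher declared above into a sampler loop; `version`, `cache_params`, `denoiser`, `sigmas`, `input`, `cond_id`, and `run_model()` are assumed to exist elsewhere and are illustrative only:

    using namespace sd_sample;

    SampleCacheRuntime cache = init_sample_cache_runtime(version, &cache_params, denoiser, sigmas);
    int total_steps = (int)sigmas.size() - 1;
    for (int step = 1; step <= total_steps; step++) {
        SampleStepCacheDispatcher dispatch(cache, step, sigmas[step - 1]);
        sd::Tensor<float> out;
        if (!dispatch.before_condition(cond_id, input, &out)) {
            out = run_model(input);  // assumed model call: compute for real
            dispatch.after_condition(cond_id, input, out);
        }
        // `out` is either the cache's approximation or the fresh result.
    }
    log_sample_cache_summary(cache, total_steps);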
187 src/spectrum.hpp (new file)
@@ -0,0 +1,187 @@
#ifndef __SPECTRUM_HPP__
#define __SPECTRUM_HPP__

#include <cmath>
#include <cstring>
#include <vector>

#include "ggml_extend.hpp"
#include "tensor.hpp"

struct SpectrumConfig {
float w = 0.40f;
int m = 3;
float lam = 1.0f;
int window_size = 2;
float flex_window = 0.50f;
int warmup_steps = 4;
float stop_percent = 0.9f;
};

struct SpectrumState {
SpectrumConfig config;
int cnt = 0;
int num_cached = 0;
float curr_ws = 2.0f;
int K = 6;
int stop_step = 0;
int total_steps_skipped = 0;

std::vector<std::vector<float>> H_buf;
std::vector<float> T_buf;

void init(const SpectrumConfig& cfg, size_t total_steps) {
config = cfg;
cnt = 0;
num_cached = 0;
curr_ws = (float)cfg.window_size;
K = std::max(cfg.m + 1, 6);
stop_step = (int)(cfg.stop_percent * (float)total_steps);
total_steps_skipped = 0;
H_buf.clear();
T_buf.clear();
}

float taus(int step_cnt) const {
return (step_cnt / 50.0f) * 2.0f - 1.0f;
}

bool should_predict() {
if (cnt < config.warmup_steps)
return false;
if (stop_step > 0 && cnt >= stop_step)
return false;
if ((int)H_buf.size() < 2)
return false;

int ws = std::max(1, (int)std::floor(curr_ws));
return (num_cached + 1) % ws != 0;
}

void update(const sd::Tensor<float>& denoised) {
H_buf.emplace_back(denoised.data(), denoised.data() + denoised.numel());
T_buf.push_back(taus(cnt));

while ((int)H_buf.size() > K) {
H_buf.erase(H_buf.begin());
T_buf.erase(T_buf.begin());
}

if (cnt >= config.warmup_steps)
curr_ws += config.flex_window;

num_cached = 0;
cnt++;
}

void predict(sd::Tensor<float>* denoised) {
GGML_ASSERT(denoised != nullptr);
int64_t F = (int64_t)H_buf[0].size();
int K_curr = (int)H_buf.size();
int M1 = config.m + 1;
float tau_at = taus(cnt);

std::vector<float> X(K_curr * M1);
for (int i = 0; i < K_curr; i++) {
X[i * M1] = 1.0f;
if (M1 > 1)
X[i * M1 + 1] = T_buf[i];
for (int j = 2; j < M1; j++)
X[i * M1 + j] = 2.0f * T_buf[i] * X[i * M1 + j - 1] - X[i * M1 + j - 2];
}

std::vector<float> x_star(M1);
x_star[0] = 1.0f;
if (M1 > 1)
x_star[1] = tau_at;
for (int j = 2; j < M1; j++)
x_star[j] = 2.0f * tau_at * x_star[j - 1] - x_star[j - 2];

std::vector<float> XtX(M1 * M1, 0.0f);
for (int i = 0; i < M1; i++) {
for (int j = 0; j < M1; j++) {
float sum = 0.0f;
for (int k = 0; k < K_curr; k++)
sum += X[k * M1 + i] * X[k * M1 + j];
XtX[i * M1 + j] = sum + (i == j ? config.lam : 0.0f);
}
}

std::vector<float> L(M1 * M1, 0.0f);
if (!cholesky_decompose(XtX.data(), L.data(), M1)) {
float trace = 0.0f;
for (int i = 0; i < M1; i++)
trace += XtX[i * M1 + i];
for (int i = 0; i < M1; i++)
XtX[i * M1 + i] += 1e-4f * trace / M1;
cholesky_decompose(XtX.data(), L.data(), M1);
}

std::vector<float> v(M1);
cholesky_solve(L.data(), x_star.data(), v.data(), M1);

std::vector<float> weights(K_curr, 0.0f);
for (int k = 0; k < K_curr; k++)
for (int j = 0; j < M1; j++)
weights[k] += X[k * M1 + j] * v[j];

float* out = denoised->data();
float w_cheb = config.w;
float w_taylor = 1.0f - w_cheb;
const float* h_last = H_buf.back().data();
const float* h_prev = H_buf[H_buf.size() - 2].data();

for (int64_t f = 0; f < F; f++) {
float pred_cheb = 0.0f;
for (int k = 0; k < K_curr; k++)
pred_cheb += weights[k] * H_buf[k][f];

float pred_taylor = h_last[f] + 0.5f * (h_last[f] - h_prev[f]);

out[f] = w_taylor * pred_taylor + w_cheb * pred_cheb;
}

num_cached++;
total_steps_skipped++;
cnt++;
}

private:
static bool cholesky_decompose(const float* A, float* L, int n) {
std::memset(L, 0, n * n * sizeof(float));
for (int i = 0; i < n; i++) {
for (int j = 0; j <= i; j++) {
float sum = 0.0f;
for (int k = 0; k < j; k++)
sum += L[i * n + k] * L[j * n + k];
if (i == j) {
float diag = A[i * n + i] - sum;
if (diag <= 0.0f)
return false;
L[i * n + j] = std::sqrt(diag);
} else {
L[i * n + j] = (A[i * n + j] - sum) / L[j * n + j];
}
}
}
return true;
}

static void cholesky_solve(const float* L, const float* b, float* x, int n) {
std::vector<float> y(n);
for (int i = 0; i < n; i++) {
float sum = 0.0f;
for (int j = 0; j < i; j++)
sum += L[i * n + j] * y[j];
y[i] = (b[i] - sum) / L[i * n + i];
}
for (int i = n - 1; i >= 0; i--) {
float sum = 0.0f;
for (int j = i + 1; j < n; j++)
sum += L[j * n + i] * x[j];
x[i] = (y[i] - sum) / L[i * n + i];
}
}
};

#endif // __SPECTRUM_HPP__
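Editor's note: predict() above solves the ridge-regularized system (X^T X + lam * I) v = x_star with the private Cholesky helpers. A standalone numerical check of that factorization and solve order on a 2x2 SPD system (same row-major layout as the helpers; the numbers are an invented test case):

    #include <cmath>
    #include <cstdio>

    int main() {
        // A = [[4, 2], [2, 3]] is SPD; A * (1, 1) = (6, 5), so solving A x = b
        // with b = (6, 5) should recover x = (1, 1).
        float A[4] = {4, 2, 2, 3};
        float b[2] = {6, 5};
        float L[4] = {0, 0, 0, 0};
        L[0] = std::sqrt(A[0]);                // L00 = 2
        L[2] = A[2] / L[0];                    // L10 = 1
        L[3] = std::sqrt(A[3] - L[2] * L[2]);  // L11 = sqrt(2)
        float y0 = b[0] / L[0];                // forward solve  L y = b
        float y1 = (b[1] - L[2] * y0) / L[3];
        float x1 = y1 / L[3];                  // back solve  L^T x = y
        float x0 = (y0 - L[2] * x1) / L[0];
        printf("x = (%.3f, %.3f)\n", x0, x1);  // expect (1.000, 1.000)
        return 0;
    }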
(File diff suppressed because it is too large.)
130 src/t5.hpp
@@ -1,4 +1,4 @@
#ifndef __T5_HPP__
#ifndef __T5_HPP__
#define __T5_HPP__

#include <cfloat>
@@ -211,9 +211,9 @@ protected:
// implementation. It's based on the following three ideas:
//
// 1. Because it uses the *unigram* model:
//   best_score(x1, x2, ..., xt) = best_score(x1, x2, ..., x{t-1}) + score(xt)
//   best_score(x1, x2, ... xt) = best_score(x1, x2, ... x{t-1}) + score(xt)
// Deciding the best path (and score) can be decoupled into two isolated
// terms: (a) the best path ended before the last token `best_score(x1, x2, ...,
// terms: (a) the best path ended before the last token `best_score(x1, x2, ...)`
// x{t-1})`, and (b) the last token and its `score(xt)`. The two terms are
// not related to each other at all.
//
@@ -227,7 +227,7 @@ protected:
// position, where n is the input length and k is the maximum number of tokens
// that can be recognized starting at each position.
//
// 2. Again, because it uses the *unigram* model, we don't need to actually
// 2. Again, because it uses the *unigram* model, we don't need to actually
// store the lattice nodes. We still recognize all the tokens and lattice
// nodes from the input, but along identifying them, we use and discard them
// on the fly. There is no need to actually store them for best path Viterbi
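Editor's note: the comment above reduces unigram Viterbi to a single left-to-right pass over best_score(). A self-contained toy sketch of that recurrence; the vocabulary and scores are invented for illustration and are not the tokenizer's real tables:

    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <vector>

    int main() {
        std::string input = "abc";
        std::unordered_map<std::string, float> score = {
            {"a", -1.0f}, {"b", -1.2f}, {"c", -0.9f}, {"ab", -1.5f}, {"bc", -1.6f}};
        const float NEG_INF = -1e30f;
        size_t n = input.size();
        std::vector<float> best(n + 1, NEG_INF);
        std::vector<size_t> prev(n + 1, 0);  // start of the best token ending here
        best[0] = 0.0f;
        for (size_t end = 1; end <= n; ++end) {
            for (size_t start = 0; start < end; ++start) {
                auto it = score.find(input.substr(start, end - start));
                if (it == score.end() || best[start] == NEG_INF)
                    continue;
                // best_score(x1..xt) = best_score(x1..x{t-1}) + score(xt):
                // the two terms are independent, so we just take the max per
                // end position (dynamic programming, no lattice stored).
                float s = best[start] + it->second;
                if (s > best[end]) {
                    best[end] = s;
                    prev[end] = start;
                }
            }
        }
        std::vector<std::string> tokens;  // walk prev[] back to the segmentation
        for (size_t end = n; end > 0; end = prev[end])
            tokens.insert(tokens.begin(), input.substr(prev[end], end - prev[end]));
        for (const auto& t : tokens)
            printf("%s ", t.c_str());     // prints "ab c " for these scores
        return 0;
    }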
||||
@@ -462,7 +462,7 @@ protected:
     int64_t hidden_size;
     float eps;

-    void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
+    void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
         enum ggml_type wtype = GGML_TYPE_F32;
         params["weight"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
     }
@@ -473,8 +473,8 @@ public:
         : hidden_size(hidden_size),
           eps(eps) {}

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
-        struct ggml_tensor* w = params["weight"];
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
+        ggml_tensor* w = params["weight"];
         x = ggml_rms_norm(ctx->ggml_ctx, x, eps);
         x = ggml_mul(ctx->ggml_ctx, x, w);
         return x;
@@ -488,7 +488,7 @@ public:
         blocks["wo"] = std::shared_ptr<GGMLBlock>(new Linear(ff_dim, model_dim, false));
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
         // x: [N, n_token, model_dim]
         auto wi = std::dynamic_pointer_cast<Linear>(blocks["wi"]);
         auto wo = std::dynamic_pointer_cast<Linear>(blocks["wo"]);
@@ -510,7 +510,7 @@ public:
         blocks["wo"] = std::shared_ptr<GGMLBlock>(new Linear(ff_dim, model_dim, false, false, false, scale));
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
         // x: [N, n_token, model_dim]
         auto wi_0 = std::dynamic_pointer_cast<Linear>(blocks["wi_0"]);
         auto wi_1 = std::dynamic_pointer_cast<Linear>(blocks["wi_1"]);
@@ -531,7 +531,7 @@ public:
         blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new T5LayerNorm(model_dim));
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
         // x: [N, n_token, model_dim]
         auto DenseReluDense = std::dynamic_pointer_cast<T5DenseGatedActDense>(blocks["DenseReluDense"]);
         auto layer_norm = std::dynamic_pointer_cast<T5LayerNorm>(blocks["layer_norm"]);
@@ -570,8 +570,8 @@ public:
         }
     }

-    struct ggml_tensor* compute_bias(GGMLRunnerContext* ctx,
-                                     struct ggml_tensor* relative_position_bucket) {
+    ggml_tensor* compute_bias(GGMLRunnerContext* ctx,
+                              ggml_tensor* relative_position_bucket) {
         auto relative_attention_bias = std::dynamic_pointer_cast<Embedding>(blocks["relative_attention_bias"]);

         auto values = relative_attention_bias->forward(ctx, relative_position_bucket); // shape (query_length, key_length, num_heads)
@@ -580,11 +580,11 @@ public:
     }

     // x: [N, n_token, model_dim]
-    std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
-                                                                struct ggml_tensor* x,
-                                                                struct ggml_tensor* past_bias = nullptr,
-                                                                struct ggml_tensor* mask = nullptr,
-                                                                struct ggml_tensor* relative_position_bucket = nullptr) {
+    std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
+                                                  ggml_tensor* x,
+                                                  ggml_tensor* past_bias = nullptr,
+                                                  ggml_tensor* mask = nullptr,
+                                                  ggml_tensor* relative_position_bucket = nullptr) {
         auto q_proj = std::dynamic_pointer_cast<Linear>(blocks["q"]);
         auto k_proj = std::dynamic_pointer_cast<Linear>(blocks["k"]);
         auto v_proj = std::dynamic_pointer_cast<Linear>(blocks["v"]);
@@ -629,11 +629,11 @@ public:
         blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new T5LayerNorm(model_dim));
     }

-    std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
-                                                                struct ggml_tensor* x,
-                                                                struct ggml_tensor* past_bias = nullptr,
-                                                                struct ggml_tensor* mask = nullptr,
-                                                                struct ggml_tensor* relative_position_bucket = nullptr) {
+    std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
+                                                  ggml_tensor* x,
+                                                  ggml_tensor* past_bias = nullptr,
+                                                  ggml_tensor* mask = nullptr,
+                                                  ggml_tensor* relative_position_bucket = nullptr) {
         // x: [N, n_token, model_dim]
         auto SelfAttention = std::dynamic_pointer_cast<T5Attention>(blocks["SelfAttention"]);
         auto layer_norm = std::dynamic_pointer_cast<T5LayerNorm>(blocks["layer_norm"]);
@@ -655,11 +655,11 @@ public:
         blocks["layer.1"] = std::shared_ptr<GGMLBlock>(new T5LayerFF(model_dim, ff_dim));
     }

-    std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(GGMLRunnerContext* ctx,
-                                                                struct ggml_tensor* x,
-                                                                struct ggml_tensor* past_bias = nullptr,
-                                                                struct ggml_tensor* mask = nullptr,
-                                                                struct ggml_tensor* relative_position_bucket = nullptr) {
+    std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
+                                                  ggml_tensor* x,
+                                                  ggml_tensor* past_bias = nullptr,
+                                                  ggml_tensor* mask = nullptr,
+                                                  ggml_tensor* relative_position_bucket = nullptr) {
         // x: [N, n_token, model_dim]
         auto layer_0 = std::dynamic_pointer_cast<T5LayerSelfAttention>(blocks["layer.0"]);
         auto layer_1 = std::dynamic_pointer_cast<T5LayerFF>(blocks["layer.1"]);
@@ -690,11 +690,11 @@ public:
         blocks["final_layer_norm"] = std::shared_ptr<GGMLBlock>(new T5LayerNorm(model_dim));
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
-                                struct ggml_tensor* x,
-                                struct ggml_tensor* past_bias = nullptr,
-                                struct ggml_tensor* attention_mask = nullptr,
-                                struct ggml_tensor* relative_position_bucket = nullptr) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx,
+                         ggml_tensor* x,
+                         ggml_tensor* past_bias = nullptr,
+                         ggml_tensor* attention_mask = nullptr,
+                         ggml_tensor* relative_position_bucket = nullptr) {
         // x: [N, n_token, model_dim]
         for (int i = 0; i < num_layers; i++) {
             auto block = std::dynamic_pointer_cast<T5Block>(blocks["block." + std::to_string(i)]);
@@ -737,11 +737,11 @@ public:
                                          params.model_dim));
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
-                                struct ggml_tensor* input_ids,
-                                struct ggml_tensor* past_bias = nullptr,
-                                struct ggml_tensor* attention_mask = nullptr,
-                                struct ggml_tensor* relative_position_bucket = nullptr) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx,
+                         ggml_tensor* input_ids,
+                         ggml_tensor* past_bias = nullptr,
+                         ggml_tensor* attention_mask = nullptr,
+                         ggml_tensor* relative_position_bucket = nullptr) {
         // input_ids: [N, n_token]

         auto shared = std::dynamic_pointer_cast<Embedding>(blocks["shared"]);
@@ -776,14 +776,14 @@ struct T5Runner : public GGMLRunner {
         return "t5";
     }

-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
+    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
         model.get_param_tensors(tensors, prefix);
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
-                                struct ggml_tensor* input_ids,
-                                struct ggml_tensor* relative_position_bucket,
-                                struct ggml_tensor* attention_mask = nullptr) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx,
+                         ggml_tensor* input_ids,
+                         ggml_tensor* relative_position_bucket,
+                         ggml_tensor* attention_mask = nullptr) {
         size_t N = input_ids->ne[1];
         size_t n_token = input_ids->ne[0];

@@ -791,12 +791,11 @@ struct T5Runner : public GGMLRunner {
         return hidden_states;
     }

-    struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids,
-                                    struct ggml_tensor* attention_mask = nullptr) {
-        struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
-
-        input_ids = to_backend(input_ids);
-        attention_mask = to_backend(attention_mask);
+    ggml_cgraph* build_graph(const sd::Tensor<int32_t>& input_ids_tensor,
+                             const sd::Tensor<float>& attention_mask_tensor = {}) {
+        ggml_cgraph* gf = ggml_new_graph(compute_ctx);
+        ggml_tensor* input_ids = make_input(input_ids_tensor);
+        ggml_tensor* attention_mask = attention_mask_tensor.empty() ? nullptr : make_input(attention_mask_tensor);

         relative_position_bucket_vec = compute_relative_position_bucket(static_cast<int>(input_ids->ne[0]), static_cast<int>(input_ids->ne[0]));

@@ -814,22 +813,20 @@ struct T5Runner : public GGMLRunner {
         set_backend_tensor_data(relative_position_bucket, relative_position_bucket_vec.data());

         auto runner_ctx = get_context();
-        struct ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, relative_position_bucket, attention_mask);
+        ggml_tensor* hidden_states = forward(&runner_ctx, input_ids, relative_position_bucket, attention_mask);

         ggml_build_forward_expand(gf, hidden_states);

         return gf;
     }

-    bool compute(const int n_threads,
-                 struct ggml_tensor* input_ids,
-                 struct ggml_tensor* attention_mask,
-                 ggml_tensor** output,
-                 ggml_context* output_ctx = nullptr) {
-        auto get_graph = [&]() -> struct ggml_cgraph* {
+    sd::Tensor<float> compute(const int n_threads,
+                              const sd::Tensor<int32_t>& input_ids,
+                              const sd::Tensor<float>& attention_mask) {
+        auto get_graph = [&]() -> ggml_cgraph* {
             return build_graph(input_ids, attention_mask);
         };
-        return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
+        return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, true), 3);
     }

     static std::vector<int> _relative_position_bucket(const std::vector<int>& relative_position,
@@ -912,7 +909,7 @@ struct T5Embedder {
         : model(backend, offload_params_to_cpu, tensor_storage_map, prefix, is_umt5), tokenizer(is_umt5) {
     }

-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
+    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
         model.get_param_tensors(tensors, prefix);
     }

@@ -962,17 +959,16 @@ struct T5Embedder {
     }

     void test() {
-        struct ggml_init_params params;
+        ggml_init_params params;
         params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
         params.mem_buffer = nullptr;
         params.no_alloc = false;

-        struct ggml_context* work_ctx = ggml_init(params);
-        GGML_ASSERT(work_ctx != nullptr);
+        ggml_context* ctx = ggml_init(params);
+        GGML_ASSERT(ctx != nullptr);

         {
             std::string text("a lovely cat");
             // std::string text("一只可爱的猫"); // umt5 chinease test
             auto tokens_and_weights = tokenize(text, 512, true);
             std::vector<int>& tokens = std::get<0>(tokens_and_weights);
             std::vector<float>& weights = std::get<1>(tokens_and_weights);
@@ -981,15 +977,17 @@
                 printf("%d ", token);
             }
             printf("\n");
-            auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
-            auto attention_mask = vector_to_ggml_tensor(work_ctx, masks);
-            struct ggml_tensor* out = nullptr;
+            auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
+            auto attention_mask = sd::Tensor<float>::from_vector(masks);
+            sd::Tensor<float> out;

             int64_t t0 = ggml_time_ms();
-            model.compute(8, input_ids, attention_mask, &out, work_ctx);
+            auto out_opt = model.compute(8, input_ids, attention_mask);
             int64_t t1 = ggml_time_ms();

-            print_ggml_tensor(out);
+            GGML_ASSERT(!out_opt.empty());
+            out = std::move(out_opt);
+            print_sd_tensor(out);
             LOG_DEBUG("t5 test done in %lldms", t1 - t0);
         }
     }

199
src/tae.hpp
@@ -37,7 +37,7 @@ public:
         }
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
         // x: [n, n_in, h, w]
         // return: [n, n_out, h, w]

@@ -107,7 +107,7 @@ public:
         blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, z_channels, {3, 3}, {1, 1}, {1, 1}));
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
         // x: [n, in_channels, h, w]
         // return: [n, z_channels, h/8, w/8]

@@ -157,7 +157,7 @@ public:
         blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* z) override {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* z) override {
         // z: [n, z_channels, h, w]
         // return: [n, out_channels, h*8, w*8]

@@ -192,7 +192,7 @@ public:
         blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels * stride, channels, {1, 1}, {1, 1}, {0, 0}, {1, 1}, false));
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
         auto conv = std::dynamic_pointer_cast<UnaryBlock>(blocks["conv"]);
         auto h = x;
         if (stride != 1) {
@@ -212,7 +212,7 @@ public:
         blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, channels * stride, {1, 1}, {1, 1}, {0, 0}, {1, 1}, false));
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
         auto conv = std::dynamic_pointer_cast<UnaryBlock>(blocks["conv"]);
         auto h = conv->forward(ctx, x);
         if (stride != 1) {
@@ -236,7 +236,7 @@ public:
         }
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* past) {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, ggml_tensor* past) {
         // x: [n, channels, h, w]
         auto conv0 = std::dynamic_pointer_cast<Conv2d>(blocks["conv.0"]);
         auto conv1 = std::dynamic_pointer_cast<Conv2d>(blocks["conv.2"]);
@@ -260,8 +260,8 @@ public:
     }
 };

-struct ggml_tensor* patchify(struct ggml_context* ctx,
-                             struct ggml_tensor* x,
+ggml_tensor* patchify(ggml_context* ctx,
+                      ggml_tensor* x,
                       int64_t patch_size,
                       int64_t b = 1) {
     // x: [f, b*c, h*q, w*r]
@@ -289,8 +289,8 @@ struct ggml_tensor* patchify(struct ggml_context* ctx,
     return x;
 }

-struct ggml_tensor* unpatchify(struct ggml_context* ctx,
-                               struct ggml_tensor* x,
+ggml_tensor* unpatchify(ggml_context* ctx,
+                        ggml_tensor* x,
                         int64_t patch_size,
                         int64_t b = 1) {
     // x: [f, b*c*r*q, h, w]
@@ -339,7 +339,7 @@ public:
         blocks[std::to_string(index)] = std::shared_ptr<GGMLBlock>(new Conv2d(hidden, z_channels, {3, 3}, {1, 1}, {1, 1}));
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* z) override {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* z) override {
         auto first_conv = std::dynamic_pointer_cast<Conv2d>(blocks["0"]);

         if (patch_size > 1) {
@@ -396,7 +396,7 @@ public:
         blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels[num_layers], out_channels * patch_size * patch_size, {3, 3}, {1, 1}, {1, 1}));
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* z) override {
+    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* z) override {
         auto first_conv = std::dynamic_pointer_cast<Conv2d>(blocks["1"]);

         // Clamp()
@@ -442,10 +442,12 @@ protected:
     bool decode_only;
     SDVersion version;

+public:
+    int z_channels = 16;
+
 public:
     TAEHV(bool decode_only = true, SDVersion version = VERSION_WAN2)
         : decode_only(decode_only), version(version) {
-        int z_channels = 16;
         int patch = 1;
         if (version == VERSION_WAN2_2_TI2V) {
             z_channels = 48;
@@ -457,7 +459,7 @@ public:
         }
     }

-    struct ggml_tensor* decode(GGMLRunnerContext* ctx, struct ggml_tensor* z) {
+    ggml_tensor* decode(GGMLRunnerContext* ctx, ggml_tensor* z) {
         auto decoder = std::dynamic_pointer_cast<TinyVideoDecoder>(blocks["decoder"]);
         if (sd_version_is_wan(version)) {
             // (W, H, C, T) -> (W, H, T, C)
@@ -471,7 +473,7 @@ public:
         return result;
     }

-    struct ggml_tensor* encode(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
+    ggml_tensor* encode(GGMLRunnerContext* ctx, ggml_tensor* x) {
         auto encoder = std::dynamic_pointer_cast<TinyVideoEncoder>(blocks["encoder"]);
         // (W, H, T, C) -> (W, H, C, T)
         x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 1, 3, 2));
@@ -494,10 +496,12 @@ protected:
     bool decode_only;
     bool taef2 = false;

+public:
+    int z_channels = 4;
+
 public:
     TAESD(bool decode_only = true, SDVersion version = VERSION_SD1)
         : decode_only(decode_only) {
-        int z_channels = 4;
         bool use_midblock_gn = false;
         taef2 = sd_version_is_flux2(version);

@@ -515,7 +519,7 @@ public:
         }
     }

-    struct ggml_tensor* decode(GGMLRunnerContext* ctx, struct ggml_tensor* z) {
+    ggml_tensor* decode(GGMLRunnerContext* ctx, ggml_tensor* z) {
         auto decoder = std::dynamic_pointer_cast<TinyDecoder>(blocks["decoder.layers"]);
         if (taef2) {
             z = unpatchify(ctx->ggml_ctx, z, 2);
@@ -523,7 +527,7 @@ public:
         return decoder->forward(ctx, z);
     }

-    struct ggml_tensor* encode(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
+    ggml_tensor* encode(GGMLRunnerContext* ctx, ggml_tensor* x) {
         auto encoder = std::dynamic_pointer_cast<TinyEncoder>(blocks["encoder.layers"]);
         auto z = encoder->forward(ctx, x);
         if (taef2) {
@@ -533,20 +537,7 @@ public:
     }
 };

-struct TinyAutoEncoder : public GGMLRunner {
-    TinyAutoEncoder(ggml_backend_t backend, bool offload_params_to_cpu)
-        : GGMLRunner(backend, offload_params_to_cpu) {}
-    virtual bool compute(const int n_threads,
-                         struct ggml_tensor* z,
-                         bool decode_graph,
-                         struct ggml_tensor** output,
-                         struct ggml_context* output_ctx = nullptr) = 0;
-
-    virtual bool load_from_file(const std::string& file_path, int n_threads) = 0;
-    virtual void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) = 0;
-};
-
-struct TinyImageAutoEncoder : public TinyAutoEncoder {
+struct TinyImageAutoEncoder : public VAE {
     TAESD taesd;
     bool decode_only = false;

@@ -558,7 +549,8 @@ struct TinyImageAutoEncoder : public TinyAutoEncoder {
                          SDVersion version = VERSION_SD1)
         : decode_only(decoder_only),
           taesd(decoder_only, version),
-          TinyAutoEncoder(backend, offload_params_to_cpu) {
+          VAE(version, backend, offload_params_to_cpu) {
+        scale_input = false;
         taesd.init(params_ctx, tensor_storage_map, prefix);
     }

@@ -566,60 +558,48 @@ struct TinyImageAutoEncoder : public TinyAutoEncoder {
         return "taesd";
     }

     bool load_from_file(const std::string& file_path, int n_threads) {
         LOG_INFO("loading taesd from '%s', decode_only = %s", file_path.c_str(), decode_only ? "true" : "false");
         alloc_params_buffer();
         std::map<std::string, ggml_tensor*> taesd_tensors;
         taesd.get_param_tensors(taesd_tensors);
         std::set<std::string> ignore_tensors;
         if (decode_only) {
             ignore_tensors.insert("encoder.");
         }

         ModelLoader model_loader;
         if (!model_loader.init_from_file_and_convert_name(file_path)) {
             LOG_ERROR("init taesd model loader from file failed: '%s'", file_path.c_str());
             return false;
         }

         bool success = model_loader.load_tensors(taesd_tensors, ignore_tensors, n_threads);

         if (!success) {
             LOG_ERROR("load tae tensors from model loader failed");
             return false;
         }

         LOG_INFO("taesd model loaded");
         return success;
     }

-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
+    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
         taesd.get_param_tensors(tensors, prefix);
     }

-    struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) {
-        struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
-        z = to_backend(z);
+    sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
+        SD_UNUSED(rng);
+        return vae_output;
+    }
+
+    sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
+        return latents;
+    }
+
+    sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
+        return latents;
+    }
+
+    int get_encoder_output_channels(int input_channels) {
+        return taesd.z_channels;
+    }
+
+    ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) {
+        ggml_cgraph* gf = ggml_new_graph(compute_ctx);
+        ggml_tensor* z = make_input(z_tensor);
         auto runner_ctx = get_context();
-        struct ggml_tensor* out = decode_graph ? taesd.decode(&runner_ctx, z) : taesd.encode(&runner_ctx, z);
+        ggml_tensor* out = decode_graph ? taesd.decode(&runner_ctx, z) : taesd.encode(&runner_ctx, z);
         ggml_build_forward_expand(gf, out);
         return gf;
     }

-    bool compute(const int n_threads,
-                 struct ggml_tensor* z,
-                 bool decode_graph,
-                 struct ggml_tensor** output,
-                 struct ggml_context* output_ctx = nullptr) {
-        auto get_graph = [&]() -> struct ggml_cgraph* {
-            return build_graph(z, decode_graph);
+    sd::Tensor<float> _compute(const int n_threads,
+                               const sd::Tensor<float>& z_tensor,
+                               bool decode_graph) override {
+        auto get_graph = [&]() -> ggml_cgraph* {
+            return build_graph(z_tensor, decode_graph);
         };

-        return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
+        return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), z_tensor.dim());
     }
 };

-struct TinyVideoAutoEncoder : public TinyAutoEncoder {
+struct TinyVideoAutoEncoder : public VAE {
     TAEHV taehv;
     bool decode_only = false;

@@ -631,7 +611,8 @@ struct TinyVideoAutoEncoder : public TinyAutoEncoder {
                          SDVersion version = VERSION_WAN2)
         : decode_only(decoder_only),
           taehv(decoder_only, version),
-          TinyAutoEncoder(backend, offload_params_to_cpu) {
+          VAE(version, backend, offload_params_to_cpu) {
+        scale_input = false;
         taehv.init(params_ctx, tensor_storage_map, prefix);
     }

@@ -639,56 +620,44 @@ struct TinyVideoAutoEncoder : public TinyAutoEncoder {
         return "taehv";
     }

     bool load_from_file(const std::string& file_path, int n_threads) {
         LOG_INFO("loading taehv from '%s', decode_only = %s", file_path.c_str(), decode_only ? "true" : "false");
         alloc_params_buffer();
         std::map<std::string, ggml_tensor*> taehv_tensors;
         taehv.get_param_tensors(taehv_tensors);
         std::set<std::string> ignore_tensors;
         if (decode_only) {
             ignore_tensors.insert("encoder.");
         }

         ModelLoader model_loader;
         if (!model_loader.init_from_file(file_path)) {
             LOG_ERROR("init taehv model loader from file failed: '%s'", file_path.c_str());
             return false;
         }

         bool success = model_loader.load_tensors(taehv_tensors, ignore_tensors, n_threads);

         if (!success) {
             LOG_ERROR("load tae tensors from model loader failed");
             return false;
         }

         LOG_INFO("taehv model loaded");
         return success;
     }

-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
+    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
         taehv.get_param_tensors(tensors, prefix);
     }

-    struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) {
-        struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
-        z = to_backend(z);
+    sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
+        SD_UNUSED(rng);
+        return vae_output;
+    }
+
+    sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
+        return latents;
+    }
+
+    sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
+        return latents;
+    }
+
+    int get_encoder_output_channels(int input_channels) {
+        return taehv.z_channels;
+    }
+
+    ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) {
+        ggml_cgraph* gf = ggml_new_graph(compute_ctx);
+        ggml_tensor* z = make_input(z_tensor);
         auto runner_ctx = get_context();
-        struct ggml_tensor* out = decode_graph ? taehv.decode(&runner_ctx, z) : taehv.encode(&runner_ctx, z);
+        ggml_tensor* out = decode_graph ? taehv.decode(&runner_ctx, z) : taehv.encode(&runner_ctx, z);
         ggml_build_forward_expand(gf, out);
         return gf;
     }

-    bool compute(const int n_threads,
-                 struct ggml_tensor* z,
-                 bool decode_graph,
-                 struct ggml_tensor** output,
-                 struct ggml_context* output_ctx = nullptr) {
-        auto get_graph = [&]() -> struct ggml_cgraph* {
-            return build_graph(z, decode_graph);
+    sd::Tensor<float> _compute(const int n_threads,
+                               const sd::Tensor<float>& z_tensor,
+                               bool decode_graph) override {
+        auto get_graph = [&]() -> ggml_cgraph* {
+            return build_graph(z_tensor, decode_graph);
         };

-        return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
+        return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), z_tensor.dim());
     }
 };

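[Editor's note] The patchify()/unpatchify() helpers above perform a space-to-depth rearrangement ([f, b*c, h*q, w*q] -> [f, b*c*q*q, h, w]) with ggml ops. A sketch of the same index mapping on a plain row-major buffer, for one frame and assuming square patches (illustrative only, not the repository's implementation):

#include <cstdint>
#include <vector>

// Rearranges one frame from [c, h*q, w*q] to [c*q*q, h, w] (space-to-depth),
// the same reshuffle patchify() expresses with ggml reshape/permute.
std::vector<float> patchify_frame(const std::vector<float>& x,
                                  int64_t c, int64_t h, int64_t w, int64_t q) {
    std::vector<float> out(static_cast<size_t>(c * q * q * h * w));
    for (int64_t ic = 0; ic < c; ++ic)
        for (int64_t iy = 0; iy < h * q; ++iy)
            for (int64_t ix = 0; ix < w * q; ++ix) {
                // each q*q spatial patch becomes q*q extra channels
                int64_t oc = ic * q * q + (iy % q) * q + (ix % q);
                int64_t oy = iy / q, ox = ix / q;
                out[(oc * h + oy) * w + ox] = x[(ic * h * q + iy) * w * q + ix];
            }
    return out;
}

unpatchify() is the exact inverse: swap the two index expressions and copy the other way.
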
1249
src/tensor.hpp
Normal file
File diff suppressed because it is too large
127
src/tensor_ggml.hpp
Normal file
@@ -0,0 +1,127 @@
+#ifndef __SD_TENSOR_GGML_HPP__
+#define __SD_TENSOR_GGML_HPP__
+
+#include <array>
+#include <cstring>
+#include <fstream>
+#include <stdexcept>
+#include <string>
+#include <type_traits>
+
+#include "ggml.h"
+#include "tensor.hpp"
+
+namespace sd {
+
+template <typename T>
+struct GGMLTypeTraits;
+
+template <>
+struct GGMLTypeTraits<float> {
+    static constexpr ggml_type type = GGML_TYPE_F32;
+};
+
+template <>
+struct GGMLTypeTraits<ggml_fp16_t> {
+    static constexpr ggml_type type = GGML_TYPE_F16;
+};
+
+template <>
+struct GGMLTypeTraits<int32_t> {
+    static constexpr ggml_type type = GGML_TYPE_I32;
+};
+
+template <>
+struct GGMLTypeTraits<int64_t> {
+    static constexpr ggml_type type = GGML_TYPE_I64;
+};
+
+inline std::vector<int64_t> shape_from_ggml(const ggml_tensor* tensor) {
+    std::vector<int64_t> shape;
+    shape.reserve(static_cast<size_t>(ggml_n_dims(tensor)));
+    for (int i = 0; i < ggml_n_dims(tensor); ++i) {
+        shape.push_back(tensor->ne[i]);
+    }
+    return shape;
+}
+
+template <typename T>
+inline Tensor<T> make_sd_tensor_from_ggml(const ggml_tensor* tensor) {
+    if (tensor == nullptr) {
+        return {};
+    }
+    if (tensor->type != GGMLTypeTraits<T>::type) {
+        GGML_ABORT("ggml tensor type does not match sd::Tensor type");
+    }
+    Tensor<T> result(shape_from_ggml(tensor));
+    if (tensor->buffer != nullptr) {
+        ggml_backend_tensor_get(tensor, result.data(), 0, ggml_nbytes(tensor));
+    } else {
+        std::memcpy(result.data(), tensor->data, ggml_nbytes(tensor));
+    }
+    return result;
+}
+
+template <typename T>
+inline ggml_tensor* make_ggml_tensor(ggml_context* ctx, const Tensor<T>& tensor, bool copy_data = true) {
+    GGML_ASSERT(tensor.dim() > 0 && tensor.dim() <= 5);
+
+    int n_dims = std::min(static_cast<int>(tensor.dim()), GGML_MAX_DIMS);
+
+    std::array<int64_t, GGML_MAX_DIMS> ne = {1, 1, 1, 1};
+    for (int64_t i = 0; i < n_dims; ++i) {
+        ne[static_cast<size_t>(i)] = tensor.shape()[static_cast<size_t>(i)];
+    }
+
+    if (tensor.dim() == 5) {
+        ne[3] *= tensor.shape()[4];
+    }
+
+    ggml_tensor* result = ggml_new_tensor(ctx, GGMLTypeTraits<T>::type, n_dims, ne.data());
+    if (copy_data && tensor.numel() > 0) {
+        std::memcpy(result->data, tensor.data(), static_cast<size_t>(ggml_nbytes(result)));
+    }
+    return result;
+}
+
+template <typename T>
+inline Tensor<T> load_tensor_from_file_as_tensor(const std::string& file_path) {
+    std::ifstream file(file_path, std::ios::binary);
+    if (!file.is_open()) {
+        throw std::runtime_error("failed to open tensor file: " + file_path);
+    }
+
+    int32_t n_dims = 0;
+    int32_t length = 0;
+    int32_t ttype = 0;
+    file.read(reinterpret_cast<char*>(&n_dims), sizeof(n_dims));
+    file.read(reinterpret_cast<char*>(&length), sizeof(length));
+    file.read(reinterpret_cast<char*>(&ttype), sizeof(ttype));
+    if (!file.good()) {
+        throw std::runtime_error("incomplete tensor file header: " + file_path);
+    }
+    if (static_cast<ggml_type>(ttype) != GGMLTypeTraits<T>::type) {
+        throw std::invalid_argument("tensor file type does not match requested sd::Tensor type");
+    }
+
+    std::vector<int64_t> shape(4, 1);
+    for (int i = 0; i < n_dims; ++i) {
+        int32_t dim = 1;
+        file.read(reinterpret_cast<char*>(&dim), sizeof(dim));
+        shape[static_cast<size_t>(i)] = dim;
+    }
+    std::string name(static_cast<size_t>(length), '\0');
+    file.read(name.data(), length);
+
+    shape.resize(static_cast<size_t>(n_dims));
+    Tensor<T> tensor(shape);
+    file.read(reinterpret_cast<char*>(tensor.data()), static_cast<std::streamsize>(tensor.numel() * sizeof(T)));
+    if (!file.good()) {
+        throw std::runtime_error("incomplete tensor file data: " + file_path);
+    }
+    return tensor;
+}
+
+}  // namespace sd
+
+#endif

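[Editor's note] A plausible round trip through the two converters this new header adds — sketch only; it assumes an sd::Tensor<float> with the shape-initializer constructor, fill_() and numel() used elsewhere in this commit:

#include "ggml.h"
#include "tensor_ggml.hpp"

void round_trip_example() {
    ggml_init_params params{};              // zero-init: mem_buffer = nullptr, no_alloc = false
    params.mem_size = 16 * 1024 * 1024;
    ggml_context* ctx = ggml_init(params);
    GGML_ASSERT(ctx != nullptr);

    sd::Tensor<float> src({4, 3, 2});       // 3-D float tensor
    src.fill_(1.0f);
    ggml_tensor* g = sd::make_ggml_tensor(ctx, src);              // copies data into the ctx
    sd::Tensor<float> back = sd::make_sd_tensor_from_ggml<float>(g);  // copies it back out
    GGML_ASSERT(back.numel() == src.numel());

    ggml_free(ctx);
}

Note the 5-D case: make_ggml_tensor folds the fifth dimension into ne[3] (ggml has at most four), which is why callers like restore_trailing_singleton_dims() exist on the read-back path.
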
@@ -1,4 +1,4 @@
-#include <algorithm>
+#include <algorithm>
 #include <iostream>
 #include <string>
 #include <vector>
@@ -982,7 +982,7 @@ std::vector<std::string> split_with_special_tokens(
 }

 // int main() {
-//     std::string text = "I'm testing C++ token_split function. 你好,世界! 123";
+//     std::string text = "I'm testing C++ token_split function. Hello world 123";
 //     auto tokens = token_split(text);

 //     for (const auto& t : tokens) {

@@ -6,8 +6,10 @@
 #include <unordered_map>
 #include <vector>

+#include "condition_cache_utils.hpp"
 #include "denoiser.hpp"
 #include "ggml_extend.hpp"
+#include "tensor.hpp"

 struct UCacheConfig {
     bool enabled = false;
@@ -36,8 +38,8 @@ struct UCacheState {
     bool initial_step = true;
     bool skip_current_step = false;
     bool step_active = false;
-    const SDCondition* anchor_condition = nullptr;
-    std::unordered_map<const SDCondition*, UCacheCacheEntry> cache_diffs;
+    const void* anchor_condition = nullptr;
+    std::unordered_map<const void*, UCacheCacheEntry> cache_diffs;
     std::vector<float> prev_input;
     std::vector<float> prev_output;
     float output_prev_norm = 0.0f;
@@ -233,43 +235,30 @@ struct UCacheState {
         return base_threshold * multiplier;
     }

-    bool has_cache(const SDCondition* cond) const {
+    bool has_cache(const void* cond) const {
         auto it = cache_diffs.find(cond);
         return it != cache_diffs.end() && !it->second.diff.empty();
     }

-    void update_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
+    void update_cache(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
         UCacheCacheEntry& entry = cache_diffs[cond];
-        size_t ne = static_cast<size_t>(ggml_nelements(output));
-        entry.diff.resize(ne);
-        float* out_data = (float*)output->data;
-        float* in_data = (float*)input->data;
-
-        for (size_t i = 0; i < ne; ++i) {
-            entry.diff[i] = out_data[i] - in_data[i];
-        }
+        sd::store_condition_cache_diff(&entry.diff, input, output);
     }

-    void apply_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
+    void apply_cache(const void* cond, const sd::Tensor<float>& input, sd::Tensor<float>* output) {
         auto it = cache_diffs.find(cond);
         if (it == cache_diffs.end() || it->second.diff.empty()) {
             return;
         }

-        copy_ggml_tensor(output, input);
-        float* out_data = (float*)output->data;
-        const std::vector<float>& diff = it->second.diff;
-        for (size_t i = 0; i < diff.size(); ++i) {
-            out_data[i] += diff[i];
-        }
+        sd::apply_condition_cache_diff(it->second.diff, input, output);
     }

-    bool before_condition(const SDCondition* cond,
-                          ggml_tensor* input,
-                          ggml_tensor* output,
+    bool before_condition(const void* cond,
+                          const sd::Tensor<float>& input,
+                          sd::Tensor<float>* output,
                           float sigma,
                           int step_index) {
-        if (!enabled() || step_index < 0) {
+        if (!enabled() || step_index < 0 || output == nullptr) {
             return false;
         }
         if (step_index != current_step_index) {
@@ -302,12 +291,12 @@ struct UCacheState {
             return false;
         }

-        size_t ne = static_cast<size_t>(ggml_nelements(input));
+        size_t ne = static_cast<size_t>(input.numel());
         if (prev_input.size() != ne) {
             return false;
         }

-        float* input_data = (float*)input->data;
+        const float* input_data = input.data();
         last_input_change = 0.0f;
         for (size_t i = 0; i < ne; ++i) {
             last_input_change += std::fabs(input_data[i] - prev_input[i]);
@@ -354,7 +343,7 @@ struct UCacheState {
         return false;
     }

-    void after_condition(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
+    void after_condition(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
         if (!step_is_active()) {
             return;
         }
@@ -367,15 +356,15 @@ struct UCacheState {
         steps_computed_since_active++;
         consecutive_skipped_steps = 0;

-        size_t ne = static_cast<size_t>(ggml_nelements(input));
-        float* in_data = (float*)input->data;
+        size_t ne = static_cast<size_t>(input.numel());
+        const float* in_data = input.data();
         prev_input.resize(ne);
         for (size_t i = 0; i < ne; ++i) {
             prev_input[i] = in_data[i];
         }
         has_prev_input = true;

-        float* out_data = (float*)output->data;
+        const float* out_data = output.data();
         float output_change = 0.0f;
         if (has_prev_output && prev_output.size() == ne) {
             for (size_t i = 0; i < ne; ++i) {

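[Editor's note] The hunks above move the cache math into two sd:: helpers from the new condition_cache_utils.hpp whose bodies are not shown in this diff. From the removed inline loops, the technique is: store diff = output - input per condition, and on a skipped step reconstitute output = input + diff. A sketch of what those helpers presumably compute (namespaced apart so as not to claim the real implementation):

#include <vector>
#include "tensor.hpp"

namespace sd_sketch {

// Matches the removed update_cache loop: cache the per-element residual.
void store_condition_cache_diff(std::vector<float>* diff,
                                const sd::Tensor<float>& input,
                                const sd::Tensor<float>& output) {
    size_t ne = static_cast<size_t>(output.numel());
    diff->resize(ne);
    for (size_t i = 0; i < ne; ++i)
        (*diff)[i] = output.data()[i] - input.data()[i];
}

// Matches the removed apply_cache loop: copy input, add the cached residual.
// Assumes sd::Tensor<float> supports copy assignment and mutable data().
void apply_condition_cache_diff(const std::vector<float>& diff,
                                const sd::Tensor<float>& input,
                                sd::Tensor<float>* output) {
    *output = input;
    for (size_t i = 0; i < diff.size(); ++i)
        output->data()[i] += diff[i];
}

}  // namespace sd_sketch
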
131
src/unet.hpp
@@ -60,9 +60,9 @@ public:
         blocks["time_mixer"] = std::shared_ptr<GGMLBlock>(new AlphaBlender());
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
-                                struct ggml_tensor* x,
-                                struct ggml_tensor* context,
+    ggml_tensor* forward(GGMLRunnerContext* ctx,
+                         ggml_tensor* x,
+                         ggml_tensor* context,
                          int timesteps) {
         // x: [N, in_channels, h, w] aka [b*t, in_channels, h, w], t == timesteps
         // context: [N, max_position(aka n_context), hidden_size(aka context_dim)] aka [b*t, n_context, context_dim], t == timesteps
@@ -388,10 +388,10 @@ public:
         blocks["out.2"] = std::shared_ptr<GGMLBlock>(new Conv2d(model_channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
     }

-    struct ggml_tensor* resblock_forward(std::string name,
+    ggml_tensor* resblock_forward(std::string name,
                                   GGMLRunnerContext* ctx,
-                                  struct ggml_tensor* x,
-                                  struct ggml_tensor* emb,
+                                  ggml_tensor* x,
+                                  ggml_tensor* emb,
                                   int num_video_frames) {
         if (version == VERSION_SVD) {
             auto block = std::dynamic_pointer_cast<VideoResBlock>(blocks[name]);
@@ -404,10 +404,10 @@ public:
         }
     }

-    struct ggml_tensor* attention_layer_forward(std::string name,
+    ggml_tensor* attention_layer_forward(std::string name,
                                          GGMLRunnerContext* ctx,
-                                         struct ggml_tensor* x,
-                                         struct ggml_tensor* context,
+                                         ggml_tensor* x,
+                                         ggml_tensor* context,
                                          int timesteps) {
         if (version == VERSION_SVD) {
             auto block = std::dynamic_pointer_cast<SpatialVideoTransformer>(blocks[name]);
@@ -420,14 +420,14 @@ public:
         }
     }

-    struct ggml_tensor* forward(GGMLRunnerContext* ctx,
-                                struct ggml_tensor* x,
-                                struct ggml_tensor* timesteps,
-                                struct ggml_tensor* context,
-                                struct ggml_tensor* c_concat = nullptr,
-                                struct ggml_tensor* y = nullptr,
+    ggml_tensor* forward(GGMLRunnerContext* ctx,
+                         ggml_tensor* x,
+                         ggml_tensor* timesteps,
+                         ggml_tensor* context,
+                         ggml_tensor* c_concat = nullptr,
+                         ggml_tensor* y = nullptr,
                          int num_video_frames = -1,
-                         std::vector<struct ggml_tensor*> controls = {},
+                         std::vector<ggml_tensor*> controls = {},
                          float control_strength = 0.f) {
         // x: [N, in_channels, h, w] or [N, in_channels/2, h, w]
         // timesteps: [N,]
@@ -480,7 +480,7 @@ public:
         }

         // input_blocks
-        std::vector<struct ggml_tensor*> hs;
+        std::vector<ggml_tensor*> hs;

         // input block 0
         auto h = input_blocks_0_0->forward(ctx, x);
@@ -605,37 +605,38 @@ struct UNetModelRunner : public GGMLRunner {
         return "unet";
     }

-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
+    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
         unet.get_param_tensors(tensors, prefix);
     }

-    struct ggml_cgraph* build_graph(struct ggml_tensor* x,
-                                    struct ggml_tensor* timesteps,
-                                    struct ggml_tensor* context,
-                                    struct ggml_tensor* c_concat = nullptr,
-                                    struct ggml_tensor* y = nullptr,
+    ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
+                             const sd::Tensor<float>& timesteps_tensor,
+                             const sd::Tensor<float>& context_tensor = {},
+                             const sd::Tensor<float>& c_concat_tensor = {},
+                             const sd::Tensor<float>& y_tensor = {},
                              int num_video_frames = -1,
-                             std::vector<struct ggml_tensor*> controls = {},
+                             const std::vector<sd::Tensor<float>>& controls_tensor = {},
                              float control_strength = 0.f) {
-        struct ggml_cgraph* gf = new_graph_custom(UNET_GRAPH_SIZE);
+        ggml_cgraph* gf = new_graph_custom(UNET_GRAPH_SIZE);
+
+        ggml_tensor* x = make_input(x_tensor);
+        ggml_tensor* timesteps = make_input(timesteps_tensor);
+        ggml_tensor* context = make_optional_input(context_tensor);
+        ggml_tensor* c_concat = make_optional_input(c_concat_tensor);
+        ggml_tensor* y = make_optional_input(y_tensor);
+        std::vector<ggml_tensor*> controls;
+        controls.reserve(controls_tensor.size());
+        for (const auto& control_tensor : controls_tensor) {
+            controls.push_back(make_input(control_tensor));
+        }

         if (num_video_frames == -1) {
             num_video_frames = static_cast<int>(x->ne[3]);
         }

-        x = to_backend(x);
-        context = to_backend(context);
-        y = to_backend(y);
-        timesteps = to_backend(timesteps);
-        c_concat = to_backend(c_concat);
-
-        for (int i = 0; i < controls.size(); i++) {
-            controls[i] = to_backend(controls[i]);
-        }
-
         auto runner_ctx = get_context();

-        struct ggml_tensor* out = unet.forward(&runner_ctx,
+        ggml_tensor* out = unet.forward(&runner_ctx,
                                         x,
                                         timesteps,
                                         context,
@@ -650,37 +651,35 @@ struct UNetModelRunner : public GGMLRunner {
         return gf;
     }

-    bool compute(int n_threads,
-                 struct ggml_tensor* x,
-                 struct ggml_tensor* timesteps,
-                 struct ggml_tensor* context,
-                 struct ggml_tensor* c_concat,
-                 struct ggml_tensor* y,
+    sd::Tensor<float> compute(int n_threads,
+                              const sd::Tensor<float>& x,
+                              const sd::Tensor<float>& timesteps,
+                              const sd::Tensor<float>& context = {},
+                              const sd::Tensor<float>& c_concat = {},
+                              const sd::Tensor<float>& y = {},
                               int num_video_frames = -1,
-                 std::vector<struct ggml_tensor*> controls = {},
-                 float control_strength = 0.f,
-                 struct ggml_tensor** output = nullptr,
-                 struct ggml_context* output_ctx = nullptr) {
+                              const std::vector<sd::Tensor<float>>& controls = {},
+                              float control_strength = 0.f) {
         // x: [N, in_channels, h, w]
         // timesteps: [N, ]
         // context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
         // c_concat: [N, in_channels, h, w] or [1, in_channels, h, w]
         // y: [N, adm_in_channels] or [1, adm_in_channels]
-        auto get_graph = [&]() -> struct ggml_cgraph* {
+        auto get_graph = [&]() -> ggml_cgraph* {
             return build_graph(x, timesteps, context, c_concat, y, num_video_frames, controls, control_strength);
         };

-        return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
+        return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
     }

     void test() {
-        struct ggml_init_params params;
+        ggml_init_params params;
         params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
         params.mem_buffer = nullptr;
         params.no_alloc = false;

-        struct ggml_context* work_ctx = ggml_init(params);
-        GGML_ASSERT(work_ctx != nullptr);
+        ggml_context* ctx = ggml_init(params);
+        GGML_ASSERT(ctx != nullptr);

         {
             // CPU, num_video_frames = 1, x{num_video_frames, 8, 8, 8}: Pass
@@ -689,27 +688,37 @@ struct UNetModelRunner : public GGMLRunner {
             // CUDA, num_video_frames = 3, x{num_video_frames, 8, 8, 8}: nan
             int num_video_frames = 3;

-            auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 8, num_video_frames);
+            sd::Tensor<float> x({8, 8, 8, num_video_frames});
             std::vector<float> timesteps_vec(num_video_frames, 999.f);
-            auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
-            ggml_set_f32(x, 0.5f);
+            auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
+            x.fill_(0.5f);
             // print_ggml_tensor(x);

-            auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 1024, 1, num_video_frames);
-            ggml_set_f32(context, 0.5f);
+            sd::Tensor<float> context({1024, 1, num_video_frames});
+            context.fill_(0.5f);
             // print_ggml_tensor(context);

-            auto y = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 768, num_video_frames);
-            ggml_set_f32(y, 0.5f);
+            sd::Tensor<float> y({768, num_video_frames});
+            y.fill_(0.5f);
             // print_ggml_tensor(y);

-            struct ggml_tensor* out = nullptr;
+            sd::Tensor<float> out;

             int64_t t0 = ggml_time_ms();
-            compute(8, x, timesteps, context, nullptr, y, num_video_frames, {}, 0.f, &out, work_ctx);
+            auto out_opt = compute(8,
                                    x,
                                    timesteps,
                                    context,
                                    {},
                                    y,
                                    num_video_frames,
                                    {},
                                    0.f);
             int64_t t1 = ggml_time_ms();

-            print_ggml_tensor(out);
+            GGML_ASSERT(!out_opt.empty());
+            out = std::move(out_opt);
+            print_sd_tensor(out);
             LOG_DEBUG("unet test done in %lldms", t1 - t0);
         }
     }

@@ -2,6 +2,7 @@
 #include "ggml_extend.hpp"
 #include "model.h"
+#include "stable-diffusion.h"
 #include "util.h"

 struct UpscalerGGML {
     ggml_backend_t backend = nullptr; // general backend
@@ -64,6 +65,39 @@ struct UpscalerGGML {
         return true;
     }

+    sd::Tensor<float> upscale_tensor(const sd::Tensor<float>& input_tensor) {
+        sd::Tensor<float> upscaled;
+        if (tile_size <= 0 || (input_tensor.shape()[0] <= tile_size && input_tensor.shape()[1] <= tile_size)) {
+            upscaled = esrgan_upscaler->compute(n_threads, input_tensor);
+        } else {
+            auto on_processing = [&](const sd::Tensor<float>& input_tile) -> sd::Tensor<float> {
+                auto output_tile = esrgan_upscaler->compute(n_threads, input_tile);
+                if (output_tile.empty()) {
+                    LOG_ERROR("esrgan compute failed while processing a tile");
+                    return {};
+                }
+                return output_tile;
+            };
+
+            upscaled = process_tiles_2d(input_tensor,
+                                        static_cast<int>(input_tensor.shape()[0] * esrgan_upscaler->scale),
+                                        static_cast<int>(input_tensor.shape()[1] * esrgan_upscaler->scale),
+                                        esrgan_upscaler->scale,
+                                        tile_size,
+                                        tile_size,
+                                        0.25f,
+                                        false,
+                                        false,
+                                        on_processing);
+        }
+        esrgan_upscaler->free_compute_buffer();
+        if (upscaled.empty()) {
+            LOG_ERROR("esrgan compute failed");
+            return {};
+        }
+        return upscaled;
+    }
+
     sd_image_t upscale(sd_image_t input_image, uint32_t upscale_factor) {
         // upscale_factor, unused for RealESRGAN_x4plus_anime_6B.pth
         sd_image_t upscaled_image = {0, 0, 0, nullptr};
@@ -72,39 +106,17 @@ struct UpscalerGGML {
         LOG_INFO("upscaling from (%i x %i) to (%i x %i)",
                  input_image.width, input_image.height, output_width, output_height);

-        struct ggml_init_params params;
-        params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
-        params.mem_buffer = nullptr;
-        params.no_alloc = false;
-
-        // draft context
-        struct ggml_context* upscale_ctx = ggml_init(params);
-        if (!upscale_ctx) {
-            LOG_ERROR("ggml_init() failed");
+        sd::Tensor<float> input_tensor = sd_image_to_tensor(input_image);
+        sd::Tensor<float> upscaled;
+        int64_t t0 = ggml_time_ms();
+        upscaled = upscale_tensor(input_tensor);
+        if (upscaled.empty()) {
             return upscaled_image;
         }
-        // LOG_DEBUG("upscale work buffer size: %.2f MB", params.mem_size / 1024.f / 1024.f);
-        ggml_tensor* input_image_tensor = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, input_image.width, input_image.height, 3, 1);
-        sd_image_to_ggml_tensor(input_image, input_image_tensor);
-
-        ggml_tensor* upscaled = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, output_width, output_height, 3, 1);
-        auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
-            return esrgan_upscaler->compute(n_threads, in, &out);
-        };
-        int64_t t0 = ggml_time_ms();
-        sd_tiling(input_image_tensor, upscaled, esrgan_upscaler->scale, esrgan_upscaler->tile_size, 0.25f, on_tiling);
-        esrgan_upscaler->free_compute_buffer();
-        ggml_ext_tensor_clamp_inplace(upscaled, 0.f, 1.f);
-        uint8_t* upscaled_data = ggml_tensor_to_sd_image(upscaled);
-        ggml_free(upscale_ctx);
+        sd_image_t upscaled_data = tensor_to_sd_image(upscaled);
         int64_t t3 = ggml_time_ms();
         LOG_INFO("input_image_tensor upscaled, taking %.2fs", (t3 - t0) / 1000.0f);
-        upscaled_image = {
-            (uint32_t)output_width,
-            (uint32_t)output_height,
-            3,
-            upscaled_data,
-        };
+        upscaled_image = upscaled_data;
         return upscaled_image;
     }
 };

192
src/util.cpp
@ -479,158 +479,96 @@ const char* sd_get_system_info() {
|
||||
return buffer;
|
||||
}
|
||||
|
||||
sd_image_f32_t sd_image_t_to_sd_image_f32_t(sd_image_t image) {
|
||||
sd_image_f32_t converted_image;
|
||||
converted_image.width = image.width;
|
||||
converted_image.height = image.height;
|
||||
converted_image.channel = image.channel;
|
||||
sd_image_t tensor_to_sd_image(const sd::Tensor<float>& tensor, int frame_index) {
|
||||
const auto& shape = tensor.shape();
|
||||
GGML_ASSERT(shape.size() == 4 || shape.size() == 5);
|
||||
int width = static_cast<int>(shape[0]);
|
||||
int height = static_cast<int>(shape[1]);
|
||||
int channel = static_cast<int>(shape[shape.size() == 5 ? 3 : 2]);
|
||||
uint8_t* data = (uint8_t*)malloc(static_cast<size_t>(width * height * channel));
|
||||
GGML_ASSERT(data != nullptr);
|
||||
|
||||
// Allocate memory for float data
|
||||
converted_image.data = (float*)malloc(image.width * image.height * image.channel * sizeof(float));
|
||||
|
||||
for (uint32_t i = 0; i < image.width * image.height * image.channel; i++) {
|
||||
// Convert uint8_t to float
|
||||
converted_image.data[i] = (float)image.data[i];
|
||||
}
|
||||
|
||||
return converted_image;
|
||||
}
|
||||
|
||||
// Function to perform double linear interpolation
|
||||
float interpolate(float v1, float v2, float v3, float v4, float x_ratio, float y_ratio) {
|
||||
return v1 * (1 - x_ratio) * (1 - y_ratio) + v2 * x_ratio * (1 - y_ratio) + v3 * (1 - x_ratio) * y_ratio + v4 * x_ratio * y_ratio;
|
||||
}
|
||||
|
||||
sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int target_height) {
|
||||
sd_image_f32_t resized_image;
|
||||
resized_image.width = target_width;
|
||||
resized_image.height = target_height;
|
||||
resized_image.channel = image.channel;
|
||||
|
||||
// Allocate memory for resized float data
|
||||
resized_image.data = (float*)malloc(target_width * target_height * image.channel * sizeof(float));
|
||||
|
||||
for (int y = 0; y < target_height; y++) {
|
||||
for (int x = 0; x < target_width; x++) {
|
||||
float original_x = (float)x * image.width / target_width;
|
||||
float original_y = (float)y * image.height / target_height;
|
||||
|
||||
uint32_t x1 = (uint32_t)original_x;
|
||||
uint32_t y1 = (uint32_t)original_y;
|
||||
uint32_t x2 = std::min(x1 + 1, image.width - 1);
|
||||
uint32_t y2 = std::min(y1 + 1, image.height - 1);
|
||||
|
||||
for (uint32_t k = 0; k < image.channel; k++) {
|
||||
float v1 = *(image.data + y1 * image.width * image.channel + x1 * image.channel + k);
|
||||
float v2 = *(image.data + y1 * image.width * image.channel + x2 * image.channel + k);
|
||||
float v3 = *(image.data + y2 * image.width * image.channel + x1 * image.channel + k);
|
||||
float v4 = *(image.data + y2 * image.width * image.channel + x2 * image.channel + k);
|
||||
|
||||
float x_ratio = original_x - x1;
|
||||
float y_ratio = original_y - y1;
|
||||
|
||||
float value = interpolate(v1, v2, v3, v4, x_ratio, y_ratio);
|
||||
|
||||
*(resized_image.data + y * target_width * image.channel + x * image.channel + k) = value;
|
||||
for (int iw = 0; iw < width; ++iw) {
|
||||
for (int ih = 0; ih < height; ++ih) {
|
||||
for (int ic = 0; ic < channel; ++ic) {
|
||||
float value = shape.size() == 5 ? tensor.index(iw, ih, frame_index, ic, 0)
|
||||
: tensor.index(iw, ih, ic, frame_index);
|
||||
value = std::clamp(value, 0.0f, 1.0f);
|
||||
data[(ih * width + iw) * channel + ic] = static_cast<uint8_t>(std::round(value * 255.0f));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return resized_image;
|
||||
return {
|
||||
static_cast<uint32_t>(width),
|
||||
static_cast<uint32_t>(height),
|
||||
static_cast<uint32_t>(channel),
|
||||
data,
|
||||
};
|
||||
}
|
||||
|
||||
void normalize_sd_image_f32_t(sd_image_f32_t image, float means[3], float stds[3]) {
|
||||
for (uint32_t y = 0; y < image.height; y++) {
|
||||
for (uint32_t x = 0; x < image.width; x++) {
|
||||
for (uint32_t k = 0; k < image.channel; k++) {
|
||||
int index = (y * image.width + x) * image.channel + k;
|
||||
image.data[index] = (image.data[index] - means[k]) / stds[k];
|
||||
sd::Tensor<float> sd_image_to_tensor(sd_image_t image,
|
||||
int target_width,
|
||||
int target_height,
|
||||
bool scale) {
|
||||
sd::Tensor<float> tensor = sd::zeros<float>({static_cast<int64_t>(image.width),
|
||||
static_cast<int64_t>(image.height),
|
||||
static_cast<int64_t>(image.channel),
|
||||
1});
|
||||
for (uint32_t iw = 0; iw < image.width; ++iw) {
|
||||
for (uint32_t ih = 0; ih < image.height; ++ih) {
|
||||
for (uint32_t ic = 0; ic < image.channel; ++ic) {
|
||||
tensor.index(iw, ih, ic, 0) = sd_image_get_f32(image, iw, ih, ic, scale);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (target_width >= 0 && target_height >= 0 &&
|
||||
(tensor.shape()[0] != target_width || tensor.shape()[1] != target_height)) {
|
||||
tensor = sd::ops::interpolate(tensor,
|
||||
{target_width,
|
||||
target_height,
|
||||
tensor.shape()[2],
|
||||
tensor.shape()[3]});
|
||||
}
|
||||
return tensor;
|
||||
}
|
||||
|
||||
// Constants for means and std
|
||||
float means[3] = {0.48145466f, 0.4578275f, 0.40821073f};
|
||||
float stds[3] = {0.26862954f, 0.26130258f, 0.27577711f};
|
||||
|
||||
// Function to clip and preprocess sd_image_f32_t
|
||||
sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int target_height) {
|
||||
float width_scale = (float)target_width / image.width;
|
||||
float height_scale = (float)target_height / image.height;
|
||||
sd::Tensor<float> clip_preprocess(const sd::Tensor<float>& image, int target_width, int target_height) {
|
||||
GGML_ASSERT(image.dim() == 4);
|
||||
GGML_ASSERT(image.shape()[2] == 3);
|
||||
GGML_ASSERT(image.shape()[3] == 1);
|
||||
GGML_ASSERT(target_width > 0 && target_height > 0);
|
||||
|
||||
float width_scale = static_cast<float>(target_width) / static_cast<float>(image.shape()[0]);
|
||||
float height_scale = static_cast<float>(target_height) / static_cast<float>(image.shape()[1]);
|
||||
float scale = std::fmax(width_scale, height_scale);
|
||||
|
||||
// Interpolation
|
||||
int resized_width = (int)(scale * image.width);
|
||||
int resized_height = (int)(scale * image.height);
|
float* resized_data = (float*)malloc(resized_width * resized_height * image.channel * sizeof(float));
int64_t resized_width = static_cast<int64_t>(scale * static_cast<float>(image.shape()[0]));
int64_t resized_height = static_cast<int64_t>(scale * static_cast<float>(image.shape()[1]));

for (int y = 0; y < resized_height; y++) {
for (int x = 0; x < resized_width; x++) {
float original_x = (float)x * image.width / resized_width;
float original_y = (float)y * image.height / resized_height;
sd::Tensor<float> resized = sd::ops::interpolate(
image,
{resized_width, resized_height, image.shape()[2], image.shape()[3]});

uint32_t x1 = (uint32_t)original_x;
uint32_t y1 = (uint32_t)original_y;
uint32_t x2 = std::min(x1 + 1, image.width - 1);
uint32_t y2 = std::min(y1 + 1, image.height - 1);
int64_t h_offset = std::max<int64_t>((resized_height - target_height) / 2, 0);
int64_t w_offset = std::max<int64_t>((resized_width - target_width) / 2, 0);

for (uint32_t k = 0; k < image.channel; k++) {
float v1 = *(image.data + y1 * image.width * image.channel + x1 * image.channel + k);
float v2 = *(image.data + y1 * image.width * image.channel + x2 * image.channel + k);
float v3 = *(image.data + y2 * image.width * image.channel + x1 * image.channel + k);
float v4 = *(image.data + y2 * image.width * image.channel + x2 * image.channel + k);

float x_ratio = original_x - x1;
float y_ratio = original_y - y1;

float value = interpolate(v1, v2, v3, v4, x_ratio, y_ratio);

*(resized_data + y * resized_width * image.channel + x * image.channel + k) = value;
sd::Tensor<float> cropped({target_width, target_height, image.shape()[2], image.shape()[3]});
for (int64_t y = 0; y < target_height; ++y) {
for (int64_t x = 0; x < target_width; ++x) {
for (int64_t c = 0; c < image.shape()[2]; ++c) {
cropped.index(x, y, c, 0) = resized.index(x + w_offset, y + h_offset, c, 0);
}
}
}

// Clip and preprocess
int h_offset = std::max((int)(resized_height - target_height) / 2, 0);
int w_offset = std::max((int)(resized_width - target_width) / 2, 0);

sd_image_f32_t result;
result.width = target_width;
result.height = target_height;
result.channel = image.channel;
result.data = (float*)malloc(target_height * target_width * image.channel * sizeof(float));

for (uint32_t k = 0; k < image.channel; k++) {
for (uint32_t i = 0; i < result.height; i++) {
for (uint32_t j = 0; j < result.width; j++) {
int src_y = std::min(static_cast<int>(i + h_offset), resized_height - 1);
int src_x = std::min(static_cast<int>(j + w_offset), resized_width - 1);
*(result.data + i * result.width * image.channel + j * image.channel + k) =
fmin(fmax(*(resized_data + src_y * resized_width * image.channel + src_x * image.channel + k), 0.0f), 255.0f) / 255.0f;
}
}
}

// Free allocated memory
free(resized_data);

// Normalize
for (uint32_t k = 0; k < image.channel; k++) {
for (uint32_t i = 0; i < result.height; i++) {
for (uint32_t j = 0; j < result.width; j++) {
// *(result.data + i * size * image.channel + j * image.channel + k) = 0.5f;
int offset = i * result.width * image.channel + j * image.channel + k;
float value = *(result.data + offset);
value = (value - means[k]) / stds[k];
// value = 0.5f;
*(result.data + offset) = value;
}
}
}

return result;
sd::Tensor<float> normalized = sd::ops::clamp(cropped, 0.0f, 1.0f);
sd::Tensor<float> mean({1, 1, 3, 1}, {means[0], means[1], means[2]});
sd::Tensor<float> std({1, 1, 3, 1}, {stds[0], stds[1], stds[2]});
return (normalized - mean) / std;
}
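The resize-then-normalize pipeline the old and new implementations above share is easier to follow in isolation. Below is a minimal, self-contained sketch of the same idea: bilinear interpolation of a float image followed by per-channel (value/255 - mean)/std normalization. It assumes a plain interleaved HWC float buffer; the type and function names are illustrative, not the helpers this repo defines.

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Minimal HWC float image, values in [0, 255] (illustrative type).
struct ImageF32 {
    int width = 0, height = 0, channels = 0;
    std::vector<float> data; // size = width * height * channels
    float at(int x, int y, int c) const {
        return data[(static_cast<size_t>(y) * width + x) * channels + c];
    }
};

// Bilinear resize followed by CLIP-style normalization:
// out = (clamp(v, 0, 255) / 255 - mean[c]) / std[c]
ImageF32 resize_normalize(const ImageF32& img, int out_w, int out_h,
                          const float mean[3], const float stdev[3]) {
    ImageF32 out{out_w, out_h, img.channels,
                 std::vector<float>(static_cast<size_t>(out_w) * out_h * img.channels)};
    for (int y = 0; y < out_h; y++) {
        for (int x = 0; x < out_w; x++) {
            // Map the output pixel back into the source image.
            float sx = static_cast<float>(x) * img.width / out_w;
            float sy = static_cast<float>(y) * img.height / out_h;
            int x1 = static_cast<int>(sx), y1 = static_cast<int>(sy);
            int x2 = std::min(x1 + 1, img.width - 1);
            int y2 = std::min(y1 + 1, img.height - 1);
            float fx = sx - x1, fy = sy - y1;
            for (int c = 0; c < img.channels; c++) {
                // Blend the four nearest neighbors.
                float top = img.at(x1, y1, c) * (1 - fx) + img.at(x2, y1, c) * fx;
                float bot = img.at(x1, y2, c) * (1 - fx) + img.at(x2, y2, c) * fx;
                float v   = top * (1 - fy) + bot * fy;
                v = std::min(std::max(v, 0.0f), 255.0f) / 255.0f;
                out.data[(static_cast<size_t>(y) * out_w + x) * img.channels + c] =
                    (v - mean[c]) / stdev[c];
            }
        }
    }
    return out;
}
```

The tensor-based rewrite expresses the same steps as `sd::ops::interpolate`, a center crop, `sd::ops::clamp`, and broadcasted mean/std arithmetic.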

// Ref: https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/cad87bf4e3e0b0a759afa94e933527c3123d59bc/modules/prompt_parser.py#L345
19
src/util.h
@ -7,6 +7,7 @@
#include <vector>

#include "stable-diffusion.h"
#include "tensor.hpp"

#define SAFE_STR(s) ((s) ? (s) : "")
#define BOOL_STR(b) ((b) ? "true" : "false")
@ -29,20 +30,14 @@ std::string utf32_to_utf8(const std::u32string& utf32_str);
std::u32string unicode_value_to_utf32(int unicode_value);
// std::string sd_basename(const std::string& path);

typedef struct {
uint32_t width;
uint32_t height;
uint32_t channel;
float* data;
} sd_image_f32_t;
sd_image_t tensor_to_sd_image(const sd::Tensor<float>& tensor, int frame_index = 0);

void normalize_sd_image_f32_t(sd_image_f32_t image, float means[3], float stds[3]);
sd::Tensor<float> sd_image_to_tensor(sd_image_t image,
int target_width = -1,
int target_height = -1,
bool scale = true);

sd_image_f32_t sd_image_t_to_sd_image_f32_t(sd_image_t image);

sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int target_height);

sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int target_height);
sd::Tensor<float> clip_preprocess(const sd::Tensor<float>& image, int target_width, int target_height);
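A hypothetical call site for the new tensor-based overload, assuming a `[W, H, C, 1]` float tensor produced by `sd_image_to_tensor` and the usual 224x224 CLIP input size (both are assumptions for illustration, not taken from this diff):

```cpp
// init_image: an sd_image_t loaded elsewhere (assumed).
sd::Tensor<float> input   = sd_image_to_tensor(init_image);
sd::Tensor<float> clip_in = clip_preprocess(input, 224, 224);
```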

class MmapWrapper {
public:
989
src/vae.hpp
File diff suppressed because it is too large
460
src/wan.hpp
@ -25,7 +25,7 @@ namespace WAN {
std::tuple<int, int, int> dilation;
bool bias;

void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
params["weight"] = ggml_new_tensor_4d(ctx,
GGML_TYPE_F16,
std::get<2>(kernel_size),
@ -53,11 +53,11 @@ namespace WAN {
dilation(std::move(dilation)),
bias(bias) {}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x, struct ggml_tensor* cache_x = nullptr) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, ggml_tensor* cache_x = nullptr) {
// x: [N*IC, ID, IH, IW]
// result: x: [N*OC, ID, IH, IW]
struct ggml_tensor* w = params["weight"];
struct ggml_tensor* b = nullptr;
ggml_tensor* w = params["weight"];
ggml_tensor* b = nullptr;
if (bias) {
b = params["bias"];
}
@ -86,7 +86,7 @@ namespace WAN {
protected:
int64_t dim;

void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
ggml_type wtype = GGML_TYPE_F32;
auto iter = tensor_storage_map.find(prefix + "gamma");
if (iter != tensor_storage_map.end()) {
@ -100,11 +100,11 @@ namespace WAN {
RMS_norm(int64_t dim)
: dim(dim) {}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) override {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) override {
// x: [N*IC, ID, IH, IW], IC == dim
// assert N == 1

struct ggml_tensor* w = params["gamma"];
ggml_tensor* w = params["gamma"];
w = ggml_reshape_1d(ctx->ggml_ctx, w, ggml_nelements(w));
auto h = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, x, 3, 0, 1, 2)); // [ID, IH, IW, N*IC]
h = ggml_rms_norm(ctx->ggml_ctx, h, 1e-12f);
@ -148,10 +148,10 @@ namespace WAN {
}
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
int64_t b,
std::vector<struct ggml_tensor*>& feat_cache,
std::vector<ggml_tensor*>& feat_cache,
int& feat_idx,
int chunk_idx) {
// x: [b*c, t, h, w]
@ -254,8 +254,8 @@ namespace WAN {
GGML_ASSERT(in_channels * factor % out_channels == 0);
group_size = in_channels * factor / out_channels;
}
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
int64_t B = 1) {
// x: [B*IC, T, H, W]
// return: [B*OC, T/factor_t, H/factor_s, W/factor_s]
@ -301,8 +301,8 @@ namespace WAN {
GGML_ASSERT(out_channels * factor % in_channels == 0);
repeats = out_channels * factor / in_channels;
}
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
bool first_chunk = false,
int64_t B = 1) {
// x: [B*IC, T, H, W]
@ -356,14 +356,14 @@ namespace WAN {
}
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
int64_t b,
std::vector<struct ggml_tensor*>& feat_cache,
std::vector<ggml_tensor*>& feat_cache,
int& feat_idx) {
// x: [b*c, t, h, w]
GGML_ASSERT(b == 1);
struct ggml_tensor* h = x;
ggml_tensor* h = x;
if (in_dim != out_dim) {
auto shortcut = std::dynamic_pointer_cast<CausalConv3d>(blocks["shortcut"]);

@ -430,15 +430,15 @@ namespace WAN {
}
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
int64_t b,
std::vector<struct ggml_tensor*>& feat_cache,
std::vector<ggml_tensor*>& feat_cache,
int& feat_idx,
int chunk_idx) {
// x: [b*c, t, h, w]
GGML_ASSERT(b == 1);
struct ggml_tensor* x_copy = x;
ggml_tensor* x_copy = x;

auto avg_shortcut = std::dynamic_pointer_cast<AvgDown3D>(blocks["avg_shortcut"]);

@ -492,15 +492,15 @@ namespace WAN {
}
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
int64_t b,
std::vector<struct ggml_tensor*>& feat_cache,
std::vector<ggml_tensor*>& feat_cache,
int& feat_idx,
int chunk_idx) {
// x: [b*c, t, h, w]
GGML_ASSERT(b == 1);
struct ggml_tensor* x_copy = x;
ggml_tensor* x_copy = x;

int i = 0;
for (; i < mult; i++) {
@ -537,8 +537,8 @@ namespace WAN {
blocks["proj"] = std::shared_ptr<GGMLBlock>(new Conv2d(dim, dim, {1, 1}));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
int64_t b) {
// x: [b*c, t, h, w]
GGML_ASSERT(b == 1);
@ -659,10 +659,10 @@ namespace WAN {
blocks["head.2"] = std::shared_ptr<GGMLBlock>(new CausalConv3d(out_dim, z_dim, {3, 3, 3}, {1, 1, 1}, {1, 1, 1}));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
int64_t b,
std::vector<struct ggml_tensor*>& feat_cache,
std::vector<ggml_tensor*>& feat_cache,
int& feat_idx,
int chunk_idx) {
// x: [b*c, t, h, w]
@ -830,10 +830,10 @@ namespace WAN {
}
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
int64_t b,
std::vector<struct ggml_tensor*>& feat_cache,
std::vector<ggml_tensor*>& feat_cache,
int& feat_idx,
int chunk_idx) {
// x: [b*c, t, h, w]
@ -934,16 +934,16 @@ namespace WAN {

int _conv_num = 33;
int _conv_idx = 0;
std::vector<struct ggml_tensor*> _feat_map;
std::vector<ggml_tensor*> _feat_map;
int _enc_conv_num = 28;
int _enc_conv_idx = 0;
std::vector<struct ggml_tensor*> _enc_feat_map;
std::vector<ggml_tensor*> _enc_feat_map;

void clear_cache() {
_conv_idx = 0;
_feat_map = std::vector<struct ggml_tensor*>(_conv_num, nullptr);
_feat_map = std::vector<ggml_tensor*>(_conv_num, nullptr);
_enc_conv_idx = 0;
_enc_feat_map = std::vector<struct ggml_tensor*>(_enc_conv_num, nullptr);
_enc_feat_map = std::vector<ggml_tensor*>(_enc_conv_num, nullptr);
}

public:
@ -966,8 +966,8 @@ namespace WAN {
blocks["conv2"] = std::shared_ptr<GGMLBlock>(new CausalConv3d(z_dim, z_dim, {1, 1, 1}));
}

struct ggml_tensor* patchify(struct ggml_context* ctx,
struct ggml_tensor* x,
ggml_tensor* patchify(ggml_context* ctx,
ggml_tensor* x,
int64_t patch_size,
int64_t b = 1) {
// x: [b*c, f, h*q, w*r]
@ -993,8 +993,8 @@ namespace WAN {
return x;
}

struct ggml_tensor* unpatchify(struct ggml_context* ctx,
struct ggml_tensor* x,
ggml_tensor* unpatchify(ggml_context* ctx,
ggml_tensor* x,
int64_t patch_size,
int64_t b = 1) {
// x: [b*c*r*q, f, h, w]
@ -1019,8 +1019,8 @@ namespace WAN {
return x;
}
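Per the shape comments above, patchify is a space-to-depth rearrangement: it folds each patch_size x patch_size spatial neighborhood into the channel axis ([b*c, f, h*q, w*r] -> [b*c*r*q, f, h, w]), and unpatchify inverts it. A reference loop for one frame and batch follows; the exact packed-channel ordering inside wan.hpp is not shown in this diff, so the ordering here is an assumption for illustration only.

```cpp
#include <cstddef>
#include <vector>

// Space-to-depth reference (assumed channel packing, row-major layout):
// in: [c, h*p, w*p] -> out: [c*p*p, h, w], p = patch_size.
std::vector<float> patchify_ref(const std::vector<float>& x,
                                int c, int h, int w, int p) {
    std::vector<float> out(static_cast<size_t>(c) * p * p * h * w);
    for (int ci = 0; ci < c; ci++)
        for (int y = 0; y < h * p; y++)
            for (int xw = 0; xw < w * p; xw++) {
                int oc = (ci * p + y % p) * p + xw % p; // packed output channel (assumed order)
                size_t src = (static_cast<size_t>(ci) * h * p + y) * (w * p) + xw;
                size_t dst = (static_cast<size_t>(oc) * h + y / p) * w + xw / p;
                out[dst] = x[src];
            }
    return out;
}
```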

struct ggml_tensor* encode(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
ggml_tensor* encode(GGMLRunnerContext* ctx,
ggml_tensor* x,
int64_t b = 1) {
// x: [b*c, t, h, w]
GGML_ASSERT(b == 1);
@ -1037,7 +1037,7 @@ namespace WAN {

int64_t t = x->ne[2];
int64_t iter_ = 1 + (t - 1) / 4;
struct ggml_tensor* out;
ggml_tensor* out;
for (int i = 0; i < iter_; i++) {
_enc_conv_idx = 0;
if (i == 0) {
@ -1055,8 +1055,8 @@ namespace WAN {
return mu;
}

struct ggml_tensor* decode(GGMLRunnerContext* ctx,
struct ggml_tensor* z,
ggml_tensor* decode(GGMLRunnerContext* ctx,
ggml_tensor* z,
int64_t b = 1) {
// z: [b*c, t, h, w]
GGML_ASSERT(b == 1);
@ -1068,7 +1068,7 @@ namespace WAN {

int64_t iter_ = z->ne[2];
auto x = conv2->forward(ctx, z);
struct ggml_tensor* out;
ggml_tensor* out;
for (int i = 0; i < iter_; i++) {
_conv_idx = 0;
if (i == 0) {
@ -1087,8 +1087,8 @@ namespace WAN {
return out;
}

struct ggml_tensor* decode_partial(GGMLRunnerContext* ctx,
struct ggml_tensor* z,
ggml_tensor* decode_partial(GGMLRunnerContext* ctx,
ggml_tensor* z,
int i,
int64_t b = 1) {
// z: [b*c, t, h, w]
@ -1109,6 +1109,7 @@ namespace WAN {
};

struct WanVAERunner : public VAE {
float scale_factor = 1.0f;
bool decode_only = true;
WanVAE ae;

@ -1118,7 +1119,7 @@ namespace WAN {
const std::string prefix = "",
bool decode_only = false,
SDVersion version = VERSION_WAN2)
: decode_only(decode_only), ae(decode_only, version == VERSION_WAN2_2_TI2V), VAE(backend, offload_params_to_cpu) {
: decode_only(decode_only), ae(decode_only, version == VERSION_WAN2_2_TI2V), VAE(version, backend, offload_params_to_cpu) {
ae.init(params_ctx, tensor_storage_map, prefix);
}

@ -1126,26 +1127,82 @@ namespace WAN {
return "wan_vae";
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) override {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) override {
ae.get_param_tensors(tensors, prefix);
}

struct ggml_cgraph* build_graph(struct ggml_tensor* z, bool decode_graph) {
struct ggml_cgraph* gf = new_graph_custom(10240 * z->ne[2]);
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
SD_UNUSED(rng);
return vae_output;
}

z = to_backend(z);
std::pair<sd::Tensor<float>, sd::Tensor<float>> get_latents_mean_std(const sd::Tensor<float>& latents) {
int channel_dim = latents.dim() == 5 ? 3 : 2;
std::vector<int64_t> stats_shape(static_cast<size_t>(latents.dim()), 1);
if (latents.shape()[channel_dim] == 16) { // Wan2.1 VAE
stats_shape[static_cast<size_t>(channel_dim)] = 16;

auto mean_tensor = sd::Tensor<float>::from_vector({-0.7571f, -0.7089f, -0.9113f, 0.1075f, -0.1745f, 0.9653f, -0.1517f, 1.5508f,
0.4134f, -0.0715f, 0.5517f, -0.3632f, -0.1922f, -0.9497f, 0.2503f, -0.2921f});
mean_tensor.reshape_(stats_shape);
auto std_tensor = sd::Tensor<float>::from_vector({2.8184f, 1.4541f, 2.3275f, 2.6558f, 1.2196f, 1.7708f, 2.6052f, 2.0743f,
3.2687f, 2.1526f, 2.8652f, 1.5579f, 1.6382f, 1.1253f, 2.8251f, 1.9160f});
std_tensor.reshape_(stats_shape);
return {std::move(mean_tensor), std::move(std_tensor)};
}
if (latents.shape()[channel_dim] == 48) { // Wan2.2 VAE
stats_shape[static_cast<size_t>(channel_dim)] = 48;

auto mean_tensor = sd::Tensor<float>::from_vector({-0.2289f, -0.0052f, -0.1323f, -0.2339f, -0.2799f, 0.0174f, 0.1838f, 0.1557f,
-0.1382f, 0.0542f, 0.2813f, 0.0891f, 0.1570f, -0.0098f, 0.0375f, -0.1825f,
-0.2246f, -0.1207f, -0.0698f, 0.5109f, 0.2665f, -0.2108f, -0.2158f, 0.2502f,
-0.2055f, -0.0322f, 0.1109f, 0.1567f, -0.0729f, 0.0899f, -0.2799f, -0.1230f,
-0.0313f, -0.1649f, 0.0117f, 0.0723f, -0.2839f, -0.2083f, -0.0520f, 0.3748f,
0.0152f, 0.1957f, 0.1433f, -0.2944f, 0.3573f, -0.0548f, -0.1681f, -0.0667f});
mean_tensor.reshape_(stats_shape);
auto std_tensor = sd::Tensor<float>::from_vector({0.4765f, 1.0364f, 0.4514f, 1.1677f, 0.5313f, 0.4990f, 0.4818f, 0.5013f,
0.8158f, 1.0344f, 0.5894f, 1.0901f, 0.6885f, 0.6165f, 0.8454f, 0.4978f,
0.5759f, 0.3523f, 0.7135f, 0.6804f, 0.5833f, 1.4146f, 0.8986f, 0.5659f,
0.7069f, 0.5338f, 0.4889f, 0.4917f, 0.4069f, 0.4999f, 0.6866f, 0.4093f,
0.5709f, 0.6065f, 0.6415f, 0.4944f, 0.5726f, 1.2042f, 0.5458f, 1.6887f,
0.3971f, 1.0600f, 0.3943f, 0.5537f, 0.5444f, 0.4089f, 0.7468f, 0.7744f});
std_tensor.reshape_(stats_shape);
return {std::move(mean_tensor), std::move(std_tensor)};
}
GGML_ABORT("unexpected latent channel dimension %lld for version %d",
(long long)latents.shape()[channel_dim],
version);
}

sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents);
return (latents * std_tensor) / scale_factor + mean_tensor;
}

sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents);
return ((latents - mean_tensor) * scale_factor) / std_tensor;
}
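The two conversions above are exact inverses: one standardizes VAE latents per channel before diffusion, the other undoes it before decoding. A quick round-trip check with the Wan2.1 channel-0 statistics listed above (mean -0.7571, std 2.8184) and the default scale_factor of 1.0:

```cpp
// Per-channel round trip for a single latent value (Wan2.1 channel 0):
float z_vae  = 0.5f;
float z_diff = (z_vae - (-0.7571f)) * 1.0f / 2.8184f; // vae_to_diffusion ~ 0.4461
float back   = z_diff * 2.8184f / 1.0f + (-0.7571f);  // diffusion_to_vae ~ 0.5
```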

int get_encoder_output_channels(int input_channels) {
return static_cast<int>(ae.z_dim);
}

ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) {
ggml_cgraph* gf = new_graph_custom(10240 * z_tensor.shape()[2]);
ggml_tensor* z = make_input(z_tensor);

auto runner_ctx = get_context();

struct ggml_tensor* out = decode_graph ? ae.decode(&runner_ctx, z) : ae.encode(&runner_ctx, z);
ggml_tensor* out = decode_graph ? ae.decode(&runner_ctx, z) : ae.encode(&runner_ctx, z);

ggml_build_forward_expand(gf, out);

return gf;
}
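This diff repeats one runner pattern across the VAE, DiT, and z_image graphs: build_graph no longer receives backend ggml tensors but host-side sd::Tensor values, which make_input uploads as graph inputs. A distilled sketch of the pattern, using only the helper names that appear in this diff (the graph-size constant and decode call are placeholders):

```cpp
// Sketch of the tensor-based graph-building pattern used throughout this diff.
ggml_cgraph* build_graph_sketch(const sd::Tensor<float>& host_input) {
    ggml_cgraph* gf = new_graph_custom(20480);      // per-model node budget
    ggml_tensor* x  = make_input(host_input);       // host tensor -> backend input
    auto runner_ctx = get_context();                // wraps the ggml context/backend
    ggml_tensor* out = ae.decode(&runner_ctx, x);   // model forward (placeholder)
    ggml_build_forward_expand(gf, out);             // record the forward graph
    return gf;
}
```

Optional inputs (context, clip_fea, and the like) go through make_optional_input instead, which tolerates an empty tensor.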

struct ggml_cgraph* build_graph_partial(struct ggml_tensor* z, bool decode_graph, int i) {
struct ggml_cgraph* gf = new_graph_custom(20480);
ggml_cgraph* build_graph_partial(const sd::Tensor<float>& z_tensor, bool decode_graph, int i) {
ggml_cgraph* gf = new_graph_custom(20480);

ae.clear_cache();

@ -1154,11 +1211,11 @@ namespace WAN {
ae._feat_map[feat_idx] = feat_cache;
}

z = to_backend(z);
ggml_tensor* z = make_input(z_tensor);

auto runner_ctx = get_context();

struct ggml_tensor* out = decode_graph ? ae.decode_partial(&runner_ctx, z, i) : ae.encode(&runner_ctx, z);
ggml_tensor* out = decode_graph ? ae.decode_partial(&runner_ctx, z, i) : ae.encode(&runner_ctx, z);

for (size_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
ggml_tensor* feat_cache = ae._feat_map[feat_idx];
@ -1173,86 +1230,85 @@ namespace WAN {
return gf;
}

bool compute(const int n_threads,
struct ggml_tensor* z,
bool decode_graph,
struct ggml_tensor** output,
struct ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> _compute(const int n_threads,
const sd::Tensor<float>& z,
bool decode_graph) override {
if (true) {
auto get_graph = [&]() -> struct ggml_cgraph* {
sd::Tensor<float> input;
if (z.dim() == 4) {
input = z.unsqueeze(2);
}
auto get_graph = [&]() -> ggml_cgraph* {
if (input.empty()) {
return build_graph(z, decode_graph);
} else {
return build_graph(input, decode_graph);
}
};
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
auto result = restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, true),
input.empty() ? z.dim() : input.dim());
if (!result.empty() && z.dim() == 4) {
result.squeeze_(2);
}
return result;
} else { // chunk 1 result is weird
ae.clear_cache();
int64_t t = z->ne[2];
int64_t t = z.shape()[2];
int i = 0;
auto get_graph = [&]() -> struct ggml_cgraph* {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph_partial(z, decode_graph, i);
};
struct ggml_tensor* out = nullptr;
bool res = GGMLRunner::compute(get_graph, n_threads, true, &out, output_ctx);
auto out_opt = GGMLRunner::compute<float>(get_graph, n_threads, true);
if (!out_opt.has_value()) {
return {};
}
sd::Tensor<float> out = std::move(*out_opt);
ae.clear_cache();
if (t == 1) {
*output = out;
return res;
return out;
}

*output = ggml_new_tensor_4d(output_ctx, GGML_TYPE_F32, out->ne[0], out->ne[1], (t - 1) * 4 + 1, out->ne[3]);

auto copy_to_output = [&]() {
for (int64_t i3 = 0; i3 < out->ne[3]; i3++) {
for (int64_t i2 = 0; i2 < out->ne[2]; i2++) {
for (int64_t i1 = 0; i1 < out->ne[1]; i1++) {
for (int64_t i0 = 0; i0 < out->ne[0]; i0++) {
float value = ggml_ext_tensor_get_f32(out, i0, i1, i2, i3);
int64_t offset = (i == 0) ? 0 : (1 + (i - 1) * 4);
ggml_ext_tensor_set_f32(*output, value, i0, i1, offset + i2, i3);
}
}
}
}
};

copy_to_output();

out = ggml_new_tensor_4d(output_ctx, GGML_TYPE_F32, out->ne[0], out->ne[1], 4, out->ne[3]);
sd::Tensor<float> output = std::move(out);

for (i = 1; i < t; i++) {
res = res || GGMLRunner::compute(get_graph, n_threads, true, &out);
auto chunk_opt = GGMLRunner::compute<float>(get_graph, n_threads, true);
if (!chunk_opt.has_value()) {
return {};
}
out = std::move(*chunk_opt);
ae.clear_cache();
copy_to_output();
output = sd::ops::concat(output, out, 2);
}
free_cache_ctx_and_buffer();
return res;
return output;
}
}
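The frame bookkeeping in the (currently disabled) chunked path follows directly from the temporal compression of the Wan VAE: the first latent frame decodes to a single video frame and every later latent frame to four, which is where both the output size `(t - 1) * 4 + 1` and the chunk offset `1 + (i - 1) * 4` come from. A worked instance:

```cpp
// For t latent frames the decoded video holds (t - 1) * 4 + 1 frames:
int64_t t = 21;                          // latent frames
int64_t video_frames = (t - 1) * 4 + 1;  // = 81 video frames
```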

void test() {
struct ggml_init_params params;
ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
params.mem_buffer = nullptr;
params.no_alloc = false;

struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);

if (true) {
// cpu f32, pass
// cpu f16, pass
// cuda f16, pass
// cuda f32, pass
auto z = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 104, 60, 2, 16);
ggml_set_f32(z, 0.5f);
z = load_tensor_from_file(work_ctx, "wan_vae_z.bin");
print_ggml_tensor(z);
struct ggml_tensor* out = nullptr;
auto z = sd::load_tensor_from_file_as_tensor<float>("wan_vae_z.bin");
print_sd_tensor(z);
sd::Tensor<float> out;

int64_t t0 = ggml_time_ms();
compute(8, z, true, &out, work_ctx);
auto out_opt = _compute(8, z, true);
int64_t t1 = ggml_time_ms();

print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("decode test done in %ldms", t1 - t0);
}
};
@ -1314,10 +1370,10 @@ namespace WAN {
}
}

virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* pe,
struct ggml_tensor* mask = nullptr) {
virtual ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* pe,
ggml_tensor* mask = nullptr) {
// x: [N, n_token, dim]
// pe: [n_token, d_head/2, 2, 2]
// return [N, n_token, dim]
@ -1355,9 +1411,9 @@ namespace WAN {
bool qk_norm = true,
float eps = 1e-6)
: WanSelfAttention(dim, num_heads, qk_norm, eps) {}
virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* context,
virtual ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* context,
int64_t context_img_len) = 0;
};

@ -1368,9 +1424,9 @@ namespace WAN {
bool qk_norm = true,
float eps = 1e-6)
: WanCrossAttention(dim, num_heads, qk_norm, eps) {}
struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* context,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* context,
int64_t context_img_len) override {
// x: [N, n_token, dim]
// context: [N, n_context, dim]
@ -1416,9 +1472,9 @@ namespace WAN {
}
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* context,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* context,
int64_t context_img_len) override {
// x: [N, n_token, dim]
// context: [N, context_img_len + context_txt_len, dim]
@ -1464,7 +1520,7 @@ namespace WAN {
}
};

static struct ggml_tensor* modulate_add(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* e) {
static ggml_tensor* modulate_add(ggml_context* ctx, ggml_tensor* x, ggml_tensor* e) {
// x: [N, n_token, dim]
// e: [N, 1, dim] or [N, T, 1, dim]
if (ggml_n_dims(e) == 3) {
@ -1478,7 +1534,7 @@ namespace WAN {
return x;
}

static struct ggml_tensor* modulate_mul(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* e) {
static ggml_tensor* modulate_mul(ggml_context* ctx, ggml_tensor* x, ggml_tensor* e) {
// x: [N, n_token, dim]
// e: [N, 1, dim] or [N, T, 1, dim]
if (ggml_n_dims(e) == 3) {
@ -1496,7 +1552,7 @@ namespace WAN {
protected:
int64_t dim;

void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1);
}
@ -1530,11 +1586,11 @@ namespace WAN {
blocks["ffn.2"] = std::shared_ptr<GGMLBlock>(new Linear(ffn_dim, dim));
}

virtual struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* e,
struct ggml_tensor* pe,
struct ggml_tensor* context,
virtual ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* e,
ggml_tensor* pe,
ggml_tensor* context,
int64_t context_img_len = 257) {
// x: [N, n_token, dim]
// e: [N, 6, dim] or [N, T, 6, dim]
@ -1584,7 +1640,7 @@ namespace WAN {
class VaceWanAttentionBlock : public WanAttentionBlock {
protected:
int block_id;
void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1);
}
@ -1606,11 +1662,11 @@ namespace WAN {
}

std::pair<ggml_tensor*, ggml_tensor*> forward(GGMLRunnerContext* ctx,
struct ggml_tensor* c,
struct ggml_tensor* x,
struct ggml_tensor* e,
struct ggml_tensor* pe,
struct ggml_tensor* context,
ggml_tensor* c,
ggml_tensor* x,
ggml_tensor* e,
ggml_tensor* pe,
ggml_tensor* context,
int64_t context_img_len = 257) {
// x: [N, n_token, dim]
// e: [N, 6, dim] or [N, T, 6, dim]
@ -1636,7 +1692,7 @@ namespace WAN {
protected:
int64_t dim;

void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
enum ggml_type wtype = get_type(prefix + "weight", tensor_storage_map, GGML_TYPE_F32);
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 2, 1);
}
@ -1653,9 +1709,9 @@ namespace WAN {
blocks["head"] = std::shared_ptr<GGMLBlock>(new Linear(dim, out_dim));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* e) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* e) {
// x: [N, n_token, dim]
// e: [N, dim] or [N, T, dim]
// return [N, n_token, out_dim]
@ -1683,7 +1739,7 @@ namespace WAN {
int64_t in_dim;
int64_t flf_pos_embed_token_number;

void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
if (flf_pos_embed_token_number > 0) {
params["emb_pos"] = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, in_dim, flf_pos_embed_token_number, 1);
}
@ -1701,8 +1757,8 @@ namespace WAN {
blocks["proj.4"] = std::shared_ptr<GGMLBlock>(new LayerNorm(out_dim));
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* image_embeds) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* image_embeds) {
if (flf_pos_embed_token_number > 0) {
auto emb_pos = params["emb_pos"];

@ -1821,8 +1877,8 @@ namespace WAN {
}
}

struct ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx,
struct ggml_tensor* x) {
ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx,
ggml_tensor* x) {
int64_t W = x->ne[0];
int64_t H = x->ne[1];
int64_t T = x->ne[2];
@ -1834,8 +1890,8 @@ namespace WAN {
return x;
}

struct ggml_tensor* unpatchify(struct ggml_context* ctx,
struct ggml_tensor* x,
ggml_tensor* unpatchify(ggml_context* ctx,
ggml_tensor* x,
int64_t t_len,
int64_t h_len,
int64_t w_len) {
@ -1861,13 +1917,13 @@ namespace WAN {
return x;
}

struct ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* timestep,
struct ggml_tensor* context,
struct ggml_tensor* pe,
struct ggml_tensor* clip_fea = nullptr,
struct ggml_tensor* vace_context = nullptr,
ggml_tensor* forward_orig(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* timestep,
ggml_tensor* context,
ggml_tensor* pe,
ggml_tensor* clip_fea = nullptr,
ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f,
int64_t N = 1) {
// x: [N*C, T, H, W], C => in_dim
@ -1955,14 +2011,14 @@ namespace WAN {
return x;
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* timestep,
struct ggml_tensor* context,
struct ggml_tensor* pe,
struct ggml_tensor* clip_fea = nullptr,
struct ggml_tensor* time_dim_concat = nullptr,
struct ggml_tensor* vace_context = nullptr,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* timestep,
ggml_tensor* context,
ggml_tensor* pe,
ggml_tensor* clip_fea = nullptr,
ggml_tensor* time_dim_concat = nullptr,
ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f,
int64_t N = 1) {
// Forward pass of DiT.
@ -2129,27 +2185,27 @@ namespace WAN {
return desc;
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
wan.get_param_tensors(tensors, prefix);
}

struct ggml_cgraph* build_graph(struct ggml_tensor* x,
struct ggml_tensor* timesteps,
struct ggml_tensor* context,
struct ggml_tensor* clip_fea = nullptr,
struct ggml_tensor* c_concat = nullptr,
struct ggml_tensor* time_dim_concat = nullptr,
struct ggml_tensor* vace_context = nullptr,
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor = {},
const sd::Tensor<float>& clip_fea_tensor = {},
const sd::Tensor<float>& c_concat_tensor = {},
const sd::Tensor<float>& time_dim_concat_tensor = {},
const sd::Tensor<float>& vace_context_tensor = {},
float vace_strength = 1.f) {
struct ggml_cgraph* gf = new_graph_custom(WAN_GRAPH_SIZE);
ggml_cgraph* gf = new_graph_custom(WAN_GRAPH_SIZE);

x = to_backend(x);
timesteps = to_backend(timesteps);
context = to_backend(context);
clip_fea = to_backend(clip_fea);
c_concat = to_backend(c_concat);
time_dim_concat = to_backend(time_dim_concat);
vace_context = to_backend(vace_context);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* clip_fea = make_optional_input(clip_fea_tensor);
ggml_tensor* c_concat = make_optional_input(c_concat_tensor);
ggml_tensor* time_dim_concat = make_optional_input(time_dim_concat_tensor);
ggml_tensor* vace_context = make_optional_input(vace_context_tensor);

pe_vec = Rope::gen_wan_pe(static_cast<int>(x->ne[2]),
static_cast<int>(x->ne[1]),
@ -2174,7 +2230,7 @@ namespace WAN {

auto runner_ctx = get_context();

struct ggml_tensor* out = wan.forward(&runner_ctx,
ggml_tensor* out = wan.forward(&runner_ctx,
x,
timesteps,
context,
@ -2189,60 +2245,60 @@ namespace WAN {
return gf;
}

bool compute(int n_threads,
struct ggml_tensor* x,
struct ggml_tensor* timesteps,
struct ggml_tensor* context,
struct ggml_tensor* clip_fea = nullptr,
struct ggml_tensor* c_concat = nullptr,
struct ggml_tensor* time_dim_concat = nullptr,
struct ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* {
sd::Tensor<float> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context = {},
const sd::Tensor<float>& clip_fea = {},
const sd::Tensor<float>& c_concat = {},
const sd::Tensor<float>& time_dim_concat = {},
const sd::Tensor<float>& vace_context = {},
float vace_strength = 1.f) {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, clip_fea, c_concat, time_dim_concat, vace_context, vace_strength);
};

return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
}

void test() {
struct ggml_init_params params;
ggml_init_params params;
params.mem_size = static_cast<size_t>(200 * 1024 * 1024); // 200 MB
params.mem_buffer = nullptr;
params.no_alloc = false;

struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);

{
// cpu f16: pass
// cuda f16: pass
// cpu q8_0: pass
// auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 104, 60, 1, 16);
// auto x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 104, 60, 1, 16);
// ggml_set_f32(x, 0.01f);
auto x = load_tensor_from_file(work_ctx, "wan_dit_x.bin");
print_ggml_tensor(x);
auto x = sd::load_tensor_from_file_as_tensor<float>("wan_dit_x.bin");
print_sd_tensor(x);

std::vector<float> timesteps_vec(3, 1000.f);
timesteps_vec[0] = 0.f;
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);

// auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 4096, 512, 1);
// auto context = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 4096, 512, 1);
// ggml_set_f32(context, 0.01f);
auto context = load_tensor_from_file(work_ctx, "wan_dit_context.bin");
print_ggml_tensor(context);
// auto clip_fea = load_tensor_from_file(work_ctx, "wan_dit_clip_fea.bin");
auto context = sd::load_tensor_from_file_as_tensor<float>("wan_dit_context.bin");
print_sd_tensor(context);
// auto clip_fea = load_tensor_from_file(ctx, "wan_dit_clip_fea.bin");
// print_ggml_tensor(clip_fea);

struct ggml_tensor* out = nullptr;
sd::Tensor<float> out;

int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, nullptr, nullptr, nullptr, nullptr, 1.f, &out, work_ctx);
auto out_opt = compute(8, x, timesteps, context, {}, {}, {}, {}, 1.f);
int64_t t1 = ggml_time_ms();

print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("wan test done in %lldms", t1 - t0);
}
}
136
src/z_image.hpp
@ -42,10 +42,10 @@ namespace ZImage {
}
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* pe,
struct ggml_tensor* mask = nullptr) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* pe,
ggml_tensor* mask = nullptr) {
// x: [N, n_token, hidden_size]
int64_t n_token = x->ne[1];
int64_t N = x->ne[2];
@ -124,23 +124,23 @@ namespace ZImage {
blocks["w3"] = std::make_shared<Linear>(dim, hidden_dim, false);
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx, struct ggml_tensor* x) {
ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
auto w1 = std::dynamic_pointer_cast<Linear>(blocks["w1"]);
auto w2 = std::dynamic_pointer_cast<Linear>(blocks["w2"]);
auto w3 = std::dynamic_pointer_cast<Linear>(blocks["w3"]);

auto x1 = w1->forward(ctx, x);
auto x3 = w3->forward(ctx, x);
x = ggml_mul(ctx->ggml_ctx, ggml_silu(ctx->ggml_ctx, x1), x3);
x = ggml_swiglu_split(ctx->ggml_ctx, x1, x3);
x = w2->forward(ctx, x);

return x;
}
};
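The FeedForward change above swaps the hand-rolled `ggml_mul(silu(x1), x3)` for the fused `ggml_swiglu_split`; both compute the same SwiGLU gate, so the whole block is FFN(x) = W2(SiLU(W1 x) * W3 x). A scalar reference for the gated part, as a sanity-check sketch:

```cpp
#include <cmath>

// Scalar reference for the SwiGLU gate that ggml_swiglu_split fuses:
// out = silu(x1) * x3, with silu(v) = v / (1 + exp(-v)).
float swiglu(float x1, float x3) {
    float silu = x1 / (1.0f + std::exp(-x1));
    return silu * x3;
}
```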

__STATIC_INLINE__ struct ggml_tensor* modulate(struct ggml_context* ctx,
struct ggml_tensor* x,
struct ggml_tensor* scale) {
__STATIC_INLINE__ ggml_tensor* modulate(ggml_context* ctx,
ggml_tensor* x,
ggml_tensor* scale) {
// x: [N, L, C]
// scale: [N, C]
scale = ggml_reshape_3d(ctx, scale, scale->ne[0], 1, scale->ne[1]); // [N, 1, C]
@ -175,11 +175,11 @@ namespace ZImage {
}
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* pe,
struct ggml_tensor* mask = nullptr,
struct ggml_tensor* adaln_input = nullptr) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* pe,
ggml_tensor* mask = nullptr,
ggml_tensor* adaln_input = nullptr) {
auto attention = std::dynamic_pointer_cast<JointAttention>(blocks["attention"]);
auto feed_forward = std::dynamic_pointer_cast<FeedForward>(blocks["feed_forward"]);
auto attention_norm1 = std::dynamic_pointer_cast<RMSNorm>(blocks["attention_norm1"]);
@ -241,9 +241,9 @@ namespace ZImage {
blocks["adaLN_modulation.1"] = std::make_shared<Linear>(MIN(hidden_size, ADALN_EMBED_DIM), hidden_size);
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* c) {
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* c) {
// x: [N, n_token, hidden_size]
// c: [N, hidden_size]
// return: [N, n_token, patch_size * patch_size * out_channels]
@ -284,7 +284,7 @@ namespace ZImage {
protected:
ZImageParams z_image_params;

void init_params(struct ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
void init_params(ggml_context* ctx, const String2TensorStorage& tensor_storage_map = {}, const std::string prefix = "") override {
params["cap_pad_token"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, z_image_params.hidden_size);
params["x_pad_token"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, z_image_params.hidden_size);
}
@ -346,11 +346,11 @@ namespace ZImage {
blocks["final_layer"] = std::make_shared<FinalLayer>(z_image_params.hidden_size, z_image_params.patch_size, z_image_params.out_channels);
}

struct ggml_tensor* forward_core(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* timestep,
struct ggml_tensor* context,
struct ggml_tensor* pe) {
ggml_tensor* forward_core(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* timestep,
ggml_tensor* context,
ggml_tensor* pe) {
auto x_embedder = std::dynamic_pointer_cast<Linear>(blocks["x_embedder"]);
auto t_embedder = std::dynamic_pointer_cast<TimestepEmbedder>(blocks["t_embedder"]);
auto cap_embedder_0 = std::dynamic_pointer_cast<RMSNorm>(blocks["cap_embedder.0"]);
@ -414,11 +414,11 @@ namespace ZImage {
return img;
}

struct ggml_tensor* forward(GGMLRunnerContext* ctx,
struct ggml_tensor* x,
struct ggml_tensor* timestep,
struct ggml_tensor* context,
struct ggml_tensor* pe,
ggml_tensor* forward(GGMLRunnerContext* ctx,
ggml_tensor* x,
ggml_tensor* timestep,
ggml_tensor* context,
ggml_tensor* pe,
std::vector<ggml_tensor*> ref_latents = {}) {
// Forward pass of DiT.
// x: [N, C, H, W]
@ -477,24 +477,25 @@ namespace ZImage {
return "z_image";
}

void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) {
z_image.get_param_tensors(tensors, prefix);
}

struct ggml_cgraph* build_graph(struct ggml_tensor* x,
struct ggml_tensor* timesteps,
struct ggml_tensor* context,
std::vector<ggml_tensor*> ref_latents = {},
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor,
const std::vector<sd::Tensor<float>>& ref_latents_tensor = {},
bool increase_ref_index = false) {
ggml_cgraph* gf = new_graph_custom(Z_IMAGE_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
GGML_ASSERT(x->ne[3] == 1);
struct ggml_cgraph* gf = new_graph_custom(Z_IMAGE_GRAPH_SIZE);

x = to_backend(x);
context = to_backend(context);
timesteps = to_backend(timesteps);

for (int i = 0; i < ref_latents.size(); i++) {
ref_latents[i] = to_backend(ref_latents[i]);
GGML_ASSERT(!context_tensor.empty());
ggml_tensor* context = make_input(context_tensor);
std::vector<ggml_tensor*> ref_latents;
ref_latents.reserve(ref_latents_tensor.size());
for (const auto& ref_latent_tensor : ref_latents_tensor) {
ref_latents.push_back(make_input(ref_latent_tensor));
}

pe_vec = Rope::gen_z_image_pe(static_cast<int>(x->ne[1]),
@ -518,7 +519,7 @@ namespace ZImage {
set_backend_tensor_data(pe, pe_vec.data());
auto runner_ctx = get_context();

struct ggml_tensor* out = z_image.forward(&runner_ctx,
ggml_tensor* out = z_image.forward(&runner_ctx,
x,
timesteps,
context,
@ -530,54 +531,59 @@ namespace ZImage {
return gf;
}

bool compute(int n_threads,
struct ggml_tensor* x,
struct ggml_tensor* timesteps,
struct ggml_tensor* context,
std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false,
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) {
sd::Tensor<float> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context,
const std::vector<sd::Tensor<float>>& ref_latents = {},
bool increase_ref_index = false) {
// x: [N, in_channels, h, w]
// timesteps: [N, ]
// context: [N, max_position, hidden_size]
auto get_graph = [&]() -> struct ggml_cgraph* {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, ref_latents, increase_ref_index);
};

return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
}

void test() {
struct ggml_init_params params;
ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
params.mem_buffer = nullptr;
params.no_alloc = false;

struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);

{
// auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 16, 1);
// auto x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 16, 16, 16, 1);
// ggml_set_f32(x, 0.01f);
auto x = load_tensor_from_file(work_ctx, "./z_image_x.bin");
print_ggml_tensor(x);
auto x = sd::load_tensor_from_file_as_tensor<float>("./z_image_x.bin");
print_sd_tensor(x);

std::vector<float> timesteps_vec(1, 0.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);

// auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 2560, 256, 1);
// auto context = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 2560, 256, 1);
// ggml_set_f32(context, 0.01f);
auto context = load_tensor_from_file(work_ctx, "./z_image_context.bin");
print_ggml_tensor(context);
auto context = sd::load_tensor_from_file_as_tensor<float>("./z_image_context.bin");
print_sd_tensor(context);

struct ggml_tensor* out = nullptr;
sd::Tensor<float> out;

int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, {}, false, &out, work_ctx);
auto out_opt = compute(8,
x,
timesteps,
context,
{},
false);
int64_t t1 = ggml_time_ms();

print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("z_image test done in %lldms", t1 - t0);
}
}