Mirror of https://github.com/leejet/stable-diffusion.cpp.git, synced 2026-05-15 03:40:34 +00:00

Compare commits: master ... master-584

No commits in common. "master" and "master-584-0a7ae07" have entirely different histories.

.github/pull_request_template.md (vendored, 15 lines changed)

@@ -1,15 +0,0 @@
-## Summary
-
-<!-- Describe what changed and why. Keep the PR focused on one clear change. -->
-
-## Related Issue / Discussion
-
-<!-- Link related issues, discussions, or previous PRs if applicable. -->
-
-## Additional Information
-
-<!-- Add verification notes, screenshots, sample output, or other context when applicable. -->
-
-## Checklist
-
-- [ ] I have read and confirmed this PR follows the [contribution guidelines](https://github.com/leejet/stable-diffusion.cpp/blob/master/CONTRIBUTING.md).

CMakeLists.txt

@@ -72,31 +72,37 @@ option(SD_USE_SYSTEM_GGML "sd: use system-installed GGML library" OFF
 if(SD_CUDA)
     message("-- Use CUDA as backend stable-diffusion")
     set(GGML_CUDA ON)
+    add_definitions(-DSD_USE_CUDA)
 endif()
 
 if(SD_METAL)
     message("-- Use Metal as backend stable-diffusion")
     set(GGML_METAL ON)
+    add_definitions(-DSD_USE_METAL)
 endif()
 
 if (SD_VULKAN)
     message("-- Use Vulkan as backend stable-diffusion")
     set(GGML_VULKAN ON)
+    add_definitions(-DSD_USE_VULKAN)
 endif ()
 
 if (SD_OPENCL)
     message("-- Use OpenCL as backend stable-diffusion")
     set(GGML_OPENCL ON)
+    add_definitions(-DSD_USE_OPENCL)
 endif ()
 
 if (SD_HIPBLAS)
     message("-- Use HIPBLAS as backend stable-diffusion")
     set(GGML_HIP ON)
+    add_definitions(-DSD_USE_CUDA)
 endif ()
 
 if(SD_MUSA)
     message("-- Use MUSA as backend stable-diffusion")
     set(GGML_MUSA ON)
+    add_definitions(-DSD_USE_CUDA)
 endif()
 
 if(SD_WEBP)

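As an aside, each `SD_*` option above simply toggles the corresponding GGML backend at configure time. A minimal sketch of enabling one backend, assuming a standard CMake setup (the flag name comes from the options above; the build-directory name is arbitrary):

```
cmake -B build -DSD_CUDA=ON
cmake --build build --config Release
```
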
@@ -106,8 +112,7 @@ if(SD_WEBP)
                 "Or link against system library:\n cmake (...) -DSD_USE_SYSTEM_WEBP=ON")
     endif()
     if(SD_USE_SYSTEM_WEBP)
-        find_package(WebP)
-        if(WebP_FOUND)
+        find_package(WebP REQUIRED)
         add_library(webp ALIAS WebP::webp)
         # libwebp CMake target naming is not consistent across versions/distros.
         # Some export WebP::libwebpmux, others export WebP::webpmux.

@@ -121,14 +126,6 @@ if(SD_WEBP)
                 "Expected WebP::libwebpmux or WebP::webpmux."
             )
         endif()
-    else()
-        find_package(PkgConfig REQUIRED)
-        pkg_check_modules(WebP REQUIRED IMPORTED_TARGET GLOBAL libwebp)
-        pkg_check_modules(WebPMux REQUIRED IMPORTED_TARGET GLOBAL libwebpmux)
-        link_libraries(PkgConfig::WebP)
-        link_libraries(PkgConfig::WebPMux)
-        add_library(libwebpmux ALIAS PkgConfig::WebPMux)
-    endif()
     endif()
 endif()
 

@@ -142,13 +139,6 @@ if(SD_WEBM)
                 "Or link against system library:\n cmake (...) -DSD_USE_SYSTEM_WEBM=ON")
     endif()
     if(SD_USE_SYSTEM_WEBM)
-        find_package(PkgConfig)
-        if(PkgConfig_FOUND)
-            pkg_check_modules(WebM REQUIRED IMPORTED_TARGET GLOBAL libwebm)
-        endif()
-        if(PkgConfig_FOUND AND WebM_FOUND)
-            link_libraries(PkgConfig::WebM)
-        else()
         find_path(WEBM_INCLUDE_DIR
             NAMES mkvmuxer/mkvmuxer.h mkvparser/mkvparser.h common/webmids.h
             PATH_SUFFIXES webm

@@ -163,7 +153,6 @@ if(SD_WEBM)
             INTERFACE_INCLUDE_DIRECTORIES "${WEBM_INCLUDE_DIR}")
         endif()
     endif()
-    endif()
 endif()
 
 set(SD_LIB stable-diffusion)
 

@@ -233,6 +222,7 @@ if(SD_SYCL)
     message("-- Use SYCL as backend stable-diffusion")
     set(GGML_SYCL ON)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing -fsycl")
+    add_definitions(-DSD_USE_SYCL)
     # disable fast-math on host, see:
     # https://www.intel.com/content/www/us/en/docs/cpp-compiler/developer-guide-reference/2021-10/fp-model-fp.html
     if (WIN32)

CONTRIBUTING.md

@@ -1,65 +0,0 @@
-# Contributing
-
-This document collects general contribution conventions for this repository.
-
-## Before You Start
-
-Before opening a PR, please search existing PRs to avoid duplicating ongoing work.
-
-For large-scale refactors or changes with broad impact, please open an issue first to discuss the approach before submitting a PR.
-
-If you want to update a third-party dependency, please open an issue first instead of submitting a direct PR. See [Dependency Updates](#dependency-updates) for details.
-
-## Pull Requests
-
-Keep each PR focused on one clear change. Large or overly complex PRs are harder to review and may not be merged.
-
-Follow Conventional Commit-style subjects seen in history: `feat:`, `fix:`, `refactor:`, `ci:`, `docs:`, `chore:`. Keep subjects imperative and scoped.
-
-PRs should include:
-
-- What changed and why (short problem/solution summary).
-- Verification evidence when applicable (commands and key outputs).
-- Linked issue/PR context when applicable.
-- Screenshots or sample outputs for UI/visual behavior changes.
-
-## Code Style
-
-Format code according to the repository style before submitting changes.
-
-Formatting follows `.clang-format` (Chromium base, 4-space indent, no tabs). Run `format-code.sh` before opening a PR. Keep C++ standard at C++17-compatible patterns used in this repo.
-
-Naming conventions:
-
-- Use `PascalCase` for class/struct/type names.
-- In `PascalCase` names, preserve common abbreviations in uppercase, for example `SD`, `API`, `HTTP`, `JSON`, `RGB`, `VAE`, `TAE`, `LoRA`, and `WebP`.
-- Use `snake_case` for functions, methods, variables, and file names unless an existing API requires a different style.
-- Use a trailing underscore for private data member names, for example `hidden_size_` or `tokenizer_`.
-- Use `.h` for C and C++ header files. Do not introduce new `.hpp` headers.
-- Use macro-based header include guards instead of `#pragma once`.
-- Format header include guards as `__SD_{PATH}__`, where `{PATH}` is the header path in uppercase snake case without the file extension. For example, `src/sample.h` should use `__SD_SAMPLE_H__`.
-- Do not introduce anonymous namespaces in new or modified code; prefer `static` file-local functions/variables or an explicit named namespace when scoping is needed.
-- In `class`/`struct` definitions, place data members before member functions unless an existing type already clearly follows a different pattern.
-- Keep `test_*.cpp` / `test_*.py` naming for tests.
-
-Some older code in the project may not fully follow the current conventions. Please do not submit PRs that only rewrite existing code to match style rules.
-
-## AI-Assisted Contributions
-
-AI tools may be used to assist development, but contributors are responsible for the quality and correctness of the submitted code.
-
-If any part of a contribution was generated with AI assistance, the contributor must perform a thorough human review before submitting the PR and understand every changed line.
-
-Do not list AI tools as co-authors. The human contributor is the sole responsible author of the submitted code.
-
-Please do not submit AI-generated code that you do not understand, and do not include meaningless experiments, temporary test code, or unrelated generated output in a PR.
-
-## Dependency Updates
-
-Do not submit PRs that update `ggml`. `ggml` updates are performed only after local validation by the maintainer.
-
-Other third-party dependencies are not updated unless necessary. If you want to update a dependency, please open an issue first instead of submitting a direct PR.
-
-## Security & Configuration
-
-Do not commit model weights, secrets, or local absolute paths. Keep large binaries out of git unless intentionally tracked release assets.

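As an aside, a minimal sketch of the naming and include-guard conventions described in the deleted guidelines above (the type and members here are hypothetical, invented only to illustrate the rules):

```
// src/sample.h (hypothetical header, for illustration only)
#ifndef __SD_SAMPLE_H__
#define __SD_SAMPLE_H__

// PascalCase type name, keeping the common abbreviation "SD" uppercase.
class SDSampler {
private:
    // Data members come before member functions; private data members
    // carry a trailing underscore.
    int hidden_size_ = 0;

public:
    // snake_case for functions and methods.
    int hidden_size() const { return hidden_size_; }
};

#endif  // __SD_SAMPLE_H__
```
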
README.md

@@ -58,7 +58,6 @@ API and command-line option may change frequently.***
     - [Ovis-Image](./docs/ovis_image.md)
     - [Anima](./docs/anima.md)
     - [ERNIE-Image](./docs/ernie_image.md)
-    - [HiDream-O1-Image](./docs/hidream_o1_image.md)
 - Image Edit Models
     - [FLUX.1-Kontext-dev](./docs/kontext.md)
     - [Qwen Image Edit series](./docs/qwen_image_edit.md)

@@ -78,10 +77,9 @@ API and command-line option may change frequently.***
     - OpenCL
     - SYCL
 - Supported weight formats
-    - Pytorch checkpoint (`.ckpt` or `.pth` or `.pt`)
+    - Pytorch checkpoint (`.ckpt` or `.pth`)
     - Safetensors (`.safetensors`)
     - GGUF (`.gguf`)
-    - Convert mode supports converting model weights to `.gguf` or `.safetensors`
 - Supported platforms
     - Linux
     - Mac OS

@@ -149,7 +147,6 @@ If you want to improve performance or reduce VRAM/RAM usage, please refer to [pe
     - [Ovis-Image](./docs/ovis_image.md)
     - [Anima](./docs/anima.md)
     - [ERNIE-Image](./docs/ernie_image.md)
-    - [HiDream-O1-Image](./docs/hidream_o1_image.md)
     - [LoRA](./docs/lora.md)
     - [LCM/LCM-LoRA](./docs/lcm.md)
     - [Using PhotoMaker to personalize image generation](./docs/photo_maker.md)

@@ -165,7 +162,6 @@ These projects wrap `stable-diffusion.cpp` for easier use in other languages/fra
 
 * Golang (non-cgo): [seasonjs/stable-diffusion](https://github.com/seasonjs/stable-diffusion)
 * Golang (cgo): [Binozo/GoStableDiffusion](https://github.com/Binozo/GoStableDiffusion)
-* Golang (non-cgo): [l8bloom/gosd](https://github.com/l8bloom/gosd)
 * C#: [DarthAffe/StableDiffusion.NET](https://github.com/DarthAffe/StableDiffusion.NET)
 * Python: [william-murray1204/stable-diffusion-cpp-python](https://github.com/william-murray1204/stable-diffusion-cpp-python)
 * Rust: [newfla/diffusion-rs](https://github.com/newfla/diffusion-rs)

Binary file not shown. Before: 2.2 MiB

@@ -131,6 +131,8 @@ sd-cli -m model.safetensors -p "a cat" --cache-mode spectrum
 | `warmup` | Steps to always compute before caching starts | 4 |
 | `stop` | Stop caching at this fraction of total steps | 0.9 |
+
+```
 
 ### Performance Tips
 
 - Start with default thresholds and adjust based on output quality

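As an aside, the cache parameters tabulated above are passed through `--cache-option` as comma-separated key=value pairs; a hedged sketch for the spectrum mode, building on the doc's own example command (the values are illustrative, not tuned recommendations):

```
sd-cli -m model.safetensors -p "a cat" --cache-mode spectrum --cache-option "w=0.4,window=2,warmup=4"
```
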
docs/hidream_o1_image.md

@@ -1,20 +0,0 @@
-# How to Use
-
-## Download weights
-
-- Download HiDream-O1-Image-Dev
-    - safetensors: https://huggingface.co/Comfy-Org/HiDream-O1-Image/tree/main/checkpoints
-- Download HiDream-O1-Image
-    - safetensors: https://huggingface.co/Comfy-Org/HiDream-O1-Image/tree/main/checkpoints
-
-## Examples
-
-### HiDream-O1-Image-Dev
-
-```
-.\bin\Release\sd-cli.exe -m ..\..\ComfyUI\models\diffusion_models\hidream_o1_image_dev_bf16.safetensors -p "a lovely cat holding a sign says
-'hidream o1 cpp'" --cfg-scale 1.0 -v -H 1024 -W 1024
-```
-
-<img width="256" alt="HiDream-O1-Image-Dev example" src="../assets/hidream-o1/dev_example.png" />
-

@@ -4,17 +4,14 @@
 usage: ./bin/sd-cli [options]
 
 CLI Options:
--o, --output <string>              path to write result image to. you can use printf-style %d format specifiers for image
-                                   sequences (default: ./output.png) (eg. output_%03d.png). Single-file video outputs
-                                   support .avi, .webm, and animated .webp
+-o, --output <string>              path to write result image to. you can use printf-style %d format specifiers for image sequences (default:
+                                   ./output.png) (eg. output_%03d.png). For video generation, single-file outputs support .avi, .webm, and animated .webp
+--preview-path <string>            path to write preview image to (default: ./preview.png). Multi-frame previews support .avi, .webm, and animated .webp
+--preview-interval <int>           interval in denoising steps between consecutive updates of the image preview file (default is 1, meaning updating at
+                                   every step)
+--output-begin-idx <int>           starting index for output image sequence, must be non-negative (default 0 if specified %d in output path, 1 otherwise)
 --image <string>                   path to the image to inspect (for metadata mode)
 --metadata-format <string>         metadata output format, one of [text, json] (default: text)
---preview-path <string>            path to write preview image to (default: ./preview.png). Multi-frame previews support
-                                   .avi, .webm, and animated .webp
---preview-interval <int>           interval in denoising steps between consecutive updates of the image preview file
-                                   (default is 1, meaning updating at every step)
---output-begin-idx <int>           starting index for output image sequence, must be non-negative (default 0 if specified
-                                   %d in output path, 1 otherwise)
 --canny                            apply canny preprocessor (edge detection)
 --convert-name                     convert tensor name (for convert mode)
 -v, --verbose                      print extra info

@@ -34,8 +31,7 @@ Context Options:
 --clip_g <string>                  path to the clip-g text encoder
 --clip_vision <string>             path to the clip-vision encoder
 --t5xxl <string>                   path to the t5xxl text encoder
---llm <string>                     path to the llm text encoder. For example: (qwenvl2.5 for qwen-image,
-                                   mistral-small3.2 for flux2, ...)
+--llm <string>                     path to the llm text encoder. For example: (qwenvl2.5 for qwen-image, mistral-small3.2 for flux2, ...)
 --llm_vision <string>              path to the llm vit
 --qwen2vl <string>                 alias of --llm. Deprecated.
 --qwen2vl_vision <string>          alias of --llm_vision. Deprecated.

@@ -47,18 +43,16 @@ Context Options:
 --control-net <string>             path to control net model
 --embd-dir <string>                embeddings directory
 --lora-model-dir <string>          lora model directory
---hires-upscalers-dir <string>     highres fix upscaler model directory
 --tensor-type-rules <string>       weight type per tensor pattern (example: "^vae\.=f16,model\.=q8_0")
 --photo-maker <string>             path to PHOTOMAKER model
 --upscale-model <string>           path to esrgan model.
--t, --threads <int>                number of threads to use during computation (default: -1). If threads <= 0,
-                                   then threads will be set to the number of CPU physical cores
+-t, --threads <int>                number of threads to use during computation (default: -1). If threads <= 0, then threads will be set to the number of
+                                   CPU physical cores
 --chroma-t5-mask-pad <int>         t5 mask pad size of chroma
---max-vram <float>                 maximum VRAM budget in GiB for graph-cut segmented execution. 0 disables
-                                   graph splitting
+--vae-tile-overlap <float>         tile overlap for vae tiling, in fraction of tile size (default: 0.5)
+--vae-tiling                       process vae in tiles to reduce memory usage
 --force-sdxl-vae-conv-scale        force use of conv scale on sdxl vae
---offload-to-cpu                   place the weights in RAM to save VRAM, and automatically load them into VRAM
-                                   when needed
+--offload-to-cpu                   place the weights in RAM to save VRAM, and automatically load them into VRAM when needed
 --mmap                             whether to memory-map model
 --control-net-cpu                  keep controlnet in cpu (for low vram)
 --clip-on-cpu                      keep clip in cpu (for low vram)

@@ -73,19 +67,20 @@ Context Options:
 --chroma-disable-dit-mask          disable dit mask for chroma
 --qwen-image-zero-cond-t           enable zero_cond_t for qwen image
 --chroma-enable-t5-mask            enable t5 mask for chroma
---type                             weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K,
-                                   q4_K). If not specified, the default is the type of the weight file
+--type                             weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K). If not specified, the default is the
+                                   type of the weight file
 --rng                              RNG, one of [std_default, cuda, cpu], default: cuda(sd-webui), cpu(comfyui)
 --sampler-rng                      sampler RNG, one of [std_default, cuda, cpu]. If not specified, use --rng
---prediction                       prediction type override, one of [eps, v, edm_v, sd3_flow, flux_flow,
-                                   flux2_flow]
+--prediction                       prediction type override, one of [eps, v, edm_v, sd3_flow, flux_flow, flux2_flow]
---lora-apply-mode                  the way to apply LoRA, one of [auto, immediately, at_runtime], default is
-                                   auto. In auto mode, if the model weights contain any quantized parameters,
-                                   the at_runtime mode will be used; otherwise, immediately will be used.The
-                                   immediately mode may have precision and compatibility issues with quantized
-                                   parameters, but it usually offers faster inference speed and, in some cases,
-                                   lower memory usage. The at_runtime mode, on the other hand, is exactly the
-                                   opposite.
+--lora-apply-mode                  the way to apply LoRA, one of [auto, immediately, at_runtime], default is auto. In auto mode, if the model weights
+                                   contain any quantized parameters, the at_runtime mode will be used; otherwise,
+                                   immediately will be used.The immediately mode may have precision and
+                                   compatibility issues with quantized parameters, but it usually offers faster inference
+                                   speed and, in some cases, lower memory usage. The at_runtime mode, on the
+                                   other hand, is exactly the opposite.
+--vae-tile-size                    tile size for vae tiling, format [X]x[Y] (default: 32x32)
+--vae-relative-tile-size           relative tile size for vae tiling, format [X]x[Y], in fraction of image size if < 1, in number of tiles per dim if >=1
+                                   (overrides --vae-tile-size)
 
 Generation Options:
 -p, --prompt <string>              the prompt to render

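As an aside, the VAE tiling flags documented above combine naturally on one command line; a hedged sketch using the stated default values (the model path is illustrative):

```
sd-cli -m model.safetensors -p "a cat" --vae-tiling --vae-tile-size 32x32 --vae-tile-overlap 0.5
```
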
@@ -94,101 +89,69 @@ Generation Options:
 --end-img <string>                 path to the end image, required by flf2v
 --mask <string>                    path to the mask image
 --control-image <string>           path to control image, control net
---control-video <string>           path to control video frames, It must be a directory path. The video frames
-                                   inside should be stored as images in lexicographical (character) order. For
-                                   example, if the control video path is `frames`, the directory contain images
-                                   such as 00.png, 01.png, ... etc.
+--control-video <string>           path to control video frames, It must be a directory path. The video frames inside should be stored as images in
+                                   lexicographical (character) order. For example, if the control video path is
+                                   `frames`, the directory contain images such as 00.png, 01.png, ... etc.
 --pm-id-images-dir <string>        path to PHOTOMAKER input id images dir
 --pm-id-embed-path <string>        path to PHOTOMAKER v2 id embed
---hires-upscaler <string>          highres fix upscaler, Lanczos, Nearest, Latent, Latent (nearest), Latent
-                                   (nearest-exact), Latent (antialiased), Latent (bicubic), Latent (bicubic
-                                   antialiased), or a model name under --hires-upscalers-dir (default: Latent)
---extra-sample-args <string>       extra sampler args, key=value list. Currently lcm supports noise_clip_std,
-                                   noise_scale_start, noise_scale_end
 -H, --height <int>                 image height, in pixel space (default: 512)
 -W, --width <int>                  image width, in pixel space (default: 512)
 --steps <int>                      number of sample steps (default: 20)
 --high-noise-steps <int>           (high noise) number of sample steps (default: -1 = auto)
---clip-skip <int>                  ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer
-                                   (default: -1). <= 0 represents unspecified, will be 1 for SD1.x, 2 for SD2.x
+--clip-skip <int>                  ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer (default: -1). <= 0 represents unspecified,
+                                   will be 1 for SD1.x, 2 for SD2.x
 -b, --batch-count <int>            batch count
 --video-frames <int>               video frames (default: 1)
 --fps <int>                        fps (default: 24)
---timestep-shift <int>             shift timestep for NitroFusion models (default: 0). recommended N for
-                                   NitroSD-Realism around 250 and 500 for NitroSD-Vibrant
+--timestep-shift <int>             shift timestep for NitroFusion models (default: 0). recommended N for NitroSD-Realism around 250 and 500 for
+                                   NitroSD-Vibrant
 --upscale-repeats <int>            Run the ESRGAN upscaler this many times (default: 1)
 --upscale-tile-size <int>          tile size for ESRGAN upscaling (default: 128)
---hires-width <int>                highres fix target width, 0 to use --hires-scale (default: 0)
---hires-height <int>               highres fix target height, 0 to use --hires-scale (default: 0)
---hires-steps <int>                highres fix second pass sample steps, 0 to reuse --steps (default: 0)
---hires-upscale-tile-size <int>    highres fix upscaler tile size, reserved for model-backed upscalers (default:
-                                   128)
 --cfg-scale <float>                unconditional guidance scale: (default: 7.0)
---img-cfg-scale <float>            image guidance scale for inpaint or instruct-pix2pix models: (default: same
-                                   as --cfg-scale)
+--img-cfg-scale <float>            image guidance scale for inpaint or instruct-pix2pix models: (default: same as --cfg-scale)
 --guidance <float>                 distilled guidance scale for models with guidance input (default: 3.5)
---slg-scale <float>                skip layer guidance (SLG) scale, only for DiT models: (default: 0). 0 means
-                                   disabled, a value of 2.5 is nice for sd3.5 medium
+--slg-scale <float>                skip layer guidance (SLG) scale, only for DiT models: (default: 0). 0 means disabled, a value of 2.5 is nice for sd3.5
+                                   medium
 --skip-layer-start <float>         SLG enabling point (default: 0.01)
 --skip-layer-end <float>           SLG disabling point (default: 0.2)
---eta <float>                      noise multiplier (default: 0 for ddim_trailing, tcd, res_multistep and
-                                   res_2s; 1 for euler_a, er_sde and dpm++2s_a)
+--eta <float>                      noise multiplier (default: 0 for ddim_trailing, tcd, res_multistep and res_2s; 1 for euler_a, er_sde and dpm++2s_a)
 --flow-shift <float>               shift value for Flow models like SD3.x or WAN (default: auto)
 --high-noise-cfg-scale <float>     (high noise) unconditional guidance scale: (default: 7.0)
---high-noise-img-cfg-scale <float> (high noise) image guidance scale for inpaint or instruct-pix2pix models
-                                   (default: same as --cfg-scale)
---high-noise-guidance <float>      (high noise) distilled guidance scale for models with guidance input
-                                   (default: 3.5)
---high-noise-slg-scale <float>     (high noise) skip layer guidance (SLG) scale, only for DiT models: (default:
-                                   0)
+--high-noise-img-cfg-scale <float> (high noise) image guidance scale for inpaint or instruct-pix2pix models (default: same as --cfg-scale)
+--high-noise-guidance <float>      (high noise) distilled guidance scale for models with guidance input (default: 3.5)
+--high-noise-slg-scale <float>     (high noise) skip layer guidance (SLG) scale, only for DiT models: (default: 0)
 --high-noise-skip-layer-start <float> (high noise) SLG enabling point (default: 0.01)
 --high-noise-skip-layer-end <float> (high noise) SLG disabling point (default: 0.2)
---high-noise-eta <float>           (high noise) noise multiplier (default: 0 for ddim_trailing, tcd,
-                                   res_multistep and res_2s; 1 for euler_a, er_sde and dpm++2s_a)
+--high-noise-eta <float>           (high noise) noise multiplier (default: 0 for ddim_trailing, tcd, res_multistep and res_2s; 1 for euler_a, er_sde and dpm++2s_a)
 --strength <float>                 strength for noising/unnoising (default: 0.75)
 --pm-style-strength <float>
---control-strength <float>         strength to apply Control Net (default: 0.9). 1.0 corresponds to full
-                                   destruction of information in init image
---moe-boundary <float>             timestep boundary for Wan2.2 MoE model. (default: 0.875). Only enabled if
-                                   `--high-noise-steps` is set to -1
+--control-strength <float>         strength to apply Control Net (default: 0.9). 1.0 corresponds to full destruction of information in init image
+--moe-boundary <float>             timestep boundary for Wan2.2 MoE model. (default: 0.875). Only enabled if `--high-noise-steps` is set to -1
 --vace-strength <float>            wan vace strength
---vae-tile-overlap <float>         tile overlap for vae tiling, in fraction of tile size (default: 0.5)
---hires-scale <float>              highres fix scale when target size is not set (default: 2.0)
---hires-denoising-strength <float> highres fix second pass denoising strength (default: 0.7)
---increase-ref-index               automatically increase the indices of references images based on the order
-                                   they are listed (starting with 1).
+--increase-ref-index               automatically increase the indices of references images based on the order they are listed (starting with 1).
 --disable-auto-resize-ref-image    disable auto resize of ref images
 --disable-image-metadata           do not embed generation metadata on image files
---vae-tiling                       process vae in tiles to reduce memory usage
---hires                            enable highres fix
 -s, --seed                         RNG seed (default: 42, use random seed for < 0)
---sampling-method                  sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m,
-                                   dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd, res_multistep, res_2s,
-                                   er_sde, euler_cfg_pp, euler_a_cfg_pp] (default: euler for Flux/SD3/Wan, euler_a otherwise)
+--sampling-method                  sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing,
+                                   tcd, res_multistep, res_2s, er_sde] (default: euler for Flux/SD3/Wan, euler_a
+                                   otherwise)
---high-noise-sampling-method       (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a,
-                                   dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd, res_multistep,
-                                   res_2s, er_sde, euler_cfg_pp, euler_a_cfg_pp] default: euler for Flux/SD3/Wan, euler_a otherwise
+--high-noise-sampling-method       (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm,
+                                   ddim_trailing, tcd, res_multistep, res_2s, er_sde] default: euler for Flux/SD3/Wan,
+                                   euler_a otherwise
---scheduler                        denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits,
-                                   smoothstep, sgm_uniform, simple, kl_optimal, lcm, bong_tangent], default:
-                                   discrete
+--scheduler                        denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple,
+                                   kl_optimal, lcm, bong_tangent], default: discrete
---sigmas                           custom sigma values for the sampler, comma-separated (e.g.,
-                                   "14.61,7.8,3.5,0.0").
+--sigmas                           custom sigma values for the sampler, comma-separated (e.g., "14.61,7.8,3.5,0.0").
 --skip-layers                      layers to skip for SLG steps (default: [7,8,9])
 --high-noise-skip-layers           (high noise) layers to skip for SLG steps (default: [7,8,9])
 -r, --ref-image                    reference image for Flux Kontext models (can be used multiple times)
---cache-mode                       caching method: 'easycache' (DiT), 'ucache' (UNET),
-                                   'dbcache'/'taylorseer'/'cache-dit' (DiT block-level), 'spectrum' (UNET/DiT
-                                   Chebyshev+Taylor forecasting)
+--cache-mode                       caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level),
+                                   'spectrum' (UNET/DiT Chebyshev+Taylor forecasting)
 --cache-option                     named cache params (key=value format, comma-separated). easycache/ucache:
-                                   threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit:
-                                   Fn=,Bn=,threshold=,warmup=; spectrum: w=,m=,lam=,window=,flex=,warmup=,stop=.
-                                   Examples: "threshold=0.25" or "threshold=1.5,reset=0"
+                                   threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=;
+                                   spectrum: w=,m=,lam=,window=,flex=,warmup=,stop=. Examples:
+                                   "threshold=0.25" or "threshold=1.5,reset=0" or "w=0.4,window=2"
---scm-mask                         SCM steps mask for cache-dit: comma-separated 0/1 (e.g.,
-                                   "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache
+--scm-mask                         SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache
 --scm-policy                       SCM policy: 'dynamic' (default) or 'static'
---vae-tile-size                    tile size for vae tiling, format [X]x[Y] (default: 32x32)
---vae-relative-tile-size           relative tile size for vae tiling, format [X]x[Y], in fraction of image size
-                                   if < 1, in number of tiles per dim if >=1 (overrides --vae-tile-size)
 ```
 
 Metadata mode inspects PNG/JPEG container metadata without loading any model:

@@ -278,9 +278,7 @@ void parse_args(int argc, const char** argv, SDCliParams& cli_params, SDContextP
     bool valid = cli_params.resolve_and_validate();
     if (valid && cli_params.mode != METADATA) {
         valid = ctx_params.resolve_and_validate(cli_params.mode) &&
-                gen_params.resolve_and_validate(cli_params.mode,
-                                                ctx_params.lora_model_dir,
-                                                ctx_params.hires_upscalers_dir);
+                gen_params.resolve_and_validate(cli_params.mode, ctx_params.lora_model_dir);
     }
 
     if (!valid) {

@@ -433,9 +431,8 @@ bool save_results(const SDCliParams& cli_params,
         if (!img.data)
             return false;
 
-        const int64_t metadata_seed = cli_params.mode == VID_GEN ? gen_params.seed : gen_params.seed + idx;
         std::string params = gen_params.embed_image_metadata
-                                 ? get_image_params(ctx_params, gen_params, metadata_seed, cli_params.mode)
+                                 ? get_image_params(ctx_params, gen_params, gen_params.seed + idx)
                                  : "";
         const bool ok = write_image_to_file(path.string(), img.data, img.width, img.height, img.channel, params, 90);
         LOG_INFO("save result image %d to '%s' (%s)", idx, path.string().c_str(), ok ? "success" : "failure");

@@ -691,13 +688,6 @@ int main(int argc, const char* argv[]) {
         vae_decode_only = false;
     }
-
-    if (gen_params.hires_enabled &&
-        (gen_params.resolved_hires_upscaler == SD_HIRES_UPSCALER_MODEL ||
-         gen_params.resolved_hires_upscaler == SD_HIRES_UPSCALER_LANCZOS ||
-         gen_params.resolved_hires_upscaler == SD_HIRES_UPSCALER_NEAREST)) {
-        vae_decode_only = false;
-    }
 
     sd_ctx_params_t sd_ctx_params = ctx_params.to_sd_ctx_params_t(vae_decode_only, true, cli_params.taesd_preview);
 
     SDImageVec results;

@@ -107,60 +107,47 @@ static bool is_absolute_path(const std::string& p) {
 
 std::string ArgOptions::wrap_text(const std::string& text, size_t width, size_t indent) {
     std::ostringstream oss;
-    size_t pos = 0;
     size_t line_len = 0;
+    size_t pos = 0;
 
     while (pos < text.size()) {
+        // Preserve manual newlines
         if (text[pos] == '\n') {
             oss << '\n'
                 << std::string(indent, ' ');
-            line_len = 0;
+            line_len = indent;
             ++pos;
             continue;
         }
 
-        if (std::isspace(static_cast<unsigned char>(text[pos]))) {
-            ++pos;
-            continue;
-        }
-
-        size_t word_start = pos;
-        while (pos < text.size() &&
-               text[pos] != '\n' &&
-               !std::isspace(static_cast<unsigned char>(text[pos]))) {
-            ++pos;
-        }
-
-        std::string word = text.substr(word_start, pos - word_start);
-        while (!word.empty()) {
-            size_t separator_len = line_len == 0 ? 0 : 1;
-            if (line_len + separator_len + word.size() <= width) {
-                if (separator_len > 0) {
-                    oss << ' ';
-                    ++line_len;
-                }
-                oss << word;
-                line_len += word.size();
-                word.clear();
-                continue;
-            }
-
-            if (line_len > 0) {
-                oss << '\n'
-                    << std::string(indent, ' ');
-                line_len = 0;
-                continue;
-            }
-
-            size_t chunk_len = std::min(width, word.size());
-            oss << word.substr(0, chunk_len);
-            line_len = chunk_len;
-            word.erase(0, chunk_len);
-            if (!word.empty()) {
-                oss << '\n'
-                    << std::string(indent, ' ');
-                line_len = 0;
-            }
-        }
+        // Add the character
+        oss << text[pos];
+        ++line_len;
+        ++pos;
+
+        // If the current line exceeds width, try to break at the last space
+        if (line_len >= width) {
+            std::string current = oss.str();
+            size_t back = current.size();
+
+            // Find the last space (for a clean break)
+            while (back > 0 && current[back - 1] != ' ' && current[back - 1] != '\n')
+                --back;
+
+            // If found a space to break on
+            if (back > 0 && current[back - 1] != '\n') {
+                std::string before = current.substr(0, back - 1);
+                std::string after = current.substr(back);
+                oss.str("");
+                oss.clear();
+                oss << before << "\n"
+                    << std::string(indent, ' ') << after;
+            } else {
+                // If no space found, just break at width
+                oss << "\n"
+                    << std::string(indent, ' ');
+            }
+            line_len = indent;
+        }
     }
 

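As an aside, the rewritten `wrap_text` appends characters one at a time and, once `line_len` reaches `width`, backtracks to the last space for a clean break. A hedged usage sketch (the input values are hypothetical; whether the method is callable without an `ArgOptions` instance depends on its declaration, which is not shown in this hunk):

```
// Wrapping at width 20 with a 4-space indent:
std::string wrapped = ArgOptions::wrap_text("aaa bbb ccc ddd eee fff", 20, 4);
// With the new implementation this yields:
// "aaa bbb ccc ddd eee\n    fff"
```
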
@@ -364,10 +351,7 @@ ArgOptions SDContextParams::get_options() {
          "--lora-model-dir",
          "lora model directory",
          &lora_model_dir},
-        {"",
-         "--hires-upscalers-dir",
-         "highres fix upscaler model directory",
-         &hires_upscalers_dir},
         {"",
          "--tensor-type-rules",
          "weight type per tensor pattern (example: \"^vae\\.=f16,model\\.=q8_0\")",

@@ -394,12 +378,7 @@ ArgOptions SDContextParams::get_options() {
          &chroma_t5_mask_pad},
     };
 
-    options.float_options = {
-        {"",
-         "--max-vram",
-         "maximum VRAM budget in GiB for graph-cut segmented execution. 0 disables graph splitting",
-         &max_vram},
-    };
+    options.float_options = {};
 
     options.bool_options = {
         {"",

@@ -670,12 +649,10 @@ std::string SDContextParams::to_string() const {
        << " wtype: " << sd_type_name(wtype) << ",\n"
        << " tensor_type_rules: \"" << tensor_type_rules << "\",\n"
        << " lora_model_dir: \"" << lora_model_dir << "\",\n"
-       << " hires_upscalers_dir: \"" << hires_upscalers_dir << "\",\n"
        << " photo_maker_path: \"" << photo_maker_path << "\",\n"
        << " rng_type: " << sd_rng_type_name(rng_type) << ",\n"
        << " sampler_rng_type: " << sd_rng_type_name(sampler_rng_type) << ",\n"
        << " offload_params_to_cpu: " << (offload_params_to_cpu ? "true" : "false") << ",\n"
-       << " max_vram: " << max_vram << ",\n"
        << " enable_mmap: " << (enable_mmap ? "true" : "false") << ",\n"
        << " control_net_cpu: " << (control_net_cpu ? "true" : "false") << ",\n"
        << " clip_on_cpu: " << (clip_on_cpu ? "true" : "false") << ",\n"

@@ -750,7 +727,6 @@ sd_ctx_params_t SDContextParams::to_sd_ctx_params_t(bool vae_decode_only, bool f
         chroma_use_t5_mask,
         chroma_t5_mask_pad,
         qwen_image_zero_cond_t,
-        max_vram,
     };
     return sd_ctx_params;
 }

@@ -801,16 +777,6 @@ ArgOptions SDGenerationParams::get_options() {
          "--pm-id-embed-path",
          "path to PHOTOMAKER v2 id embed",
          &pm_id_embed_path},
-        {"",
-         "--hires-upscaler",
-         "highres fix upscaler, Lanczos, Nearest, Latent, Latent (nearest), Latent (nearest-exact), "
-         "Latent (antialiased), Latent (bicubic), Latent (bicubic antialiased), or a model name "
-         "under --hires-upscalers-dir (default: Latent)",
-         &hires_upscaler},
-        {"",
-         "--extra-sample-args",
-         "extra sampler args, key=value list. Currently lcm supports noise_clip_std, noise_scale_start, noise_scale_end",
-         &extra_sample_args},
     };
 
     options.int_options = {

@@ -860,22 +826,6 @@ ArgOptions SDGenerationParams::get_options() {
          "--upscale-tile-size",
          "tile size for ESRGAN upscaling (default: 128)",
          &upscale_tile_size},
-        {"",
-         "--hires-width",
-         "highres fix target width, 0 to use --hires-scale (default: 0)",
-         &hires_width},
-        {"",
-         "--hires-height",
-         "highres fix target height, 0 to use --hires-scale (default: 0)",
-         &hires_height},
-        {"",
-         "--hires-steps",
-         "highres fix second pass sample steps, 0 to reuse --steps (default: 0)",
-         &hires_steps},
-        {"",
-         "--hires-upscale-tile-size",
-         "highres fix upscaler tile size, reserved for model-backed upscalers (default: 128)",
-         &hires_upscale_tile_size},
     };
 
     options.float_options = {

@@ -963,14 +913,6 @@ ArgOptions SDGenerationParams::get_options() {
          "--vae-tile-overlap",
          "tile overlap for vae tiling, in fraction of tile size (default: 0.5)",
          &vae_tiling_params.target_overlap},
-        {"",
-         "--hires-scale",
-         "highres fix scale when target size is not set (default: 2.0)",
-         &hires_scale},
-        {"",
-         "--hires-denoising-strength",
-         "highres fix second pass denoising strength (default: 0.7)",
-         &hires_denoising_strength},
     };
 
     options.bool_options = {

@@ -994,11 +936,6 @@ ArgOptions SDGenerationParams::get_options() {
          "process vae in tiles to reduce memory usage",
          true,
          &vae_tiling_params.enabled},
-        {"",
-         "--hires",
-         "enable highres fix",
-         true,
-         &hires_enabled},
     };
 
     auto on_seed_arg = [&](int argc, const char** argv, int index) {

@@ -1248,12 +1185,12 @@ ArgOptions SDGenerationParams::get_options() {
          on_seed_arg},
         {"",
          "--sampling-method",
-         "sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd, res_multistep, res_2s, er_sde, euler_cfg_pp, euler_a_cfg_pp]"
+         "sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd, res_multistep, res_2s, er_sde] "
          "(default: euler for Flux/SD3/Wan, euler_a otherwise)",
          on_sample_method_arg},
         {"",
          "--high-noise-sampling-method",
-         "(high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd, res_multistep, res_2s, er_sde, euler_cfg_pp, euler_a_cfg_pp]"
+         "(high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd, res_multistep, res_2s, er_sde]"
          " default: euler for Flux/SD3/Wan, euler_a otherwise",
          on_high_noise_sample_method_arg},
         {"",

@@ -1487,37 +1424,6 @@ static bool parse_lora_json_field(const json& parent,
     return true;
 }
-
-static bool resolve_model_file_from_dir(const std::string& model_name,
-                                        const std::string& model_dir,
-                                        const std::vector<std::string>& valid_ext,
-                                        const char* label,
-                                        std::string& resolved_path) {
-    if (model_dir.empty()) {
-        LOG_ERROR("%s directory is empty", label);
-        return false;
-    }
-    if (model_name.empty() ||
-        model_name.find('/') != std::string::npos ||
-        model_name.find('\\') != std::string::npos ||
-        fs::path(model_name).has_root_path() ||
-        fs::path(model_name).has_extension()) {
-        LOG_ERROR("%s must be a model name without path or extension: %s", label, model_name.c_str());
-        return false;
-    }
-
-    fs::path model_dir_path = model_dir;
-    for (const auto& ext : valid_ext) {
-        fs::path try_path = model_dir_path / (model_name + ext);
-        if (fs::exists(try_path) && fs::is_regular_file(try_path)) {
-            resolved_path = try_path.lexically_normal().string();
-            return true;
-        }
-    }
-
-    LOG_ERROR("can not find %s %s in %s", label, model_name.c_str(), model_dir_path.lexically_normal().string().c_str());
-    return false;
-}
 
 bool SDGenerationParams::from_json_str(
     const std::string& json_str,
     const std::function<std::string(const std::string&)>& lora_path_resolver) {

@@ -1581,37 +1487,8 @@ bool SDGenerationParams::from_json_str(
     load_if_exists("increase_ref_index", increase_ref_index);
     load_if_exists("embed_image_metadata", embed_image_metadata);
 
-    if (j.contains("hires") && j["hires"].is_object()) {
-        const json& hires_json = j["hires"];
-        if (hires_json.contains("enabled") && hires_json["enabled"].is_boolean()) {
-            hires_enabled = hires_json["enabled"];
-        }
-        if (hires_json.contains("upscaler") && hires_json["upscaler"].is_string()) {
-            hires_upscaler = hires_json["upscaler"];
-        }
-        if (hires_json.contains("scale") && hires_json["scale"].is_number()) {
-            hires_scale = hires_json["scale"];
-        }
-        if (hires_json.contains("target_width") && hires_json["target_width"].is_number_integer()) {
-            hires_width = hires_json["target_width"];
-        }
-        if (hires_json.contains("target_height") && hires_json["target_height"].is_number_integer()) {
-            hires_height = hires_json["target_height"];
-        }
-        if (hires_json.contains("steps") && hires_json["steps"].is_number_integer()) {
-            hires_steps = hires_json["steps"];
-        }
-        if (hires_json.contains("denoising_strength") && hires_json["denoising_strength"].is_number()) {
-            hires_denoising_strength = hires_json["denoising_strength"];
-        }
-        if (hires_json.contains("upscale_tile_size") && hires_json["upscale_tile_size"].is_number_integer()) {
-            hires_upscale_tile_size = hires_json["upscale_tile_size"];
-        }
-    }
-
     auto parse_sample_params_json = [&](const json& sample_json,
                                         sd_sample_params_t& target_params,
-                                        std::string& target_extra_sample_args,
                                         std::vector<int>& target_skip_layers,
                                         std::vector<float>* target_custom_sigmas) {
         if (sample_json.contains("sample_steps") && sample_json["sample_steps"].is_number_integer()) {

@@ -1626,9 +1503,6 @@ bool SDGenerationParams::from_json_str(
         if (sample_json.contains("flow_shift") && sample_json["flow_shift"].is_number()) {
             target_params.flow_shift = sample_json["flow_shift"];
         }
-        if (sample_json.contains("extra_sample_args") && sample_json["extra_sample_args"].is_string()) {
-            target_extra_sample_args = sample_json["extra_sample_args"].get<std::string>();
-        }
         if (target_custom_sigmas != nullptr &&
             sample_json.contains("custom_sigmas") &&
             sample_json["custom_sigmas"].is_array()) {

@@ -1676,12 +1550,11 @@ bool SDGenerationParams::from_json_str(
     };
 
     if (j.contains("sample_params") && j["sample_params"].is_object()) {
-        parse_sample_params_json(j["sample_params"], sample_params, extra_sample_args, skip_layers, &custom_sigmas);
+        parse_sample_params_json(j["sample_params"], sample_params, skip_layers, &custom_sigmas);
     }
     if (j.contains("high_noise_sample_params") && j["high_noise_sample_params"].is_object()) {
         parse_sample_params_json(j["high_noise_sample_params"],
                                  high_noise_sample_params,
-                                 high_noise_extra_sample_args,
                                  high_noise_skip_layers,
                                  nullptr);
     }

@@ -1927,7 +1800,7 @@ bool SDGenerationParams::initialize_cache_params() {
     return true;
 }
 
-bool SDGenerationParams::resolve(const std::string& lora_model_dir, const std::string& hires_upscalers_dir, bool strict) {
+bool SDGenerationParams::resolve(const std::string& lora_model_dir, bool strict) {
     if (high_noise_sample_params.sample_steps <= 0) {
         high_noise_sample_params.sample_steps = -1;
     }

@ -1946,27 +1819,6 @@ bool SDGenerationParams::resolve(const std::string& lora_model_dir, const std::s
|
|||||||
sample_params.sample_steps = std::clamp(sample_params.sample_steps, 1, 100);
|
sample_params.sample_steps = std::clamp(sample_params.sample_steps, 1, 100);
|
||||||
}
|
}
|
||||||
|
|
||||||
hires_upscaler_model_path.clear();
|
|
||||||
if (hires_enabled) {
|
|
||||||
if (hires_upscaler.empty()) {
|
|
||||||
hires_upscaler = "Latent";
|
|
||||||
}
|
|
||||||
resolved_hires_upscaler = str_to_sd_hires_upscaler(hires_upscaler.c_str());
|
|
||||||
if (resolved_hires_upscaler == SD_HIRES_UPSCALER_NONE) {
|
|
||||||
hires_enabled = false;
|
|
||||||
} else if (resolved_hires_upscaler == SD_HIRES_UPSCALER_COUNT) {
|
|
||||||
static const std::vector<std::string> valid_ext = {".gguf", ".safetensors", ".pt", ".pth"};
|
|
||||||
if (!resolve_model_file_from_dir(hires_upscaler,
|
|
||||||
hires_upscalers_dir,
|
|
||||||
valid_ext,
|
|
||||||
"hires upscaler",
|
|
||||||
hires_upscaler_model_path)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
resolved_hires_upscaler = SD_HIRES_UPSCALER_MODEL;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
prompt_with_lora = prompt;
|
prompt_with_lora = prompt;
|
||||||
if (!lora_model_dir.empty()) {
|
if (!lora_model_dir.empty()) {
|
||||||
extract_and_remove_lora(lora_model_dir);
|
extract_and_remove_lora(lora_model_dir);
|
||||||
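In the removed branch above, an unrecognized upscaler name falls through to a model-file lookup via `resolve_model_file_from_dir`. That helper's body is not shown in this diff; a plausible sketch of its contract, probing each accepted extension under the configured directory, is:

```cpp
#include <filesystem>
#include <string>
#include <vector>

namespace fs = std::filesystem;

// Hypothetical stand-in for resolve_model_file_from_dir: probe
// "<dir>/<name><ext>" for each accepted extension and return the
// first file that exists.
static bool resolve_model_file_from_dir_sketch(const std::string& name,
                                               const std::string& dir,
                                               const std::vector<std::string>& valid_ext,
                                               std::string& out_path) {
    for (const auto& ext : valid_ext) {
        fs::path candidate = fs::path(dir) / (name + ext);
        if (fs::exists(candidate)) {
            out_path = candidate.string();
            return true;
        }
    }
    return false;  // strict callers treat this as a fatal config error
}
```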
@@ -2031,29 +1883,6 @@ bool SDGenerationParams::validate(SDMode mode) {
 return false;
 }

-if (hires_enabled) {
-if (hires_width < 0 || hires_height < 0) {
-LOG_ERROR("error: hires target width and height must be >= 0");
-return false;
-}
-if (hires_scale <= 0.f && hires_width <= 0 && hires_height <= 0) {
-LOG_ERROR("error: hires scale must be positive when target size is not set");
-return false;
-}
-if (hires_steps < 0) {
-LOG_ERROR("error: hires steps must be >= 0");
-return false;
-}
-if (hires_denoising_strength <= 0.f || hires_denoising_strength > 1.f) {
-LOG_ERROR("error: hires denoising strength must be in (0.0, 1.0]");
-return false;
-}
-if (hires_upscale_tile_size < 1) {
-LOG_ERROR("error: hires upscale tile size must be positive");
-return false;
-}
-}
-
 if (mode == UPSCALE) {
 if (init_image_path.length() == 0) {
 LOG_ERROR("error: upscale mode needs an init image (--init-img)\n");
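Read together, the removed checks accept, for example, `scale=2.0, width=0, height=0, steps=0, denoising_strength=0.7, tile=128`, and reject a denoising strength of exactly `0.0` (the valid interval is half-open). A condensed restatement for reference:

```cpp
// Condensed restatement of the removed hires validation rules.
static bool hires_params_valid(float scale, int w, int h, int steps,
                               float denoise, int tile) {
    if (w < 0 || h < 0) return false;                   // explicit targets must be non-negative
    if (scale <= 0.f && w <= 0 && h <= 0) return false; // need either a scale or a target size
    if (steps < 0) return false;                        // 0 means "reuse --steps"
    if (denoise <= 0.f || denoise > 1.f) return false;  // valid range is (0.0, 1.0]
    return tile >= 1;
}
```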
@@ -2064,11 +1893,8 @@ bool SDGenerationParams::validate(SDMode mode) {
 return true;
 }

-bool SDGenerationParams::resolve_and_validate(SDMode mode,
-const std::string& lora_model_dir,
-const std::string& hires_upscalers_dir,
-bool strict) {
-if (!resolve(lora_model_dir, hires_upscalers_dir, strict)) {
+bool SDGenerationParams::resolve_and_validate(SDMode mode, const std::string& lora_model_dir, bool strict) {
+if (!resolve(lora_model_dir, strict)) {
 return false;
 }
 if (!validate(mode)) {
@@ -2108,8 +1934,6 @@ sd_img_gen_params_t SDGenerationParams::to_sd_img_gen_params_t() {
 high_noise_sample_params.guidance.slg.layer_count = high_noise_skip_layers.size();
 sample_params.custom_sigmas = custom_sigmas.empty() ? nullptr : custom_sigmas.data();
 sample_params.custom_sigmas_count = static_cast<int>(custom_sigmas.size());
-sample_params.extra_sample_args = extra_sample_args.empty() ? nullptr : extra_sample_args.c_str();
-high_noise_sample_params.extra_sample_args = high_noise_extra_sample_args.empty() ? nullptr : high_noise_extra_sample_args.c_str();
 cache_params.scm_mask = scm_mask.empty() ? nullptr : scm_mask.c_str();

 sd_pm_params_t pm_params = {
@@ -2141,16 +1965,6 @@ sd_img_gen_params_t SDGenerationParams::to_sd_img_gen_params_t() {
 params.pm_params = pm_params;
 params.vae_tiling_params = vae_tiling_params;
 params.cache = cache_params;
-
-params.hires.enabled = hires_enabled;
-params.hires.upscaler = resolved_hires_upscaler;
-params.hires.model_path = hires_upscaler_model_path.empty() ? nullptr : hires_upscaler_model_path.c_str();
-params.hires.scale = hires_scale;
-params.hires.target_width = hires_width;
-params.hires.target_height = hires_height;
-params.hires.steps = hires_steps;
-params.hires.denoising_strength = hires_denoising_strength;
-params.hires.upscale_tile_size = hires_upscale_tile_size;
 return params;
 }

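A recurring idiom in these conversion functions is mapping owned `std::string`/`std::vector` members onto nullable raw pointers in the C-style param structs; the pointers stay valid only while the owning object is alive and unmodified. A stripped-down sketch of the idiom (the types here are illustrative):

```cpp
#include <string>
#include <vector>

struct OwnerSketch {
    std::string extra_sample_args;
    std::vector<float> custom_sigmas;
};

struct CParamsSketch {
    const char* extra_sample_args;  // nullptr means "not set"
    const float* custom_sigmas;
    int custom_sigmas_count;
};

// The pointers borrow from `o`; they dangle if `o` is destroyed or mutated.
static CParamsSketch to_c_params(const OwnerSketch& o) {
    CParamsSketch p{};
    p.extra_sample_args   = o.extra_sample_args.empty() ? nullptr : o.extra_sample_args.c_str();
    p.custom_sigmas       = o.custom_sigmas.empty() ? nullptr : o.custom_sigmas.data();
    p.custom_sigmas_count = static_cast<int>(o.custom_sigmas.size());
    return p;
}
```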
@@ -2179,8 +1993,6 @@ sd_vid_gen_params_t SDGenerationParams::to_sd_vid_gen_params_t() {
 high_noise_sample_params.guidance.slg.layer_count = high_noise_skip_layers.size();
 sample_params.custom_sigmas = custom_sigmas.empty() ? nullptr : custom_sigmas.data();
 sample_params.custom_sigmas_count = static_cast<int>(custom_sigmas.size());
-sample_params.extra_sample_args = extra_sample_args.empty() ? nullptr : extra_sample_args.c_str();
-high_noise_sample_params.extra_sample_args = high_noise_extra_sample_args.empty() ? nullptr : high_noise_extra_sample_args.c_str();
 cache_params.scm_mask = scm_mask.empty() ? nullptr : scm_mask.c_str();

 params.loras = lora_vec.empty() ? nullptr : lora_vec.data();
@@ -2277,15 +2089,6 @@ std::string SDGenerationParams::to_string() const {
 << " seed: " << seed << ",\n"
 << " upscale_repeats: " << upscale_repeats << ",\n"
 << " upscale_tile_size: " << upscale_tile_size << ",\n"
-<< " hires: { enabled: " << (hires_enabled ? "true" : "false")
-<< ", upscaler: \"" << hires_upscaler << "\""
-<< ", model_path: \"" << hires_upscaler_model_path << "\""
-<< ", scale: " << hires_scale
-<< ", target_width: " << hires_width
-<< ", target_height: " << hires_height
-<< ", steps: " << hires_steps
-<< ", denoising_strength: " << hires_denoising_strength
-<< ", upscale_tile_size: " << hires_upscale_tile_size << " },\n"
 << " vae_tiling_params: { "
 << vae_tiling_params.enabled << ", "
 << vae_tiling_params.tile_size_x << ", "
@@ -2301,193 +2104,7 @@ std::string version_string() {
 return std::string("stable-diffusion.cpp version ") + sd_version() + ", commit " + sd_commit();
 }

-static std::string safe_json_string(const char* value) {
-return value ? value : "";
-}
-
-static void set_json_basename_if_not_empty(json& target, const char* key, const std::string& path) {
-if (!path.empty()) {
-target[key] = sd_basename(path);
-}
-}
-
-static json build_sampling_metadata_json(const sd_sample_params_t& sample_params,
-const std::vector<int>& skip_layers,
-const std::vector<float>* custom_sigmas = nullptr) {
-json sampling = {
-{"steps", sample_params.sample_steps},
-{"eta", sample_params.eta},
-{"shifted_timestep", sample_params.shifted_timestep},
-{"flow_shift", sample_params.flow_shift},
-{"extra_sample_args", safe_json_string(sample_params.extra_sample_args)},
-{"guidance",
-{
-{"txt_cfg", sample_params.guidance.txt_cfg},
-{"img_cfg", sample_params.guidance.img_cfg},
-{"distilled_guidance", sample_params.guidance.distilled_guidance},
-{"slg",
-{
-{"scale", sample_params.guidance.slg.scale},
-{"layers", skip_layers},
-{"start", sample_params.guidance.slg.layer_start},
-{"end", sample_params.guidance.slg.layer_end},
-}},
-}},
-};
-if (sample_params.sample_method != SAMPLE_METHOD_COUNT) {
-sampling["method"] = safe_json_string(sd_sample_method_name(sample_params.sample_method));
-}
-if (sample_params.scheduler != SCHEDULER_COUNT) {
-sampling["scheduler"] = safe_json_string(sd_scheduler_name(sample_params.scheduler));
-}
-if (custom_sigmas != nullptr) {
-sampling["custom_sigmas"] = *custom_sigmas;
-}
-return sampling;
-}
-
-std::string build_sdcpp_image_metadata_json(const SDContextParams& ctx_params,
-const SDGenerationParams& gen_params,
-int64_t seed,
-SDMode mode) {
-json root;
-root["schema"] = "sdcpp.image.params/v1";
-root["mode"] = mode == VID_GEN ? "vid_gen" : "img_gen";
-root["generator"] = {
-{"name", "stable-diffusion.cpp"},
-{"version", safe_json_string(sd_version())},
-{"commit", safe_json_string(sd_commit())},
-};
-root["seed"] = seed;
-root["width"] = gen_params.get_resolved_width();
-root["height"] = gen_params.get_resolved_height();
-
-root["prompt"] = {
-{"positive", gen_params.prompt},
-{"negative", gen_params.negative_prompt},
-};
-root["sampling"] = build_sampling_metadata_json(gen_params.sample_params,
-gen_params.skip_layers,
-&gen_params.custom_sigmas);
-
-json models;
-set_json_basename_if_not_empty(models, "model", ctx_params.model_path);
-set_json_basename_if_not_empty(models, "clip_l", ctx_params.clip_l_path);
-set_json_basename_if_not_empty(models, "clip_g", ctx_params.clip_g_path);
-set_json_basename_if_not_empty(models, "clip_vision", ctx_params.clip_vision_path);
-set_json_basename_if_not_empty(models, "t5xxl", ctx_params.t5xxl_path);
-set_json_basename_if_not_empty(models, "llm", ctx_params.llm_path);
-set_json_basename_if_not_empty(models, "llm_vision", ctx_params.llm_vision_path);
-set_json_basename_if_not_empty(models, "diffusion_model", ctx_params.diffusion_model_path);
-set_json_basename_if_not_empty(models, "high_noise_diffusion_model", ctx_params.high_noise_diffusion_model_path);
-set_json_basename_if_not_empty(models, "vae", ctx_params.vae_path);
-set_json_basename_if_not_empty(models, "taesd", ctx_params.taesd_path);
-set_json_basename_if_not_empty(models, "control_net", ctx_params.control_net_path);
-root["models"] = std::move(models);
-
-root["clip_skip"] = gen_params.clip_skip;
-root["strength"] = gen_params.strength;
-root["control_strength"] = gen_params.control_strength;
-root["auto_resize_ref_image"] = gen_params.auto_resize_ref_image;
-root["increase_ref_index"] = gen_params.increase_ref_index;
-if (mode == VID_GEN) {
-root["video"] = {
-{"frame_count", gen_params.video_frames},
-{"fps", gen_params.fps},
-};
-root["moe_boundary"] = gen_params.moe_boundary;
-root["vace_strength"] = gen_params.vace_strength;
-root["high_noise_sampling"] = build_sampling_metadata_json(gen_params.high_noise_sample_params,
-gen_params.high_noise_skip_layers);
-}
-
-root["rng"] = safe_json_string(sd_rng_type_name(ctx_params.rng_type));
-if (ctx_params.sampler_rng_type != RNG_TYPE_COUNT) {
-root["sampler_rng"] = safe_json_string(sd_rng_type_name(ctx_params.sampler_rng_type));
-}
-
-json loras = json::array();
-for (const auto& entry : gen_params.lora_map) {
-loras.push_back({
-{"name", sd_basename(entry.first)},
-{"multiplier", entry.second},
-{"is_high_noise", false},
-});
-}
-for (const auto& entry : gen_params.high_noise_lora_map) {
-loras.push_back({
-{"name", sd_basename(entry.first)},
-{"multiplier", entry.second},
-{"is_high_noise", true},
-});
-}
-if (!loras.empty()) {
-root["loras"] = std::move(loras);
-}
-
-if (gen_params.hires_enabled) {
-root["hires"] = {
-{"enabled", gen_params.hires_enabled},
-{"upscaler", gen_params.hires_upscaler},
-{"model", gen_params.hires_upscaler_model_path.empty() ? "" : sd_basename(gen_params.hires_upscaler_model_path)},
-{"scale", gen_params.hires_scale},
-{"target_width", gen_params.hires_width},
-{"target_height", gen_params.hires_height},
-{"steps", gen_params.hires_steps},
-{"denoising_strength", gen_params.hires_denoising_strength},
-{"upscale_tile_size", gen_params.hires_upscale_tile_size},
-};
-}
-
-if (gen_params.cache_params.mode != SD_CACHE_DISABLED) {
-root["cache"] = {
-{"requested_mode", gen_params.cache_mode},
-{"requested_option", gen_params.cache_option},
-{"mode", gen_params.cache_params.mode},
-{"scm_mask", gen_params.scm_mask},
-{"scm_policy_dynamic", gen_params.scm_policy_dynamic},
-{"reuse_threshold", gen_params.cache_params.reuse_threshold},
-{"start_percent", gen_params.cache_params.start_percent},
-{"end_percent", gen_params.cache_params.end_percent},
-{"error_decay_rate", gen_params.cache_params.error_decay_rate},
-{"use_relative_threshold", gen_params.cache_params.use_relative_threshold},
-{"reset_error_on_compute", gen_params.cache_params.reset_error_on_compute},
-{"Fn_compute_blocks", gen_params.cache_params.Fn_compute_blocks},
-{"Bn_compute_blocks", gen_params.cache_params.Bn_compute_blocks},
-{"residual_diff_threshold", gen_params.cache_params.residual_diff_threshold},
-{"max_warmup_steps", gen_params.cache_params.max_warmup_steps},
-{"max_cached_steps", gen_params.cache_params.max_cached_steps},
-{"max_continuous_cached_steps", gen_params.cache_params.max_continuous_cached_steps},
-{"taylorseer_n_derivatives", gen_params.cache_params.taylorseer_n_derivatives},
-{"taylorseer_skip_interval", gen_params.cache_params.taylorseer_skip_interval},
-{"spectrum_w", gen_params.cache_params.spectrum_w},
-{"spectrum_m", gen_params.cache_params.spectrum_m},
-{"spectrum_lam", gen_params.cache_params.spectrum_lam},
-{"spectrum_window_size", gen_params.cache_params.spectrum_window_size},
-{"spectrum_flex_window", gen_params.cache_params.spectrum_flex_window},
-{"spectrum_warmup_steps", gen_params.cache_params.spectrum_warmup_steps},
-{"spectrum_stop_percent", gen_params.cache_params.spectrum_stop_percent},
-};
-}
-
-if (gen_params.vae_tiling_params.enabled) {
-root["vae_tiling"] = {
-{"enabled", gen_params.vae_tiling_params.enabled},
-{"tile_size_x", gen_params.vae_tiling_params.tile_size_x},
-{"tile_size_y", gen_params.vae_tiling_params.tile_size_y},
-{"target_overlap", gen_params.vae_tiling_params.target_overlap},
-{"rel_size_x", gen_params.vae_tiling_params.rel_size_x},
-{"rel_size_y", gen_params.vae_tiling_params.rel_size_y},
-};
-}
-
-return root.dump();
-}
-
-std::string get_image_params(const SDContextParams& ctx_params,
-const SDGenerationParams& gen_params,
-int64_t seed,
-SDMode mode) {
+std::string get_image_params(const SDContextParams& ctx_params, const SDGenerationParams& gen_params, int64_t seed) {
 std::string parameter_string;
 if (gen_params.prompt_with_lora.size() != 0) {
 parameter_string += gen_params.prompt_with_lora + "\n";
@@ -2500,7 +2117,7 @@ std::string get_image_params(const SDContextParams& ctx_params,
 parameter_string += "Steps: " + std::to_string(gen_params.sample_params.sample_steps) + ", ";
 parameter_string += "CFG scale: " + std::to_string(gen_params.sample_params.guidance.txt_cfg) + ", ";
 if (gen_params.sample_params.guidance.slg.scale != 0 && gen_params.skip_layers.size() != 0) {
-parameter_string += "SLG scale: " + std::to_string(gen_params.sample_params.guidance.slg.scale) + ", ";
+parameter_string += "SLG scale: " + std::to_string(gen_params.sample_params.guidance.txt_cfg) + ", ";
 parameter_string += "Skip layers: [";
 for (const auto& layer : gen_params.skip_layers) {
 parameter_string += std::to_string(layer) + ", ";
@@ -2511,9 +2128,6 @@ std::string get_image_params(const SDContextParams& ctx_params,
 }
 parameter_string += "Guidance: " + std::to_string(gen_params.sample_params.guidance.distilled_guidance) + ", ";
 parameter_string += "Eta: " + std::to_string(gen_params.sample_params.eta) + ", ";
-if (!gen_params.extra_sample_args.empty()) {
-parameter_string += "Extra sample args: " + gen_params.extra_sample_args + ", ";
-}
 parameter_string += "Seed: " + std::to_string(seed) + ", ";
 parameter_string += "Size: " + std::to_string(gen_params.get_resolved_width()) + "x" + std::to_string(gen_params.get_resolved_height()) + ", ";
 parameter_string += "Model: " + sd_basename(ctx_params.model_path) + ", ";
@@ -2548,14 +2162,6 @@ std::string get_image_params(const SDContextParams& ctx_params,
 if (gen_params.clip_skip != -1) {
 parameter_string += "Clip skip: " + std::to_string(gen_params.clip_skip) + ", ";
 }
-if (gen_params.hires_enabled) {
-parameter_string += "Hires upscale: " + gen_params.hires_upscaler + ", ";
-parameter_string += "Hires scale: " + std::to_string(gen_params.hires_scale) + ", ";
-parameter_string += "Hires resize: " + std::to_string(gen_params.hires_width) + "x" + std::to_string(gen_params.hires_height) + ", ";
-parameter_string += "Hires steps: " + std::to_string(gen_params.hires_steps) + ", ";
-parameter_string += "Denoising strength: " + std::to_string(gen_params.hires_denoising_strength) + ", ";
-}
 parameter_string += "Version: stable-diffusion.cpp";
-parameter_string += ", SDCPP: " + build_sdcpp_image_metadata_json(ctx_params, gen_params, seed, mode);
 return parameter_string;
 }
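For orientation, the removed builder appends one compact JSON object to the classic parameter string after an `SDCPP:` marker. An abridged, hand-written illustration of such a payload (values and model name are made up; the real output also carries `clip_skip`, `strength`, and the other always-set fields):

```json
{
  "schema": "sdcpp.image.params/v1",
  "mode": "img_gen",
  "generator": {"name": "stable-diffusion.cpp", "version": "...", "commit": "..."},
  "seed": 42,
  "width": 512,
  "height": 512,
  "prompt": {"positive": "a photo of a cat", "negative": ""},
  "sampling": {
    "steps": 20,
    "eta": 0.0,
    "guidance": {
      "txt_cfg": 7.0,
      "img_cfg": 7.0,
      "distilled_guidance": 3.5,
      "slg": {"scale": 0.0, "layers": [7, 8, 9], "start": 0.01, "end": 0.2}
    }
  },
  "models": {"model": "sd_v1-5.safetensors"},
  "rng": "cuda"
}
```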
@@ -101,7 +101,6 @@ struct SDContextParams {
 sd_type_t wtype = SD_TYPE_COUNT;
 std::string tensor_type_rules;
 std::string lora_model_dir = ".";
-std::string hires_upscalers_dir;

 std::map<std::string, std::string> embedding_map;
 std::vector<sd_embedding_t> embedding_vec;
@@ -109,7 +108,6 @@ struct SDContextParams {
 rng_type_t rng_type = CUDA_RNG;
 rng_type_t sampler_rng_type = RNG_TYPE_COUNT;
 bool offload_params_to_cpu = false;
-float max_vram = 0.f;
 bool enable_mmap = false;
 bool control_net_cpu = false;
 bool clip_on_cpu = false;
@@ -168,8 +166,6 @@ struct SDGenerationParams {

 sd_sample_params_t sample_params;
 sd_sample_params_t high_noise_sample_params;
-std::string extra_sample_args;
-std::string high_noise_extra_sample_args;
 std::vector<int> skip_layers = {7, 8, 9};
 std::vector<int> high_noise_skip_layers = {7, 8, 9};

@@ -194,23 +190,12 @@ struct SDGenerationParams {
 int upscale_repeats = 1;
 int upscale_tile_size = 128;

-bool hires_enabled = false;
-std::string hires_upscaler = "Latent";
-std::string hires_upscaler_model_path;
-float hires_scale = 2.f;
-int hires_width = 0;
-int hires_height = 0;
-int hires_steps = 0;
-float hires_denoising_strength = 0.7f;
-int hires_upscale_tile_size = 128;
-
 std::map<std::string, float> lora_map;
 std::map<std::string, float> high_noise_lora_map;

 // Derived and normalized fields.
 std::string prompt_with_lora; // for metadata record only
 std::vector<sd_lora_t> lora_vec;
-sd_hires_upscaler_t resolved_hires_upscaler;

 // Owned execution payload.
 SDImageOwner init_image;
@@ -240,25 +225,15 @@ struct SDGenerationParams {
 void set_width_and_height_if_unset(int w, int h);
 int get_resolved_width() const;
 int get_resolved_height() const;
-bool resolve(const std::string& lora_model_dir, const std::string& hires_upscalers_dir, bool strict = false);
+bool resolve(const std::string& lora_model_dir, bool strict = false);
 bool validate(SDMode mode);
-bool resolve_and_validate(SDMode mode,
-const std::string& lora_model_dir,
-const std::string& hires_upscalers_dir,
-bool strict = false);
+bool resolve_and_validate(SDMode mode, const std::string& lora_model_dir, bool strict = false);
 sd_img_gen_params_t to_sd_img_gen_params_t();
 sd_vid_gen_params_t to_sd_vid_gen_params_t();
 std::string to_string() const;
 };

 std::string version_string();
-std::string build_sdcpp_image_metadata_json(const SDContextParams& ctx_params,
-const SDGenerationParams& gen_params,
-int64_t seed,
-SDMode mode = IMG_GEN);
-std::string get_image_params(const SDContextParams& ctx_params,
-const SDGenerationParams& gen_params,
-int64_t seed,
-SDMode mode = IMG_GEN);
+std::string get_image_params(const SDContextParams& ctx_params, const SDGenerationParams& gen_params, int64_t seed);

 #endif // __EXAMPLES_COMMON_COMMON_H__
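A short sketch of how the master-side declarations above are meant to be driven (directory paths and prompt are placeholders; error handling elided):

```cpp
// Sketch of driving the master-side API declared above; paths are placeholders.
bool generate_once() {
    SDGenerationParams gen_params;
    gen_params.prompt        = "a photo of a cat";
    gen_params.hires_enabled = true;  // hires_* members exist only on the master side
    gen_params.hires_scale   = 2.f;

    // master signature: mode, lora dir, hires upscaler dir, strict flag
    if (!gen_params.resolve_and_validate(IMG_GEN, "./loras", "./upscalers", false)) {
        return false;
    }
    sd_img_gen_params_t params = gen_params.to_sd_img_gen_params_t();
    (void)params;  // hand the struct to the generation call in real code
    return true;
}
```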
@@ -136,8 +136,7 @@ Context Options:
 --clip_g <string> path to the clip-g text encoder
 --clip_vision <string> path to the clip-vision encoder
 --t5xxl <string> path to the t5xxl text encoder
---llm <string> path to the llm text encoder. For example: (qwenvl2.5 for qwen-image,
-mistral-small3.2 for flux2, ...)
+--llm <string> path to the llm text encoder. For example: (qwenvl2.5 for qwen-image, mistral-small3.2 for flux2, ...)
 --llm_vision <string> path to the llm vit
 --qwen2vl <string> alias of --llm. Deprecated.
 --qwen2vl_vision <string> alias of --llm_vision. Deprecated.
@@ -149,18 +148,16 @@ Context Options:
 --control-net <string> path to control net model
 --embd-dir <string> embeddings directory
 --lora-model-dir <string> lora model directory
---hires-upscalers-dir <string> highres fix upscaler model directory
 --tensor-type-rules <string> weight type per tensor pattern (example: "^vae\.=f16,model\.=q8_0")
 --photo-maker <string> path to PHOTOMAKER model
 --upscale-model <string> path to esrgan model.
--t, --threads <int> number of threads to use during computation (default: -1). If threads <= 0,
-then threads will be set to the number of CPU physical cores
+-t, --threads <int> number of threads to use during computation (default: -1). If threads <= 0, then threads will be set to the number of
+CPU physical cores
 --chroma-t5-mask-pad <int> t5 mask pad size of chroma
---max-vram <float> maximum VRAM budget in GiB for graph-cut segmented execution. 0 disables
-graph splitting
+--vae-tile-overlap <float> tile overlap for vae tiling, in fraction of tile size (default: 0.5)
+--vae-tiling process vae in tiles to reduce memory usage
 --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae
---offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM
-when needed
+--offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed
 --mmap whether to memory-map model
 --control-net-cpu keep controlnet in cpu (for low vram)
 --clip-on-cpu keep clip in cpu (for low vram)
@@ -175,19 +172,20 @@ Context Options:
 --chroma-disable-dit-mask disable dit mask for chroma
 --qwen-image-zero-cond-t enable zero_cond_t for qwen image
 --chroma-enable-t5-mask enable t5 mask for chroma
---type weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K,
-q4_K). If not specified, the default is the type of the weight file
+--type weight type (examples: f32, f16, q4_0, q4_1, q5_0, q5_1, q8_0, q2_K, q3_K, q4_K). If not specified, the default is the
+type of the weight file
 --rng RNG, one of [std_default, cuda, cpu], default: cuda(sd-webui), cpu(comfyui)
 --sampler-rng sampler RNG, one of [std_default, cuda, cpu]. If not specified, use --rng
---prediction prediction type override, one of [eps, v, edm_v, sd3_flow, flux_flow,
-flux2_flow]
---lora-apply-mode the way to apply LoRA, one of [auto, immediately, at_runtime], default is
-auto. In auto mode, if the model weights contain any quantized parameters,
-the at_runtime mode will be used; otherwise, immediately will be used.The
-immediately mode may have precision and compatibility issues with quantized
-parameters, but it usually offers faster inference speed and, in some cases,
-lower memory usage. The at_runtime mode, on the other hand, is exactly the
-opposite.
+--prediction prediction type override, one of [eps, v, edm_v, sd3_flow, flux_flow, flux2_flow]
+--lora-apply-mode the way to apply LoRA, one of [auto, immediately, at_runtime], default is auto. In auto mode, if the model weights
+contain any quantized parameters, the at_runtime mode will be used; otherwise,
+immediately will be used.The immediately mode may have precision and
+compatibility issues with quantized parameters, but it usually offers faster inference
+speed and, in some cases, lower memory usage. The at_runtime mode, on the
+other hand, is exactly the opposite.
+--vae-tile-size tile size for vae tiling, format [X]x[Y] (default: 32x32)
+--vae-relative-tile-size relative tile size for vae tiling, format [X]x[Y], in fraction of image size if < 1, in number of tiles per dim if >=1
+(overrides --vae-tile-size)

 Default Generation Options:
 -p, --prompt <string> the prompt to render
@@ -196,99 +194,65 @@ Default Generation Options:
 --end-img <string> path to the end image, required by flf2v
 --mask <string> path to the mask image
 --control-image <string> path to control image, control net
---control-video <string> path to control video frames, It must be a directory path. The video frames
-inside should be stored as images in lexicographical (character) order. For
-example, if the control video path is `frames`, the directory contain images
-such as 00.png, 01.png, ... etc.
+--control-video <string> path to control video frames, It must be a directory path. The video frames inside should be stored as images in
+lexicographical (character) order. For example, if the control video path is
+`frames`, the directory contain images such as 00.png, 01.png, ... etc.
 --pm-id-images-dir <string> path to PHOTOMAKER input id images dir
 --pm-id-embed-path <string> path to PHOTOMAKER v2 id embed
---hires-upscaler <string> highres fix upscaler, Lanczos, Nearest, Latent, Latent (nearest), Latent
-(nearest-exact), Latent (antialiased), Latent (bicubic), Latent (bicubic
-antialiased), or a model name under --hires-upscalers-dir (default: Latent)
---extra-sample-args <string> extra sampler args, key=value list. Currently lcm supports noise_clip_std,
-noise_scale_start, noise_scale_end
 -H, --height <int> image height, in pixel space (default: 512)
 -W, --width <int> image width, in pixel space (default: 512)
 --steps <int> number of sample steps (default: 20)
 --high-noise-steps <int> (high noise) number of sample steps (default: -1 = auto)
---clip-skip <int> ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer
-(default: -1). <= 0 represents unspecified, will be 1 for SD1.x, 2 for SD2.x
+--clip-skip <int> ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer (default: -1). <= 0 represents unspecified,
+will be 1 for SD1.x, 2 for SD2.x
 -b, --batch-count <int> batch count
 --video-frames <int> video frames (default: 1)
 --fps <int> fps (default: 24)
---timestep-shift <int> shift timestep for NitroFusion models (default: 0). recommended N for
-NitroSD-Realism around 250 and 500 for NitroSD-Vibrant
+--timestep-shift <int> shift timestep for NitroFusion models (default: 0). recommended N for NitroSD-Realism around 250 and 500 for
+NitroSD-Vibrant
 --upscale-repeats <int> Run the ESRGAN upscaler this many times (default: 1)
 --upscale-tile-size <int> tile size for ESRGAN upscaling (default: 128)
---hires-width <int> highres fix target width, 0 to use --hires-scale (default: 0)
---hires-height <int> highres fix target height, 0 to use --hires-scale (default: 0)
---hires-steps <int> highres fix second pass sample steps, 0 to reuse --steps (default: 0)
---hires-upscale-tile-size <int> highres fix upscaler tile size, reserved for model-backed upscalers (default:
-128)
 --cfg-scale <float> unconditional guidance scale: (default: 7.0)
---img-cfg-scale <float> image guidance scale for inpaint or instruct-pix2pix models: (default: same
-as --cfg-scale)
+--img-cfg-scale <float> image guidance scale for inpaint or instruct-pix2pix models: (default: same as --cfg-scale)
 --guidance <float> distilled guidance scale for models with guidance input (default: 3.5)
---slg-scale <float> skip layer guidance (SLG) scale, only for DiT models: (default: 0). 0 means
-disabled, a value of 2.5 is nice for sd3.5 medium
+--slg-scale <float> skip layer guidance (SLG) scale, only for DiT models: (default: 0). 0 means disabled, a value of 2.5 is nice for sd3.5
+medium
 --skip-layer-start <float> SLG enabling point (default: 0.01)
 --skip-layer-end <float> SLG disabling point (default: 0.2)
---eta <float> noise multiplier (default: 0 for ddim_trailing, tcd, res_multistep and
-res_2s; 1 for euler_a, er_sde and dpm++2s_a)
+--eta <float> noise multiplier (default: 0 for ddim_trailing, tcd, res_multistep and res_2s; 1 for euler_a, er_sde and dpm++2s_a)
 --flow-shift <float> shift value for Flow models like SD3.x or WAN (default: auto)
 --high-noise-cfg-scale <float> (high noise) unconditional guidance scale: (default: 7.0)
---high-noise-img-cfg-scale <float> (high noise) image guidance scale for inpaint or instruct-pix2pix models
-(default: same as --cfg-scale)
---high-noise-guidance <float> (high noise) distilled guidance scale for models with guidance input
-(default: 3.5)
---high-noise-slg-scale <float> (high noise) skip layer guidance (SLG) scale, only for DiT models: (default:
-0)
+--high-noise-img-cfg-scale <float> (high noise) image guidance scale for inpaint or instruct-pix2pix models (default: same as --cfg-scale)
+--high-noise-guidance <float> (high noise) distilled guidance scale for models with guidance input (default: 3.5)
+--high-noise-slg-scale <float> (high noise) skip layer guidance (SLG) scale, only for DiT models: (default: 0)
 --high-noise-skip-layer-start <float> (high noise) SLG enabling point (default: 0.01)
 --high-noise-skip-layer-end <float> (high noise) SLG disabling point (default: 0.2)
---high-noise-eta <float> (high noise) noise multiplier (default: 0 for ddim_trailing, tcd,
-res_multistep and res_2s; 1 for euler_a, er_sde and dpm++2s_a)
+--high-noise-eta <float> (high noise) noise multiplier (default: 0 for ddim_trailing, tcd, res_multistep and res_2s; 1 for euler_a, er_sde and dpm++2s_a)
 --strength <float> strength for noising/unnoising (default: 0.75)
 --pm-style-strength <float>
---control-strength <float> strength to apply Control Net (default: 0.9). 1.0 corresponds to full
-destruction of information in init image
---moe-boundary <float> timestep boundary for Wan2.2 MoE model. (default: 0.875). Only enabled if
-`--high-noise-steps` is set to -1
+--control-strength <float> strength to apply Control Net (default: 0.9). 1.0 corresponds to full destruction of information in init image
+--moe-boundary <float> timestep boundary for Wan2.2 MoE model. (default: 0.875). Only enabled if `--high-noise-steps` is set to -1
 --vace-strength <float> wan vace strength
---vae-tile-overlap <float> tile overlap for vae tiling, in fraction of tile size (default: 0.5)
---hires-scale <float> highres fix scale when target size is not set (default: 2.0)
---hires-denoising-strength <float> highres fix second pass denoising strength (default: 0.7)
---increase-ref-index automatically increase the indices of references images based on the order
-they are listed (starting with 1).
+--increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1).
 --disable-auto-resize-ref-image disable auto resize of ref images
 --disable-image-metadata do not embed generation metadata on image files
---vae-tiling process vae in tiles to reduce memory usage
---hires enable highres fix
 -s, --seed RNG seed (default: 42, use random seed for < 0)
---sampling-method sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m,
-dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd, res_multistep, res_2s,
-er_sde, euler_cfg_pp, euler_a_cfg_pp] (default: euler for Flux/SD3/Wan, euler_a otherwise)
+--sampling-method sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing,
+tcd, res_multistep, res_2s, er_sde] (default: euler for Flux/SD3/Wan, euler_a
+otherwise)
---high-noise-sampling-method (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a,
-dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm, ddim_trailing, tcd, res_multistep,
-res_2s, er_sde, euler_cfg_pp, euler_a_cfg_pp] default: euler for Flux/SD3/Wan, euler_a otherwise
+--high-noise-sampling-method (high noise) sampling method, one of [euler, euler_a, heun, dpm2, dpm++2s_a, dpm++2m, dpm++2mv2, ipndm, ipndm_v, lcm,
+ddim_trailing, tcd, res_multistep, res_2s, er_sde] default: euler for Flux/SD3/Wan,
+euler_a otherwise
---scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits,
-smoothstep, sgm_uniform, simple, kl_optimal, lcm, bong_tangent], default:
-discrete
---sigmas custom sigma values for the sampler, comma-separated (e.g.,
-"14.61,7.8,3.5,0.0").
+--scheduler denoiser sigma scheduler, one of [discrete, karras, exponential, ays, gits, smoothstep, sgm_uniform, simple,
+kl_optimal, lcm, bong_tangent], default: discrete
+--sigmas custom sigma values for the sampler, comma-separated (e.g., "14.61,7.8,3.5,0.0").
 --skip-layers layers to skip for SLG steps (default: [7,8,9])
 --high-noise-skip-layers (high noise) layers to skip for SLG steps (default: [7,8,9])
 -r, --ref-image reference image for Flux Kontext models (can be used multiple times)
---cache-mode caching method: 'easycache' (DiT), 'ucache' (UNET),
-'dbcache'/'taylorseer'/'cache-dit' (DiT block-level), 'spectrum' (UNET/DiT
-Chebyshev+Taylor forecasting)
+--cache-mode caching method: 'easycache' (DiT), 'ucache' (UNET), 'dbcache'/'taylorseer'/'cache-dit' (DiT block-level), 'spectrum' (UNET/DiT Chebyshev+Taylor forecasting)
 --cache-option named cache params (key=value format, comma-separated). easycache/ucache:
-threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit:
-Fn=,Bn=,threshold=,warmup=; spectrum: w=,m=,lam=,window=,flex=,warmup=,stop=.
-Examples: "threshold=0.25" or "threshold=1.5,reset=0"
---scm-mask SCM steps mask for cache-dit: comma-separated 0/1 (e.g.,
-"1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache
+threshold=,start=,end=,decay=,relative=,reset=; dbcache/taylorseer/cache-dit: Fn=,Bn=,threshold=,warmup=. Examples:
+"threshold=0.25" or "threshold=1.5,reset=0"
+--scm-mask SCM steps mask for cache-dit: comma-separated 0/1 (e.g., "1,1,1,0,0,1,0,0,1,0") - 1=compute, 0=can cache
 --scm-policy SCM policy: 'dynamic' (default) or 'static'
---vae-tile-size tile size for vae tiling, format [X]x[Y] (default: 32x32)
---vae-relative-tile-size relative tile size for vae tiling, format [X]x[Y], in fraction of image size
-if < 1, in number of tiles per dim if >=1 (overrides --vae-tile-size)
 ```
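Combining the removed flags, a master-side highres-fix run could be invoked roughly as follows (binary name, model file, and upscaler name are placeholders for whatever your build and model directory provide):

```sh
# 512x512 base pass, then a 2x second pass at 0.6 denoising strength
# using a model-backed upscaler resolved from --hires-upscalers-dir.
./sd -m sd_v1-5.safetensors -p "a photo of a cat" \
  -W 512 -H 512 --steps 20 \
  --hires --hires-scale 2.0 --hires-steps 10 --hires-denoising-strength 0.6 \
  --hires-upscalers-dir ./upscalers --hires-upscaler RealESRGAN_x4plus
```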
@@ -38,8 +38,6 @@ Current generation-related endpoints include:
 - `POST /sdapi/v1/txt2img`
 - `POST /sdapi/v1/img2img`
 - `GET /sdapi/v1/loras`
-- `GET /sdapi/v1/upscalers`
-- `GET /sdapi/v1/latent-upscale-modes`
 - `GET /sdapi/v1/samplers`
 - `GET /sdapi/v1/schedulers`
 - `GET /sdapi/v1/sd-models`
@@ -218,13 +216,6 @@ Currently supported request fields:
 | `scheduler` | `string` | Scheduler name |
 | `lora` | `array<object>` | Structured LoRA list |
 | `extra_images` | `array<string>` | Base64 or data URL images |
-| `enable_hr` | `boolean` | Enable highres fix for `txt2img` |
-| `hr_upscaler` | `string` | `Lanczos`, `Nearest`, a latent mode such as `Latent (nearest-exact)`, or an upscaler model name from `/sdapi/v1/upscalers` |
-| `hr_scale` | `number` | Highres scale when resize target is not set |
-| `hr_resize_x` | `integer` | Highres target width, `0` to use scale |
-| `hr_resize_y` | `integer` | Highres target height, `0` to use scale |
-| `hr_steps` | `integer` | Highres second-pass sample steps, `0` to reuse `steps` |
-| `denoising_strength` | `number` | Highres denoising strength for `txt2img` |

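A minimal `txt2img` body exercising the removed fields might look like this (all values illustrative):

```json
{
  "prompt": "a photo of a cat",
  "steps": 20,
  "width": 512,
  "height": 512,
  "enable_hr": true,
  "hr_upscaler": "Latent (nearest-exact)",
  "hr_scale": 2.0,
  "hr_resize_x": 0,
  "hr_resize_y": 0,
  "hr_steps": 10,
  "denoising_strength": 0.7
}
```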
 Native extension fields:

@@ -250,8 +241,6 @@ Currently supported request fields:
 | `inpainting_mask_invert` | `integer` or `boolean` | Treated as invert flag |
 | `denoising_strength` | `number` | Clamped to `0.0..1.0` |

-Highres fix fields are currently handled for `txt2img`; `img2img` uses `denoising_strength` as image-to-image strength.
-
 Native extension fields:

 - any `sdcpp API` fields embedded through `sd_cpp_extra_args` inside `prompt`
@@ -269,8 +258,6 @@ Response fields:
 Currently exposed:

 - `GET /sdapi/v1/loras`
-- `GET /sdapi/v1/upscalers`
-- `GET /sdapi/v1/latent-upscale-modes`
 - `GET /sdapi/v1/samplers`
 - `GET /sdapi/v1/schedulers`
 - `GET /sdapi/v1/sd-models`
@@ -285,26 +272,6 @@ Response fields:
 | `[].name` | `string` | Display name derived from file stem |
 | `[].path` | `string` | Relative path under the configured LoRA directory |

-`GET /sdapi/v1/upscalers`
-
-| Field | Type | Notes |
-| --- | --- | --- |
-| `[].name` | `string` | Built-in name or model stem |
-| `[].model_name` | `string \| null` | Model family label for model-backed upscalers |
-| `[].model_path` | `string \| null` | Absolute model path for model-backed upscalers |
-| `[].model_url` | `string \| null` | Currently always null |
-| `[].scale` | `integer` | Currently `4` |
-
-Built-in entries include `None`, `Lanczos`, and `Nearest`. Model-backed entries are scanned from the top level of `--hires-upscalers-dir`; subdirectories are not scanned.
-
-`GET /sdapi/v1/latent-upscale-modes`
-
-| Field | Type | Notes |
-| --- | --- | --- |
-| `[].name` | `string` | WebUI-compatible latent upscale mode name |
-
-Built-in latent modes include `Latent`, `Latent (nearest)`, `Latent (nearest-exact)`, `Latent (antialiased)`, `Latent (bicubic)`, and `Latent (bicubic antialiased)`.
-
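An illustrative response shape for the removed `GET /sdapi/v1/upscalers`, following the field table above (the model-backed entry is invented):

```json
[
  {"name": "None", "model_name": null, "model_path": null, "model_url": null, "scale": 4},
  {"name": "Lanczos", "model_name": null, "model_path": null, "model_url": null, "scale": 4},
  {
    "name": "RealESRGAN_x4plus",
    "model_name": "RealESRGAN_x4plus",
    "model_path": "/models/upscalers/RealESRGAN_x4plus.pth",
    "model_url": null,
    "scale": 4
  }
]
```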
 `GET /sdapi/v1/samplers`

 | Field | Type | Notes |
@@ -421,7 +388,6 @@ Top-level fields:
 | `samplers` | `array<string>` | Available sampling methods |
 | `schedulers` | `array<string>` | Available schedulers |
 | `loras` | `array<object>` | Available LoRA entries |
-| `upscalers` | `array<object>` | Available model-backed highres upscalers |
 | `limits` | `object` | Shared queue and size limits |

 `model`
@@ -458,14 +424,6 @@ Shared nested fields:
 | `loras[].name` | `string` |
 | `loras[].path` | `string` |

-`upscalers`
-
-| Field | Type | Notes |
-| --- | --- | --- |
-| `upscalers[].name` | `string` | Built-in name or model stem; use this value in `hires.upscaler` |
-
-Built-in entries include `None`, `Lanczos`, `Nearest`, `Latent`, `Latent (nearest)`, `Latent (nearest-exact)`, `Latent (antialiased)`, `Latent (bicubic)`, and `Latent (bicubic antialiased)`. Model-backed entries are scanned from the top level of `--hires-upscalers-dir`; subdirectories are not scanned.
-
 `limits`

 | Field | Type |
@@ -524,15 +482,6 @@ Shared default fields used by both `img_gen` and `vid_gen`:
 | `auto_resize_ref_image` | `boolean` |
 | `increase_ref_index` | `boolean` |
 | `control_strength` | `number` |
-| `hires` | `object` |
-| `hires.enabled` | `boolean` |
-| `hires.upscaler` | `string` |
-| `hires.scale` | `number` |
-| `hires.target_width` | `integer` |
-| `hires.target_height` | `integer` |
-| `hires.steps` | `integer` |
-| `hires.denoising_strength` | `number` |
-| `hires.upscale_tile_size` | `integer` |

 `vid_gen`-specific default fields:

@@ -565,7 +514,6 @@ Fields returned in `features_by_mode.img_gen`:
 - `ref_images`
 - `lora`
 - `vae_tiling`
-- `hires`
 - `cache`
 - `cancel_queued`
 - `cancel_generating`
@@ -677,16 +625,6 @@ Example:
 },

 "lora": [],
-"hires": {
-"enabled": false,
-"upscaler": "Latent",
-"scale": 2.0,
-"target_width": 0,
-"target_height": 0,
-"steps": 0,
-"denoising_strength": 0.7,
-"upscale_tile_size": 128
-},

 "vae_tiling_params": {
 "enabled": false,
@@ -791,23 +729,12 @@ Other native fields:

 | Field | Type |
 | --- | --- |
-| `hires` | `object` |
-| `hires.enabled` | `boolean` |
-| `hires.upscaler` | `string` |
-| `hires.scale` | `number` |
-| `hires.target_width` | `integer` |
-| `hires.target_height` | `integer` |
-| `hires.steps` | `integer` |
-| `hires.denoising_strength` | `number` |
-| `hires.upscale_tile_size` | `integer` |
 | `vae_tiling_params` | `object` |
 | `cache_mode` | `string` |
 | `cache_option` | `string` |
 | `scm_mask` | `string` |
 | `scm_policy_dynamic` | `boolean` |

-For `hires.upscaler`, use `Lanczos`, `Nearest`, `Latent`, `Latent (nearest)`, `Latent (nearest-exact)`, `Latent (antialiased)`, `Latent (bicubic)`, `Latent (bicubic antialiased)`, or an `upscalers[].name` value from `GET /sdcpp/v1/capabilities`. Model-backed upscalers are resolved as `--hires-upscalers-dir / (name + ext)` and must live directly in that directory.
-
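A native request enabling the removed feature would therefore carry something like the following (values illustrative; the upscaler name must match a built-in or a file stem under `--hires-upscalers-dir`):

```json
{
  "prompt": "a photo of a cat",
  "hires": {
    "enabled": true,
    "upscaler": "RealESRGAN_x4plus",
    "scale": 2.0,
    "target_width": 0,
    "target_height": 0,
    "steps": 10,
    "denoising_strength": 0.7,
    "upscale_tile_size": 128
  }
}
```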
 HTTP-only output fields:

 | Field | Type |
@@ -48,9 +48,7 @@ static void parse_args(int argc,

 if (!svr_params.resolve_and_validate() ||
 !ctx_params.resolve_and_validate(IMG_GEN) ||
-!default_gen_params.resolve_and_validate(IMG_GEN,
-ctx_params.lora_model_dir,
-ctx_params.hires_upscalers_dir)) {
+!default_gen_params.resolve_and_validate(IMG_GEN, ctx_params.lora_model_dir)) {
 print_usage(argv[0], options_vec);
 exit(1);
 }
@@ -97,8 +95,6 @@ int main(int argc, const char** argv) {

 std::vector<LoraEntry> lora_cache;
 std::mutex lora_mutex;
-std::vector<UpscalerEntry> upscaler_cache;
-std::mutex upscaler_mutex;
 AsyncJobManager async_job_manager;
 ServerRuntime runtime = {
 sd_ctx.get(),
@@ -108,8 +104,6 @@ int main(int argc, const char** argv) {
 &default_gen_params,
 &lora_cache,
 &lora_mutex,
-&upscaler_cache,
-&upscaler_mutex,
 &async_job_manager,
 };

@@ -145,7 +139,7 @@ int main(int argc, const char** argv) {
 register_sdapi_endpoints(svr, runtime);
 register_sdcpp_api_endpoints(svr, runtime);

-LOG_INFO("listening on: http://%s:%d\n", svr_params.listen_ip.c_str(), svr_params.listen_port);
+LOG_INFO("listening on: %s:%d\n", svr_params.listen_ip.c_str(), svr_params.listen_port);
 svr.listen(svr_params.listen_ip, svr_params.listen_port);

 {
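The removed `upscaler_cache`/`upscaler_mutex` pair mirrors the LoRA cache kept above: a lazily filled vector that handlers read or rescan only while holding the mutex. A generic sketch of that pattern (entry type and scan step are hypothetical):

```cpp
#include <mutex>
#include <string>
#include <vector>

struct UpscalerEntrySketch {
    std::string name;
    std::string model_path;
};

static std::vector<UpscalerEntrySketch> g_cache;
static std::mutex g_cache_mutex;

// Handlers hold the lock for the whole read-or-refresh so a rescan
// can never race a concurrent reader.
std::vector<UpscalerEntrySketch> snapshot_upscalers(bool rescan) {
    std::lock_guard<std::mutex> lock(g_cache_mutex);
    if (rescan || g_cache.empty()) {
        g_cache.clear();
        // ... scan the top level of the upscalers directory here ...
    }
    return g_cache;  // return by value: callers keep a private copy
}
```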
@@ -70,7 +70,7 @@ static bool build_openai_generation_request(const httplib::Request& req,
 }

 // Intentionally disable prompt-embedded LoRA tag parsing for server APIs.
-if (!request.gen_params.resolve_and_validate(IMG_GEN, "", runtime.ctx_params->hires_upscalers_dir, true)) {
+if (!request.gen_params.resolve_and_validate(IMG_GEN, "", true)) {
 error_message = "invalid params";
 return false;
 }
@@ -212,7 +212,7 @@ static bool build_openai_edit_request(const httplib::Request& req,
 }

 // Intentionally disable prompt-embedded LoRA tag parsing for server APIs.
-if (!request.gen_params.resolve_and_validate(IMG_GEN, "", runtime.ctx_params->hires_upscalers_dir, true)) {
+if (!request.gen_params.resolve_and_validate(IMG_GEN, "", true)) {
 error_message = "invalid params";
 return false;
 }
|
|||||||
@@ -1,7 +1,6 @@
 #include "routes.h"

 #include <algorithm>
-#include <cctype>
 #include <cstring>
 #include <regex>
 #include <string_view>
@@ -36,20 +35,14 @@ static fs::path resolve_display_model_path(const ServerRuntime& runtime) {
     return {};
 }

-static std::string lower_ascii(std::string value) {
-    std::transform(value.begin(), value.end(), value.begin(), [](unsigned char c) {
-        return static_cast<char>(std::tolower(c));
-    });
-    return value;
-}
-
 static enum sample_method_t get_sdapi_sample_method(std::string name) {
     enum sample_method_t result = str_to_sample_method(name.c_str());
     if (result != SAMPLE_METHOD_COUNT) {
         return result;
     }

-    name = lower_ascii(name);
+    std::transform(name.begin(), name.end(), name.begin(),
+                   [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
     static const std::unordered_map<std::string_view, sample_method_t> hardcoded{
         {"euler a", EULER_A_SAMPLE_METHOD},
         {"k_euler_a", EULER_A_SAMPLE_METHOD},
@@ -67,10 +60,6 @@ static enum sample_method_t get_sdapi_sample_method(std::string name) {
         {"k_res_multistep", RES_MULTISTEP_SAMPLE_METHOD},
         {"res 2s", RES_2S_SAMPLE_METHOD},
         {"k_res_2s", RES_2S_SAMPLE_METHOD},
-        {"euler_cfg_pp", EULER_CFG_PP_SAMPLE_METHOD},
-        {"k_euler_cfg_pp", EULER_CFG_PP_SAMPLE_METHOD},
-        {"euler_a_cfg_pp", EULER_CFG_PP_SAMPLE_METHOD},
-        {"k_euler_a_cfg_pp", EULER_CFG_PP_SAMPLE_METHOD},
     };
     auto it = hardcoded.find(name);
     return it != hardcoded.end() ? it->second : SAMPLE_METHOD_COUNT;
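For reference, the alias table above is only consulted after `str_to_sample_method` fails, and the lookup key is ASCII-lowercased first, so A1111-style sampler names match case-insensitively. A hedged usage sketch of the function in this hunk (names taken from the table; expected results assume `str_to_sample_method` does not already know these spellings):

    enum sample_method_t a = get_sdapi_sample_method("Euler a");   // lowercased to "euler a" -> EULER_A_SAMPLE_METHOD
    enum sample_method_t b = get_sdapi_sample_method("K_EULER_A"); // lowercased to "k_euler_a" -> EULER_A_SAMPLE_METHOD
    enum sample_method_t c = get_sdapi_sample_method("no such");   // miss -> SAMPLE_METHOD_COUNT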
@@ -125,18 +114,6 @@ static bool build_sdapi_img_gen_request(const json& j,
     request.gen_params.width = j.value("width", -1);
     request.gen_params.height = j.value("height", -1);

-    if (!img2img && j.value("enable_hr", false)) {
-        request.gen_params.hires_enabled = true;
-        request.gen_params.hires_scale = j.value("hr_scale", request.gen_params.hires_scale);
-        request.gen_params.hires_width = j.value("hr_resize_x", request.gen_params.hires_width);
-        request.gen_params.hires_height = j.value("hr_resize_y", request.gen_params.hires_height);
-        request.gen_params.hires_steps = j.value("hr_steps", request.gen_params.hires_steps);
-        request.gen_params.hires_denoising_strength =
-            j.value("denoising_strength", request.gen_params.hires_denoising_strength);
-
-        request.gen_params.hires_upscaler = j.value("hr_upscaler", request.gen_params.hires_upscaler);
-    }
-
     std::string sd_cpp_extra_args_str = extract_and_remove_sd_cpp_extra_args(request.gen_params.prompt);
     if (!sd_cpp_extra_args_str.empty() && !request.gen_params.from_json_str(sd_cpp_extra_args_str)) {
         error_message = "invalid sd_cpp_extra_args";
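The removed branch mapped AUTOMATIC1111-style highres-fix request fields onto sd.cpp's generation params. A hedged sketch of a txt2img payload that would have exercised it (key names come from the removed code; the concrete values are illustrative only):

    #include <nlohmann/json.hpp>
    using json = nlohmann::json;

    // Illustrative sdapi txt2img body; only keys read by the removed branch are shown.
    json body = {
        {"prompt", "a photo of a cat"},
        {"width", 512},
        {"height", 512},
        {"enable_hr", true},          // gates the whole hires block
        {"hr_scale", 2.0},            // -> gen_params.hires_scale
        {"hr_resize_x", 0},           // -> gen_params.hires_width
        {"hr_resize_y", 0},           // -> gen_params.hires_height
        {"hr_steps", 10},             // -> gen_params.hires_steps
        {"denoising_strength", 0.7},  // -> gen_params.hires_denoising_strength
        {"hr_upscaler", "Latent"},    // -> gen_params.hires_upscaler
    };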
@@ -251,7 +228,7 @@ static bool build_sdapi_img_gen_request(const json& j,
     }

     // Intentionally disable prompt-embedded LoRA tag parsing for server APIs.
-    if (!request.gen_params.resolve_and_validate(IMG_GEN, "", runtime.ctx_params->hires_upscalers_dir, true)) {
+    if (!request.gen_params.resolve_and_validate(IMG_GEN, "", true)) {
         error_message = "invalid params";
         return false;
     }
@@ -370,52 +347,6 @@ void register_sdapi_endpoints(httplib::Server& svr, ServerRuntime& rt) {
         res.set_content(result.dump(), "application/json");
     });

-    svr.Get("/sdapi/v1/upscalers", [runtime](const httplib::Request&, httplib::Response& res) {
-        refresh_upscaler_cache(*runtime);
-
-        auto make_builtin = [](const char* name) {
-            json item;
-            item["name"] = name;
-            item["model_name"] = nullptr;
-            item["model_path"] = nullptr;
-            item["model_url"] = nullptr;
-            item["scale"] = 4;
-            return item;
-        };
-
-        json result = json::array();
-        result.push_back(make_builtin("None"));
-        result.push_back(make_builtin("Lanczos"));
-        result.push_back(make_builtin("Nearest"));
-
-        {
-            std::lock_guard<std::mutex> lock(*runtime->upscaler_mutex);
-            for (const auto& e : *runtime->upscaler_cache) {
-                json item;
-                item["name"] = e.name;
-                item["model_name"] = e.model_name;
-                item["model_path"] = e.fullpath;
-                item["model_url"] = nullptr;
-                item["scale"] = e.scale;
-                result.push_back(item);
-            }
-        }
-
-        res.set_content(result.dump(), "application/json");
-    });
-
-    svr.Get("/sdapi/v1/latent-upscale-modes", [](const httplib::Request&, httplib::Response& res) {
-        json result = json::array({
-            {{"name", "Latent"}},
-            {{"name", "Latent (nearest)"}},
-            {{"name", "Latent (nearest-exact)"}},
-            {{"name", "Latent (antialiased)"}},
-            {{"name", "Latent (bicubic)"}},
-            {{"name", "Latent (bicubic antialiased)"}},
-        });
-        res.set_content(result.dump(), "application/json");
-    });
-
     svr.Get("/sdapi/v1/samplers", [runtime](const httplib::Request&, httplib::Response& res) {
         std::vector<std::string> sampler_names;
         sampler_names.push_back("default");
@@ -114,17 +114,6 @@ static json make_img_gen_defaults_json(const SDGenerationParams& defaults, const
         {"increase_ref_index", defaults.increase_ref_index},
         {"control_strength", defaults.control_strength},
         {"sample_params", make_sample_params_json(defaults.sample_params, defaults.skip_layers)},
-        {"hires",
-         {
-             {"enabled", defaults.hires_enabled},
-             {"upscaler", defaults.hires_upscaler},
-             {"scale", defaults.hires_scale},
-             {"target_width", defaults.hires_width},
-             {"target_height", defaults.hires_height},
-             {"steps", defaults.hires_steps},
-             {"denoising_strength", defaults.hires_denoising_strength},
-             {"upscale_tile_size", defaults.hires_upscale_tile_size},
-         }},
         {"vae_tiling_params", make_vae_tiling_json(defaults.vae_tiling_params)},
         {"cache_mode", defaults.cache_mode},
         {"cache_option", defaults.cache_option},
@@ -168,7 +157,6 @@ static json make_img_gen_features_json() {
         {"ref_images", true},
         {"lora", true},
         {"vae_tiling", true},
-        {"hires", true},
         {"cache", true},
         {"cancel_queued", true},
         {"cancel_generating", false},
@@ -191,7 +179,6 @@ static json make_vid_gen_features_json() {

 static json make_capabilities_json(ServerRuntime& runtime) {
     refresh_lora_cache(runtime);
-    refresh_upscaler_cache(runtime);

     AsyncJobManager& manager = *runtime.async_job_manager;
     const auto& defaults = *runtime.default_gen_params;
@@ -203,7 +190,6 @@ static json make_capabilities_json(ServerRuntime& runtime) {
     json image_output_formats = supported_img_output_formats();
     json video_output_formats = supported_vid_output_formats();
     json available_loras = json::array();
-    json available_upscalers = json::array();
     json supported_modes = json::array();

     for (int i = 0; i < SAMPLE_METHOD_COUNT; ++i) {
@@ -224,42 +210,6 @@ static json make_capabilities_json(ServerRuntime& runtime) {
         }
     }

-    available_upscalers.push_back({
-        {"name", "None"},
-    });
-    available_upscalers.push_back({
-        {"name", "Lanczos"},
-    });
-    available_upscalers.push_back({
-        {"name", "Nearest"},
-    });
-    available_upscalers.push_back({
-        {"name", "Latent"},
-    });
-    available_upscalers.push_back({
-        {"name", "Latent (nearest)"},
-    });
-    available_upscalers.push_back({
-        {"name", "Latent (nearest-exact)"},
-    });
-    available_upscalers.push_back({
-        {"name", "Latent (antialiased)"},
-    });
-    available_upscalers.push_back({
-        {"name", "Latent (bicubic)"},
-    });
-    available_upscalers.push_back({
-        {"name", "Latent (bicubic antialiased)"},
-    });
-
-    {
-        std::lock_guard<std::mutex> lock(*runtime.upscaler_mutex);
-        for (const auto& entry : *runtime.upscaler_cache) {
-            available_upscalers.push_back({
-                {"name", entry.name},
-            });
-        }
-    }
-
     if (supports_img) {
         supported_modes.push_back("img_gen");
     }
@@ -334,7 +284,6 @@ static json make_capabilities_json(ServerRuntime& runtime) {
     result["features"] = top_level_features;
     result["features_by_mode"] = features_by_mode;
     result["loras"] = available_loras;
-    result["upscalers"] = available_upscalers;
     return result;
 }

@@ -358,7 +307,7 @@ static bool parse_img_gen_request(const json& body,
         return false;
     }
     // Intentionally disable prompt-embedded LoRA tag parsing for server APIs.
-    if (!request.gen_params.resolve_and_validate(IMG_GEN, "", runtime.ctx_params->hires_upscalers_dir, true)) {
+    if (!request.gen_params.resolve_and_validate(IMG_GEN, "", true)) {
         error_message = "invalid generation parameters";
         return false;
     }
@@ -385,7 +334,7 @@ static bool parse_vid_gen_request(const json& body,
         return false;
     }
     // Intentionally disable prompt-embedded LoRA tag parsing for server APIs.
-    if (!request.gen_params.resolve_and_validate(VID_GEN, "", runtime.ctx_params->hires_upscalers_dir, true)) {
+    if (!request.gen_params.resolve_and_validate(VID_GEN, "", true)) {
         error_message = "invalid generation parameters";
         return false;
     }
@@ -1,7 +1,6 @@
 #include "runtime.h"

 #include <algorithm>
-#include <cctype>
 #include <chrono>
 #include <cstdlib>
 #include <filesystem>
@@ -14,18 +13,6 @@

 namespace fs = std::filesystem;

-static std::string lower_ascii(std::string value) {
-    std::transform(value.begin(), value.end(), value.begin(), [](unsigned char c) {
-        return static_cast<char>(std::tolower(c));
-    });
-    return value;
-}
-
-static bool is_supported_model_ext(const fs::path& p) {
-    auto ext = lower_ascii(p.extension().string());
-    return ext == ".gguf" || ext == ".pt" || ext == ".pth" || ext == ".safetensors";
-}
-
 static const std::string k_base64_chars =
     "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
     "abcdefghijklmnopqrstuvwxyz"
@@ -254,12 +241,20 @@ void refresh_lora_cache(ServerRuntime& rt) {

     fs::path lora_dir = rt.ctx_params->lora_model_dir;
     if (fs::exists(lora_dir) && fs::is_directory(lora_dir)) {
+        auto is_lora_ext = [](const fs::path& p) {
+            auto ext = p.extension().string();
+            std::transform(ext.begin(), ext.end(), ext.begin(), [](unsigned char c) {
+                return static_cast<char>(std::tolower(c));
+            });
+            return ext == ".gguf" || ext == ".pt" || ext == ".pth" || ext == ".safetensors";
+        };
+
         for (auto& entry : fs::recursive_directory_iterator(lora_dir)) {
             if (!entry.is_regular_file()) {
                 continue;
             }
             const fs::path& p = entry.path();
-            if (!is_supported_model_ext(p)) {
+            if (!is_lora_ext(p)) {
                 continue;
             }

@@ -291,40 +286,6 @@ std::string get_lora_full_path(ServerRuntime& rt, const std::string& path) {
     return it != rt.lora_cache->end() ? it->fullpath : "";
 }

-void refresh_upscaler_cache(ServerRuntime& rt) {
-    std::vector<UpscalerEntry> new_cache;
-
-    fs::path upscaler_dir = rt.ctx_params->hires_upscalers_dir;
-    if (fs::exists(upscaler_dir) && fs::is_directory(upscaler_dir)) {
-        for (auto& entry : fs::directory_iterator(upscaler_dir)) {
-            if (!entry.is_regular_file()) {
-                continue;
-            }
-            const fs::path& p = entry.path();
-            if (!is_supported_model_ext(p)) {
-                continue;
-            }
-
-            UpscalerEntry upscaler_entry;
-            upscaler_entry.name = p.stem().u8string();
-            upscaler_entry.fullpath = fs::absolute(p).lexically_normal().u8string();
-            upscaler_entry.model_name = "ESRGAN_4x";
-            upscaler_entry.path = p.filename().u8string();
-
-            new_cache.push_back(std::move(upscaler_entry));
-        }
-    }
-
-    std::sort(new_cache.begin(), new_cache.end(), [](const UpscalerEntry& a, const UpscalerEntry& b) {
-        return a.name < b.name;
-    });
-
-    {
-        std::lock_guard<std::mutex> lock(*rt.upscaler_mutex);
-        *rt.upscaler_cache = std::move(new_cache);
-    }
-}
-
 int64_t unix_timestamp_now() {
     return std::chrono::duration_cast<std::chrono::seconds>(
                std::chrono::system_clock::now().time_since_epoch())
@@ -37,14 +37,6 @@ struct LoraEntry {
     std::string fullpath;
 };

-struct UpscalerEntry {
-    std::string name;
-    std::string path;
-    std::string fullpath;
-    std::string model_name;
-    int scale = 4;
-};
-
 struct ServerRuntime {
     sd_ctx_t* sd_ctx;
     std::mutex* sd_ctx_mutex;
@@ -53,8 +45,6 @@ struct ServerRuntime {
     const SDGenerationParams* default_gen_params;
     std::vector<LoraEntry>* lora_cache;
     std::mutex* lora_mutex;
-    std::vector<UpscalerEntry>* upscaler_cache;
-    std::mutex* upscaler_mutex;
     AsyncJobManager* async_job_manager;
 };

@@ -96,5 +86,4 @@ bool runtime_supports_generation_mode(const ServerRuntime& runtime, SDMode mode)
 std::string unsupported_generation_mode_error(SDMode mode);
 void refresh_lora_cache(ServerRuntime& rt);
 std::string get_lora_full_path(ServerRuntime& rt, const std::string& path);
-void refresh_upscaler_cache(ServerRuntime& rt);
 int64_t unix_timestamp_now();
@@ -51,8 +51,6 @@ enum sample_method_t {
     RES_MULTISTEP_SAMPLE_METHOD,
     RES_2S_SAMPLE_METHOD,
     ER_SDE_SAMPLE_METHOD,
-    EULER_CFG_PP_SAMPLE_METHOD,
-    EULER_A_CFG_PP_SAMPLE_METHOD,
     SAMPLE_METHOD_COUNT
 };

@@ -205,7 +203,6 @@ typedef struct {
     bool chroma_use_t5_mask;
     int chroma_t5_mask_pad;
     bool qwen_image_zero_cond_t;
-    float max_vram;
 } sd_ctx_params_t;

 typedef struct {
@@ -240,7 +237,6 @@ typedef struct {
     float* custom_sigmas;
     int custom_sigmas_count;
     float flow_shift;
-    const char* extra_sample_args;
 } sd_sample_params_t;

 typedef struct {
@@ -293,32 +289,6 @@ typedef struct {
     const char* path;
 } sd_lora_t;

-enum sd_hires_upscaler_t {
-    SD_HIRES_UPSCALER_NONE,
-    SD_HIRES_UPSCALER_LATENT,
-    SD_HIRES_UPSCALER_LATENT_NEAREST,
-    SD_HIRES_UPSCALER_LATENT_NEAREST_EXACT,
-    SD_HIRES_UPSCALER_LATENT_ANTIALIASED,
-    SD_HIRES_UPSCALER_LATENT_BICUBIC,
-    SD_HIRES_UPSCALER_LATENT_BICUBIC_ANTIALIASED,
-    SD_HIRES_UPSCALER_LANCZOS,
-    SD_HIRES_UPSCALER_NEAREST,
-    SD_HIRES_UPSCALER_MODEL,
-    SD_HIRES_UPSCALER_COUNT,
-};
-
-typedef struct {
-    bool enabled;
-    enum sd_hires_upscaler_t upscaler;
-    const char* model_path;
-    float scale;
-    int target_width;
-    int target_height;
-    int steps;
-    float denoising_strength;
-    int upscale_tile_size;
-} sd_hires_params_t;
-
 typedef struct {
     const sd_lora_t* loras;
     uint32_t lora_count;
@@ -342,7 +312,6 @@ typedef struct {
     sd_pm_params_t pm_params;
     sd_tiling_params_t vae_tiling_params;
     sd_cache_params_t cache;
-    sd_hires_params_t hires;
 } sd_img_gen_params_t;

 typedef struct {
@@ -396,11 +365,8 @@ SD_API const char* sd_preview_name(enum preview_t preview);
 SD_API enum preview_t str_to_preview(const char* str);
 SD_API const char* sd_lora_apply_mode_name(enum lora_apply_mode_t mode);
 SD_API enum lora_apply_mode_t str_to_lora_apply_mode(const char* str);
-SD_API const char* sd_hires_upscaler_name(enum sd_hires_upscaler_t upscaler);
-SD_API enum sd_hires_upscaler_t str_to_sd_hires_upscaler(const char* str);

 SD_API void sd_cache_params_init(sd_cache_params_t* cache_params);
-SD_API void sd_hires_params_init(sd_hires_params_t* hires_params);

 SD_API void sd_ctx_params_init(sd_ctx_params_t* sd_ctx_params);
 SD_API char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params);
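For context, this is how a caller on the feature-bearing side would have filled the hires block of `sd_img_gen_params_t`. A hedged sketch using only declarations removed above; the field values are illustrative, and the exact string accepted by `str_to_sd_hires_upscaler` is an assumption:

    sd_img_gen_params_t params;
    // ... other fields initialized as usual ...
    sd_hires_params_init(&params.hires);  // fill defaults first
    params.hires.enabled            = true;
    params.hires.upscaler           = str_to_sd_hires_upscaler("Latent");  // or SD_HIRES_UPSCALER_MODEL
    params.hires.model_path         = "";   // only consulted for SD_HIRES_UPSCALER_MODEL
    params.hires.scale              = 2.0f;
    params.hires.steps              = 10;
    params.hires.denoising_strength = 0.7f;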
@@ -499,15 +499,9 @@ namespace Anima {
             encoder_hidden_states = adapted_context;
         }

-        sd::ggml_graph_cut::mark_graph_cut(x, "anima.prelude", "x");
-        sd::ggml_graph_cut::mark_graph_cut(embedded_timestep, "anima.prelude", "embedded_timestep");
-        sd::ggml_graph_cut::mark_graph_cut(temb, "anima.prelude", "temb");
-        sd::ggml_graph_cut::mark_graph_cut(encoder_hidden_states, "anima.prelude", "context");
-
         for (int i = 0; i < num_layers; i++) {
             auto block = std::dynamic_pointer_cast<TransformerBlock>(blocks["blocks." + std::to_string(i)]);
             x = block->forward(ctx, x, encoder_hidden_states, embedded_timestep, temb, image_pe);
-            sd::ggml_graph_cut::mark_graph_cut(x, "anima.blocks." + std::to_string(i), "x");
         }

         x = final_layer->forward(ctx, x, embedded_timestep, temb);  // [N, h*w, ph*pw*C]
@@ -328,7 +328,6 @@ public:
         auto conv_out = std::dynamic_pointer_cast<Conv2d>(blocks["conv_out"]);

         auto h = conv_in->forward(ctx, x);  // [N, ch, h, w]
-        // sd::ggml_graph_cut::mark_graph_cut(h, "vae.encoder.prelude", "h");

         // downsampling
         size_t num_resolutions = ch_mult.size();
@@ -338,14 +337,12 @@ public:
                 auto down_block = std::dynamic_pointer_cast<ResnetBlock>(blocks[name]);

                 h = down_block->forward(ctx, h);
-                // sd::ggml_graph_cut::mark_graph_cut(h, "vae.encoder.down." + std::to_string(i) + ".block." + std::to_string(j), "h");
             }
             if (i != num_resolutions - 1) {
                 std::string name = "down." + std::to_string(i) + ".downsample";
                 auto down_sample = std::dynamic_pointer_cast<DownSampleBlock>(blocks[name]);

                 h = down_sample->forward(ctx, h);
-                // sd::ggml_graph_cut::mark_graph_cut(h, "vae.encoder.down." + std::to_string(i) + ".downsample", "h");
             }
         }

@@ -353,7 +350,6 @@ public:
         h = mid_block_1->forward(ctx, h);
         h = mid_attn_1->forward(ctx, h);
         h = mid_block_2->forward(ctx, h);  // [N, block_in, h, w]
-        // sd::ggml_graph_cut::mark_graph_cut(h, "vae.encoder.mid", "h");

         // end
         h = norm_out->forward(ctx, h);
@@ -454,7 +450,6 @@ public:

         // conv_in
         auto h = conv_in->forward(ctx, z);  // [N, block_in, h, w]
-        // sd::ggml_graph_cut::mark_graph_cut(h, "vae.decoder.prelude", "h");

         // middle
         h = mid_block_1->forward(ctx, h);
@@ -462,7 +457,6 @@ public:

         h = mid_attn_1->forward(ctx, h);
         h = mid_block_2->forward(ctx, h);  // [N, block_in, h, w]
-        // sd::ggml_graph_cut::mark_graph_cut(h, "vae.decoder.mid", "h");

         // upsampling
         int num_resolutions = static_cast<int>(ch_mult.size());
@@ -472,14 +466,12 @@ public:
                 auto up_block = std::dynamic_pointer_cast<ResnetBlock>(blocks[name]);

                 h = up_block->forward(ctx, h);
-                // sd::ggml_graph_cut::mark_graph_cut(h, "vae.decoder.up." + std::to_string(i) + ".block." + std::to_string(j), "h");
             }
             if (i != 0) {
                 std::string name = "up." + std::to_string(i) + ".upsample";
                 auto up_sample = std::dynamic_pointer_cast<UpSampleBlock>(blocks[name]);

                 h = up_sample->forward(ctx, h);
-                // sd::ggml_graph_cut::mark_graph_cut(h, "vae.decoder.up." + std::to_string(i) + ".upsample", "h");
             }
         }

@@ -607,7 +599,6 @@ public:
         if (use_quant) {
             auto post_quant_conv = std::dynamic_pointer_cast<Conv2d>(blocks["post_quant_conv"]);
             z = post_quant_conv->forward(ctx, z);  // [N, z_channels, h, w]
-            // sd::ggml_graph_cut::mark_graph_cut(z, "vae.decode.prelude", "z");
         }
         auto decoder = std::dynamic_pointer_cast<Decoder>(blocks["decoder"]);

@@ -625,7 +616,6 @@ public:
         if (use_quant) {
             auto quant_conv = std::dynamic_pointer_cast<Conv2d>(blocks["quant_conv"]);
             z = quant_conv->forward(ctx, z);  // [N, 2*embed_dim, h/8, w/8]
-            // sd::ggml_graph_cut::mark_graph_cut(z, "vae.encode.final", "z");
         }
         if (sd_version_uses_flux2_vae(version)) {
             z = ggml_ext_chunk(ctx->ggml_ctx, z, 2, 2)[0];
12 src/clip.hpp
@@ -96,8 +96,7 @@ public:
     ggml_tensor* forward(GGMLRunnerContext* ctx,
                          ggml_tensor* x,
                          ggml_tensor* mask = nullptr,
-                         int clip_skip = -1,
-                         const std::string& graph_cut_prefix = "") {
+                         int clip_skip = -1) {
         // x: [N, n_token, d_model]
         int layer_idx = n_layer - 1;
         // LOG_DEBUG("clip_skip %d", clip_skip);
@@ -113,9 +112,6 @@ public:
             std::string name = "layers." + std::to_string(i);
             auto layer = std::dynamic_pointer_cast<CLIPLayer>(blocks[name]);
             x = layer->forward(ctx, x, mask);  // [N, n_token, d_model]
-            if (!graph_cut_prefix.empty()) {
-                sd::ggml_graph_cut::mark_graph_cut(x, graph_cut_prefix + ".layers." + std::to_string(i), "x");
-            }
             // LOG_DEBUG("layer %d", i);
         }
         return x;
@@ -308,8 +304,7 @@ public:
         auto final_layer_norm = std::dynamic_pointer_cast<LayerNorm>(blocks["final_layer_norm"]);

         auto x = embeddings->forward(ctx, input_ids, tkn_embeddings);  // [N, n_token, hidden_size]
-        sd::ggml_graph_cut::mark_graph_cut(x, "clip_text.prelude", "x");
-        x = encoder->forward(ctx, x, mask, return_pooled ? -1 : clip_skip, "clip_text");
+        x = encoder->forward(ctx, x, mask, return_pooled ? -1 : clip_skip);
         if (return_pooled || with_final_ln) {
             x = final_layer_norm->forward(ctx, x);
         }
@@ -373,8 +368,7 @@ public:

         auto x = embeddings->forward(ctx, pixel_values);  // [N, num_positions, embed_dim]
         x = pre_layernorm->forward(ctx, x);
-        sd::ggml_graph_cut::mark_graph_cut(x, "clip_vision.prelude", "x");
-        x = encoder->forward(ctx, x, nullptr, clip_skip, "clip_vision");
+        x = encoder->forward(ctx, x, nullptr, clip_skip);

         auto last_hidden_state = x;

@@ -1,9 +1,7 @@
 #ifndef __COMMON_BLOCK_HPP__
 #define __COMMON_BLOCK_HPP__

-#include "ggml-backend.h"
 #include "ggml_extend.hpp"
-#include "util.h"

 class DownSampleBlock : public GGMLBlock {
 protected:
@@ -250,6 +248,9 @@ public:
         float scale = 1.f;
         if (precision_fix) {
             scale = 1.f / 128.f;
+#ifdef SD_USE_VULKAN
+            force_prec_f32 = true;
+#endif
         }
         // The purpose of the scale here is to prevent NaN issues in certain situations.
         // For example, when using Vulkan without enabling force_prec_f32,
@@ -263,9 +264,6 @@ public:

         auto net_0 = std::dynamic_pointer_cast<UnaryBlock>(blocks["net.0"]);
         auto net_2 = std::dynamic_pointer_cast<Linear>(blocks["net.2"]);
-        if (sd_backend_is(ctx->backend, "Vulkan")) {
-            net_2->set_force_prec_f32(true);
-        }

         x = net_0->forward(ctx, x);  // [ne3, ne2, ne1, inner_dim]
         x = net_2->forward(ctx, x);  // [ne3, ne2, ne1, dim_out]
@@ -14,12 +14,6 @@ struct SDCondition {
     sd::Tensor<float> c_concat;
     sd::Tensor<int32_t> c_t5_ids;
     sd::Tensor<float> c_t5_weights;
-    sd::Tensor<int32_t> c_input_ids;
-    sd::Tensor<int32_t> c_position_ids;
-    sd::Tensor<int32_t> c_token_types;
-    sd::Tensor<int32_t> c_vinput_mask;
-    std::vector<std::pair<int, sd::Tensor<float>>> c_image_embeds;
-    std::vector<sd::Tensor<float>> c_ref_images;

     std::vector<sd::Tensor<float>> extra_c_crossattns;

@@ -32,24 +26,10 @@ struct SDCondition {

     bool empty() const {
         if (!c_crossattn.empty() || !c_vector.empty() || !c_concat.empty() ||
-            !c_t5_ids.empty() || !c_t5_weights.empty() ||
-            !c_input_ids.empty() || !c_position_ids.empty() ||
-            !c_token_types.empty() || !c_vinput_mask.empty()) {
+            !c_t5_ids.empty() || !c_t5_weights.empty()) {
             return false;
         }
-
-        for (const auto& image_embed : c_image_embeds) {
-            if (!image_embed.second.empty()) {
-                return false;
-            }
-        }
-
-        for (const auto& tensor : c_ref_images) {
-            if (!tensor.empty()) {
-                return false;
-            }
-        }

         for (const auto& tensor : extra_c_crossattns) {
             if (!tensor.empty()) {
                 return false;
@@ -105,7 +85,6 @@ public:
     virtual void free_params_buffer() = 0;
     virtual void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) = 0;
     virtual size_t get_params_buffer_size() = 0;
-    virtual void set_max_graph_vram_bytes(size_t max_vram_bytes) {}
     virtual void set_flash_attention_enabled(bool enabled) = 0;
     virtual void set_weight_adapter(const std::shared_ptr<WeightAdapter>& adapter) {}
     virtual std::tuple<SDCondition, std::vector<bool>> get_learned_condition_with_trigger(int n_threads,
@@ -186,13 +165,6 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
         return buffer_size;
     }

-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        text_model->set_max_graph_vram_bytes(max_vram_bytes);
-        if (sd_version_is_sdxl(version)) {
-            text_model2->set_max_graph_vram_bytes(max_vram_bytes);
-        }
-    }
-
     void set_flash_attention_enabled(bool enabled) override {
         text_model->set_flash_attention_enabled(enabled);
         if (sd_version_is_sdxl(version)) {
@@ -809,18 +781,6 @@ struct SD3CLIPEmbedder : public Conditioner {
         return buffer_size;
     }

-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        if (clip_l) {
-            clip_l->set_max_graph_vram_bytes(max_vram_bytes);
-        }
-        if (clip_g) {
-            clip_g->set_max_graph_vram_bytes(max_vram_bytes);
-        }
-        if (t5) {
-            t5->set_max_graph_vram_bytes(max_vram_bytes);
-        }
-    }
-
     void set_flash_attention_enabled(bool enabled) override {
         if (clip_l) {
             clip_l->set_flash_attention_enabled(enabled);
@@ -1164,15 +1124,6 @@ struct FluxCLIPEmbedder : public Conditioner {
         return buffer_size;
     }

-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        if (clip_l) {
-            clip_l->set_max_graph_vram_bytes(max_vram_bytes);
-        }
-        if (t5) {
-            t5->set_max_graph_vram_bytes(max_vram_bytes);
-        }
-    }
-
     void set_flash_attention_enabled(bool enabled) override {
         if (clip_l) {
             clip_l->set_flash_attention_enabled(enabled);
@@ -1398,12 +1349,6 @@ struct T5CLIPEmbedder : public Conditioner {
         return buffer_size;
     }

-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        if (t5) {
-            t5->set_max_graph_vram_bytes(max_vram_bytes);
-        }
-    }
-
     void set_flash_attention_enabled(bool enabled) override {
         if (t5) {
             t5->set_flash_attention_enabled(enabled);
@@ -1580,10 +1525,6 @@ struct AnimaConditioner : public Conditioner {
         return llm->get_params_buffer_size();
     }

-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        llm->set_max_graph_vram_bytes(max_vram_bytes);
-    }
-
     void set_flash_attention_enabled(bool enabled) override {
         llm->set_flash_attention_enabled(enabled);
     }
@@ -1716,10 +1657,6 @@ struct LLMEmbedder : public Conditioner {
         return buffer_size;
     }

-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        llm->set_max_graph_vram_bytes(max_vram_bytes);
-    }
-
     void set_flash_attention_enabled(bool enabled) override {
         llm->set_flash_attention_enabled(enabled);
     }
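One pattern worth noting in the hunks above: `set_max_graph_vram_bytes` is declared on the `Conditioner` base class with an empty body rather than as pure virtual, so only embedders that own sub-models override it and fan the value out. A self-contained sketch of the same idiom, with hypothetical names:

    #include <cstddef>

    // Hypothetical illustration of the default-no-op virtual used by Conditioner.
    struct Component {
        // Empty default: subclasses with nothing to propagate can simply ignore it.
        virtual void set_budget(size_t bytes) {}
        virtual ~Component() = default;
    };

    struct Composite : Component {
        Component* child_a = nullptr;
        Component* child_b = nullptr;  // may be absent, like the optional clip_g/t5 members
        void set_budget(size_t bytes) override {
            if (child_a) child_a->set_budget(bytes);
            if (child_b) child_b->set_budget(bytes);
        }
    };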
138 src/convert.cpp
@@ -1,138 +0,0 @@
-#include <cstring>
-#include <mutex>
-#include <regex>
-#include <vector>
-
-#include "model.h"
-#include "model_io/gguf_io.h"
-#include "model_io/safetensors_io.h"
-#include "util.h"
-
-#include "ggml-cpu.h"
-
-static ggml_type get_export_tensor_type(ModelLoader& model_loader,
-                                        const TensorStorage& tensor_storage,
-                                        ggml_type type,
-                                        const TensorTypeRules& tensor_type_rules) {
-    const std::string& name = tensor_storage.name;
-    ggml_type tensor_type = tensor_storage.type;
-    ggml_type dst_type = type;
-
-    for (const auto& tensor_type_rule : tensor_type_rules) {
-        std::regex pattern(tensor_type_rule.first);
-        if (std::regex_search(name, pattern)) {
-            dst_type = tensor_type_rule.second;
-            break;
-        }
-    }
-
-    if (model_loader.tensor_should_be_converted(tensor_storage, dst_type)) {
-        tensor_type = dst_type;
-    }
-
-    return tensor_type;
-}
-
-static bool load_tensors_for_export(ModelLoader& model_loader,
-                                    ggml_context* ggml_ctx,
-                                    ggml_type type,
-                                    const TensorTypeRules& tensor_type_rules,
-                                    std::vector<TensorWriteInfo>& tensors) {
-    std::mutex tensor_mutex;
-    auto on_new_tensor_cb = [&](const TensorStorage& tensor_storage, ggml_tensor** dst_tensor) -> bool {
-        const std::string& name = tensor_storage.name;
-        ggml_type tensor_type = get_export_tensor_type(model_loader, tensor_storage, type, tensor_type_rules);
-
-        std::lock_guard<std::mutex> lock(tensor_mutex);
-        ggml_tensor* tensor = ggml_new_tensor(ggml_ctx, tensor_type, tensor_storage.n_dims, tensor_storage.ne);
-        if (tensor == nullptr) {
-            LOG_ERROR("ggml_new_tensor failed");
-            return false;
-        }
-        ggml_set_name(tensor, name.c_str());
-
-        if (!tensor->data) {
-            GGML_ASSERT(ggml_nelements(tensor) == 0);
-            // Avoid crashing writers by setting a dummy pointer for zero-sized tensors.
-            LOG_DEBUG("setting dummy pointer for zero-sized tensor %s", name.c_str());
-            tensor->data = ggml_get_mem_buffer(ggml_ctx);
-        }
-
-        TensorWriteInfo write_info;
-        write_info.tensor = tensor;
-        write_info.n_dims = tensor_storage.n_dims;
-        for (int i = 0; i < tensor_storage.n_dims; ++i) {
-            write_info.ne[i] = tensor_storage.ne[i];
-        }
-
-        *dst_tensor = tensor;
-        tensors.push_back(std::move(write_info));
-
-        return true;
-    };
-
-    bool success = model_loader.load_tensors(on_new_tensor_cb);
-    LOG_INFO("load tensors done");
-    return success;
-}
-
-bool convert(const char* input_path,
-             const char* vae_path,
-             const char* output_path,
-             sd_type_t output_type,
-             const char* tensor_type_rules,
-             bool convert_name) {
-    ModelLoader model_loader;
-
-    if (!model_loader.init_from_file(input_path)) {
-        LOG_ERROR("init model loader from file failed: '%s'", input_path);
-        return false;
-    }
-
-    if (vae_path != nullptr && strlen(vae_path) > 0) {
-        if (!model_loader.init_from_file(vae_path, "vae.")) {
-            LOG_ERROR("init model loader from file failed: '%s'", vae_path);
-            return false;
-        }
-    }
-    if (convert_name) {
-        model_loader.convert_tensors_name();
-    }
-
-    ggml_type type = (ggml_type)output_type;
-    bool output_is_safetensors = ends_with(output_path, ".safetensors");
-    TensorTypeRules type_rules = parse_tensor_type_rules(tensor_type_rules);
-
-    auto backend = ggml_backend_cpu_init();
-    size_t mem_size = 1 * 1024 * 1024;  // for padding
-    mem_size += model_loader.get_tensor_storage_map().size() * ggml_tensor_overhead();
-    mem_size += model_loader.get_params_mem_size(backend, type);
-    LOG_INFO("model tensors mem size: %.2fMB", mem_size / 1024.f / 1024.f);
-    ggml_context* ggml_ctx = ggml_init({mem_size, nullptr, false});
-
-    if (ggml_ctx == nullptr) {
-        LOG_ERROR("ggml_init failed for converter");
-        ggml_backend_free(backend);
-        return false;
-    }
-
-    std::vector<TensorWriteInfo> tensors;
-    bool success = load_tensors_for_export(model_loader, ggml_ctx, type, type_rules, tensors);
-    ggml_backend_free(backend);
-
-    std::string error;
-    if (success) {
-        if (output_is_safetensors) {
-            success = write_safetensors_file(output_path, tensors, &error);
-        } else {
-            success = write_gguf_file(output_path, tensors, &error);
-        }
-    }
-
-    if (!success && !error.empty()) {
-        LOG_ERROR("%s", error.c_str());
-    }
-
-    ggml_free(ggml_ctx);
-    return success;
-}
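The deleted `convert.cpp` was the standalone model converter: it loads tensors through `ModelLoader`, retypes them per regex rules, and writes either safetensors or GGUF depending on the output extension. A hedged usage sketch against the `convert()` signature shown above (file names are illustrative, and the `SD_TYPE_Q8_0` constant is assumed to exist in the public header):

    // Illustrative call into the removed converter entry point.
    // Output ends in ".gguf", so write_gguf_file() would be selected;
    // a ".safetensors" suffix would select write_safetensors_file() instead.
    bool ok = convert(/* input_path        */ "sd-v1-5.safetensors",
                      /* vae_path          */ nullptr,       // optional external VAE, loaded with "vae." prefix
                      /* output_path       */ "sd-v1-5-q8_0.gguf",
                      /* output_type       */ SD_TYPE_Q8_0,  // assumed enum value from stable-diffusion.h
                      /* tensor_type_rules */ "",            // regex=type overrides; none here
                      /* convert_name      */ true);         // normalize tensor names first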
240 src/denoiser.hpp
@@ -2,7 +2,6 @@
 #define __DENOISER_HPP__

 #include <cmath>
-#include <string>
 #include <utility>

 #include "ggml_extend.hpp"
@@ -753,7 +752,7 @@ struct Flux2FlowDenoiser : public FluxFlowDenoiser {
     }
 };

-typedef std::function<sd::Tensor<float>(const sd::Tensor<float>&, float, int, sd::Tensor<float>*)> denoise_cb_t;
+typedef std::function<sd::Tensor<float>(const sd::Tensor<float>&, float, int)> denoise_cb_t;

 static std::pair<float, float> get_ancestral_step(float sigma_from,
                                                   float sigma_to,
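The `denoise_cb_t` change above drops the trailing `sd::Tensor<float>*` argument, which is why every sampler call site in the hunks below loses its `nullptr`. A hedged sketch of a callback matching the new typedef (the body is a stand-in, not the real model evaluation):

    // Stand-in callback matching the three-argument typedef; on the other side the
    // same lambda would take a fourth sd::Tensor<float>* parameter, passed as nullptr.
    denoise_cb_t cb = [](const sd::Tensor<float>& x, float sigma, int step) -> sd::Tensor<float> {
        // placeholder: a real callback evaluates the diffusion model at noise level `sigma`
        return x;
    };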
@ -809,18 +808,6 @@ static std::tuple<float, float, float> get_ancestral_step_flow(float sigma_from,
|
|||||||
return {sigma_down, sigma_up, alpha_scale};
|
return {sigma_down, sigma_up, alpha_scale};
|
||||||
}
|
}
|
||||||
|
|
||||||
static std::tuple<float, float, float> get_ancestral_step(float sigma_from,
|
|
||||||
float sigma_to,
|
|
||||||
float eta,
|
|
||||||
bool is_flow_denoiser) {
|
|
||||||
if (is_flow_denoiser) {
|
|
||||||
return get_ancestral_step_flow(sigma_from, sigma_to, eta);
|
|
||||||
} else {
|
|
||||||
auto [sigma_down, sigma_up] = get_ancestral_step(sigma_from, sigma_to, eta);
|
|
||||||
return {sigma_down, sigma_up, 1.0f};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static sd::Tensor<float> sample_euler_ancestral(denoise_cb_t model,
|
static sd::Tensor<float> sample_euler_ancestral(denoise_cb_t model,
|
||||||
sd::Tensor<float> x,
|
sd::Tensor<float> x,
|
||||||
const std::vector<float>& sigmas,
|
const std::vector<float>& sigmas,
|
||||||
@ -829,7 +816,7 @@ static sd::Tensor<float> sample_euler_ancestral(denoise_cb_t model,
|
|||||||
int steps = static_cast<int>(sigmas.size()) - 1;
|
int steps = static_cast<int>(sigmas.size()) - 1;
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
float sigma = sigmas[i];
|
float sigma = sigmas[i];
|
||||||
auto denoised_opt = model(x, sigma, i + 1, nullptr);
|
auto denoised_opt = model(x, sigma, i + 1);
|
||||||
if (denoised_opt.empty()) {
|
if (denoised_opt.empty()) {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
@ -852,7 +839,7 @@ static sd::Tensor<float> sample_euler_flow(denoise_cb_t model,
|
|||||||
int steps = static_cast<int>(sigmas.size()) - 1;
|
int steps = static_cast<int>(sigmas.size()) - 1;
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
float sigma = sigmas[i];
|
float sigma = sigmas[i];
|
||||||
auto denoised_opt = model(x, sigma, i + 1, nullptr);
|
auto denoised_opt = model(x, sigma, i + 1);
|
||||||
if (denoised_opt.empty()) {
|
if (denoised_opt.empty()) {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
@ -874,7 +861,7 @@ static sd::Tensor<float> sample_euler(denoise_cb_t model,
|
|||||||
int steps = static_cast<int>(sigmas.size()) - 1;
|
int steps = static_cast<int>(sigmas.size()) - 1;
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
float sigma = sigmas[i];
|
float sigma = sigmas[i];
|
||||||
auto denoised_opt = model(x, sigma, i + 1, nullptr);
|
auto denoised_opt = model(x, sigma, i + 1);
|
||||||
if (denoised_opt.empty()) {
|
if (denoised_opt.empty()) {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
@ -890,7 +877,7 @@ static sd::Tensor<float> sample_heun(denoise_cb_t model,
|
|||||||
const std::vector<float>& sigmas) {
|
const std::vector<float>& sigmas) {
|
||||||
int steps = static_cast<int>(sigmas.size()) - 1;
|
int steps = static_cast<int>(sigmas.size()) - 1;
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
auto denoised_opt = model(x, sigmas[i], -(i + 1), nullptr);
|
auto denoised_opt = model(x, sigmas[i], -(i + 1));
|
||||||
if (denoised_opt.empty()) {
|
if (denoised_opt.empty()) {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
@ -901,7 +888,7 @@ static sd::Tensor<float> sample_heun(denoise_cb_t model,
|
|||||||
x += d * dt;
|
x += d * dt;
|
||||||
} else {
|
} else {
|
||||||
sd::Tensor<float> x2 = x + d * dt;
|
sd::Tensor<float> x2 = x + d * dt;
|
||||||
auto denoised2_opt = model(x2, sigmas[i + 1], i + 1, nullptr);
|
auto denoised2_opt = model(x2, sigmas[i + 1], i + 1);
|
||||||
if (denoised2_opt.empty()) {
|
if (denoised2_opt.empty()) {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
@ -918,7 +905,7 @@ static sd::Tensor<float> sample_dpm2(denoise_cb_t model,
|
|||||||
const std::vector<float>& sigmas) {
|
const std::vector<float>& sigmas) {
|
||||||
int steps = static_cast<int>(sigmas.size()) - 1;
|
int steps = static_cast<int>(sigmas.size()) - 1;
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
auto denoised_opt = model(x, sigmas[i], -(i + 1), nullptr);
|
auto denoised_opt = model(x, sigmas[i], -(i + 1));
|
||||||
if (denoised_opt.empty()) {
|
if (denoised_opt.empty()) {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
@ -931,7 +918,7 @@ static sd::Tensor<float> sample_dpm2(denoise_cb_t model,
|
|||||||
float dt_1 = sigma_mid - sigmas[i];
|
float dt_1 = sigma_mid - sigmas[i];
|
||||||
float dt_2 = sigmas[i + 1] - sigmas[i];
|
float dt_2 = sigmas[i + 1] - sigmas[i];
|
||||||
sd::Tensor<float> x2 = x + d * dt_1;
|
sd::Tensor<float> x2 = x + d * dt_1;
|
||||||
auto denoised2_opt = model(x2, sigma_mid, i + 1, nullptr);
|
auto denoised2_opt = model(x2, sigma_mid, i + 1);
|
||||||
if (denoised2_opt.empty()) {
|
if (denoised2_opt.empty()) {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
@ -952,7 +939,7 @@ static sd::Tensor<float> sample_dpmpp_2s_ancestral(denoise_cb_t model,
|
|||||||
|
|
||||||
int steps = static_cast<int>(sigmas.size()) - 1;
|
int steps = static_cast<int>(sigmas.size()) - 1;
|
||||||
for (int i = 0; i < steps; i++) {
|
for (int i = 0; i < steps; i++) {
|
||||||
auto denoised_opt = model(x, sigmas[i], -(i + 1), nullptr);
|
auto denoised_opt = model(x, sigmas[i], -(i + 1));
|
||||||
if (denoised_opt.empty()) {
|
if (denoised_opt.empty()) {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
@ -968,7 +955,7 @@ static sd::Tensor<float> sample_dpmpp_2s_ancestral(denoise_cb_t model,
|
|||||||
float s = t + 0.5f * h;
|
float s = t + 0.5f * h;
|
||||||
float sigma_s = sigma_fn(s);
|
float sigma_s = sigma_fn(s);
|
||||||
sd::Tensor<float> x2 = (sigma_s / sigma_fn(t)) * x - (exp(-h * 0.5f) - 1) * denoised;
|
sd::Tensor<float> x2 = (sigma_s / sigma_fn(t)) * x - (exp(-h * 0.5f) - 1) * denoised;
|
||||||
auto denoised2_opt = model(x2, sigma_s, i + 1, nullptr);
|
auto denoised2_opt = model(x2, sigma_s, i + 1);
|
||||||
if (denoised2_opt.empty()) {
|
if (denoised2_opt.empty()) {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
@@ -995,7 +982,7 @@ static sd::Tensor<float> sample_dpmpp_2s_ancestral_flow(denoise_cb_t model,
 
         bool opt_first_step = (1.0 - sigma < 1e-6);
 
-        auto denoised_opt = model(x, sigma, (opt_first_step ? 1 : -1) * (i + 1), nullptr);
+        auto denoised_opt = model(x, sigma, (opt_first_step ? 1 : -1) * (i + 1));
         if (denoised_opt.empty()) {
             return {};
         }
@@ -1024,8 +1011,8 @@ static sd::Tensor<float> sample_dpmpp_2s_ancestral_flow(denoise_cb_t model,
             // so sigma_s = 1 = sigma, and sigma_s_i_ratio = sigma_s / sigma = 1
             // u = (x*sigma_s_i_ratio)+(denoised*(1.0f-sigma_s_i_ratio))
             //   = (x*1)+(denoised*0) = x
-            // so D_i = model(u, sigma_s, i + 1, nullptr)
-            //        = model(x, sigma, i + 1, nullptr)
+            // so D_i = model(u, sigma_s, i + 1)
+            //        = model(x, sigma, i + 1)
             //        = denoised
             D_i = denoised;
 
@@ -1058,7 +1045,7 @@ static sd::Tensor<float> sample_dpmpp_2s_ancestral_flow(denoise_cb_t model,
             float sigma_s_i_ratio = sigma_s / sigma;
             sd::Tensor<float> u = (x * sigma_s_i_ratio) + (denoised * (1.0f - sigma_s_i_ratio));
 
-            auto denoised2_opt = model(u, sigma_s, i + 1, nullptr);
+            auto denoised2_opt = model(u, sigma_s, i + 1);
             if (denoised2_opt.empty()) {
                 return {};
             }
@@ -1085,7 +1072,7 @@ static sd::Tensor<float> sample_dpmpp_2m(denoise_cb_t model,
 
     int steps = static_cast<int>(sigmas.size()) - 1;
     for (int i = 0; i < steps; i++) {
-        auto denoised_opt = model(x, sigmas[i], i + 1, nullptr);
+        auto denoised_opt = model(x, sigmas[i], i + 1);
         if (denoised_opt.empty()) {
             return {};
         }
@@ -1117,7 +1104,7 @@ static sd::Tensor<float> sample_dpmpp_2m_v2(denoise_cb_t model,
 
     int steps = static_cast<int>(sigmas.size()) - 1;
     for (int i = 0; i < steps; i++) {
-        auto denoised_opt = model(x, sigmas[i], i + 1, nullptr);
+        auto denoised_opt = model(x, sigmas[i], i + 1);
         if (denoised_opt.empty()) {
             return {};
         }
@@ -1149,83 +1136,10 @@ static sd::Tensor<float> sample_lcm(denoise_cb_t model,
                                     sd::Tensor<float> x,
                                     const std::vector<float>& sigmas,
                                     std::shared_ptr<RNG> rng,
-                                    bool is_flow_denoiser,
-                                    const char* extra_sample_args = nullptr) {
-    struct LCMSampleArgs {
-        float noise_clip_std = 0.0f;
-        float noise_scale_start = 1.0f;
-        float noise_scale_end = 1.0f;
-    };
-
-    auto trim = [](std::string value) -> std::string {
-        const char* whitespace = " \t\r\n";
-        size_t begin = value.find_first_not_of(whitespace);
-        if (begin == std::string::npos) {
-            return "";
-        }
-        size_t end = value.find_last_not_of(whitespace);
-        return value.substr(begin, end - begin + 1);
-    };
-
-    LCMSampleArgs args;
-    if (extra_sample_args != nullptr && extra_sample_args[0] != '\0') {
-        std::string raw(extra_sample_args);
-        size_t start = 0;
-        bool noise_scale_end_was_set = false;
-        bool noise_scale_start_was_set = false;
-        auto parse_arg = [&](const std::string& item) {
-            std::string token = trim(item);
-            if (token.empty()) {
-                return;
-            }
-            size_t eq = token.find('=');
-            if (eq == std::string::npos) {
-                LOG_WARN("ignoring invalid lcm extra sample arg '%s'", token.c_str());
-                return;
-            }
-
-            std::string key = trim(token.substr(0, eq));
-            std::string value = trim(token.substr(eq + 1));
-            float parsed = 0.0f;
-            try {
-                size_t consumed = 0;
-                parsed = std::stof(value, &consumed);
-                if (trim(value.substr(consumed)).size() != 0) {
-                    LOG_WARN("ignoring invalid lcm extra sample arg '%s'", token.c_str());
-                    return;
-                }
-            } catch (const std::exception&) {
-                LOG_WARN("ignoring invalid lcm extra sample arg '%s'", token.c_str());
-                return;
-            }
-
-            if (key == "noise_clip_std") {
-                args.noise_clip_std = parsed;
-            } else if (key == "noise_scale_start") {
-                args.noise_scale_start = parsed;
-                noise_scale_start_was_set = true;
-            } else if (key == "noise_scale_end") {
-                args.noise_scale_end = parsed;
-                noise_scale_end_was_set = true;
-            } else {
-                LOG_WARN("ignoring unknown lcm extra sample arg '%s'", key.c_str());
-            }
-        };
-
-        for (size_t pos = 0; pos <= raw.size(); ++pos) {
-            if (pos == raw.size() || raw[pos] == ',' || raw[pos] == ';') {
-                parse_arg(raw.substr(start, pos - start));
-                start = pos + 1;
-            }
-        }
-        if (noise_scale_start_was_set && !noise_scale_end_was_set) {
-            args.noise_scale_end = args.noise_scale_start;
-        }
-    }
-
+                                    bool is_flow_denoiser) {
     int steps = static_cast<int>(sigmas.size()) - 1;
     for (int i = 0; i < steps; i++) {
-        auto denoised_opt = model(x, sigmas[i], i + 1, nullptr);
+        auto denoised_opt = model(x, sigmas[i], i + 1);
         if (denoised_opt.empty()) {
             return {};
         }
@@ -1234,27 +1148,7 @@ static sd::Tensor<float> sample_lcm(denoise_cb_t model,
             if (is_flow_denoiser) {
                 x *= (1 - sigmas[i + 1]);
             }
-            auto noise = sd::Tensor<float>::randn_like(x, rng);
-            if (args.noise_clip_std > 0.0f && noise.numel() > 0) {
-                double mean = 0.0;
-                for (int64_t j = 0; j < noise.numel(); ++j) {
-                    mean += static_cast<double>(noise[j]);
-                }
-                mean /= static_cast<double>(noise.numel());
-
-                double variance = 0.0;
-                for (int64_t j = 0; j < noise.numel(); ++j) {
-                    double centered = static_cast<double>(noise[j]) - mean;
-                    variance += centered * centered;
-                }
-                variance /= static_cast<double>(noise.numel());
-
-                float clip_val = args.noise_clip_std * static_cast<float>(std::sqrt(variance));
-                noise = sd::ops::clamp(noise, -clip_val, clip_val);
-            }
-            float t = steps > 1 ? static_cast<float>(i) / static_cast<float>(steps - 1) : 0.0f;
-            float noise_scale = args.noise_scale_start + (args.noise_scale_end - args.noise_scale_start) * t;
-            x += noise * (sigmas[i + 1] * noise_scale);
+            x += sd::Tensor<float>::randn_like(x, rng) * sigmas[i + 1];
         }
     }
     return x;
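The removed master-side block replaces the simple `x += noise * sigmas[i + 1]` injection with a configurable schedule: parsed `key=value` arguments select an optional noise clip (in units of the noise's own standard deviation) and a linear ramp of the noise scale across the run. A worked instance of the ramp arithmetic, with illustrative values standing in for real `extra_sample_args`:

```cpp
// Illustrative only: reproduces the schedule arithmetic from the removed
// master-side sample_lcm block for hypothetical inputs
// ("noise_scale_start=1.0, noise_scale_end=0.5", 4 steps).
#include <cstdio>

int main() {
    const int steps = 4;                   // assumed step count
    const float noise_scale_start = 1.0f;  // from "noise_scale_start=1.0"
    const float noise_scale_end = 0.5f;    // from "noise_scale_end=0.5"
    for (int i = 0; i < steps; i++) {
        float t = steps > 1 ? static_cast<float>(i) / static_cast<float>(steps - 1) : 0.0f;
        float noise_scale = noise_scale_start + (noise_scale_end - noise_scale_start) * t;
        // prints 1.000, 0.833, 0.667, 0.500 -> injected noise fades out linearly
        std::printf("step %d: t=%.2f noise_scale=%.3f\n", i, t, noise_scale);
    }
    return 0;
}
```

Note also the fallback in the removed parser: setting only `noise_scale_start` copies it into `noise_scale_end`, giving a constant scale rather than a ramp.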
@@ -1271,7 +1165,7 @@ static sd::Tensor<float> sample_ipndm(denoise_cb_t model,
         float sigma = sigmas[i];
         float sigma_next = sigmas[i + 1];
 
-        auto denoised_opt = model(x, sigma, i + 1, nullptr);
+        auto denoised_opt = model(x, sigma, i + 1);
         if (denoised_opt.empty()) {
             return {};
         }
@@ -1315,7 +1209,7 @@ static sd::Tensor<float> sample_ipndm_v(denoise_cb_t model,
         float sigma = sigmas[i];
         float t_next = sigmas[i + 1];
 
-        auto denoised_opt = model(x, sigma, i + 1, nullptr);
+        auto denoised_opt = model(x, sigma, i + 1);
         if (denoised_opt.empty()) {
             return {};
         }
@@ -1353,7 +1247,6 @@ static sd::Tensor<float> sample_res_multistep(denoise_cb_t model,
                                               sd::Tensor<float> x,
                                               const std::vector<float>& sigmas,
                                               std::shared_ptr<RNG> rng,
-                                              bool is_flow_denoiser,
                                               float eta) {
     sd::Tensor<float> old_denoised = x;
     bool have_old_sigma = false;
@@ -1377,7 +1270,7 @@ static sd::Tensor<float> sample_res_multistep(denoise_cb_t model,
 
     int steps = static_cast<int>(sigmas.size()) - 1;
     for (int i = 0; i < steps; i++) {
-        auto denoised_opt = model(x, sigmas[i], i + 1, nullptr);
+        auto denoised_opt = model(x, sigmas[i], i + 1);
         if (denoised_opt.empty()) {
            return {};
        }
@@ -1385,8 +1278,7 @@ static sd::Tensor<float> sample_res_multistep(denoise_cb_t model,
 
         float sigma_from = sigmas[i];
         float sigma_to = sigmas[i + 1];
-        auto [sigma_down, sigma_up, alpha_scale] = get_ancestral_step(sigma_from, sigma_to, eta, is_flow_denoiser);
+        auto [sigma_down, sigma_up] = get_ancestral_step(sigma_from, sigma_to, eta);
 
         if (sigma_down == 0.0f || !have_old_sigma) {
             x += ((x - denoised) / sigma_from) * (sigma_down - sigma_from);
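Both `sample_res_multistep` here and `sample_res_2s` below switch from a three-value `get_ancestral_step` (which additionally returns an `alpha_scale` used to rescale `x` for flow denoisers) to the classic two-value split. `get_ancestral_step` itself is not part of this compare; a sketch of the standard k-diffusion formula that the two-value variant conventionally follows, offered as an assumption about this codebase rather than a quote from it:

```cpp
// Sketch of the conventional k-diffusion ancestral step split.
// Assumes sigma_from > sigma_to >= 0; sigma_down is the deterministic target
// noise level, sigma_up the stochastic noise re-injected afterwards, chosen
// so that sigma_down^2 + sigma_up^2 == sigma_to^2.
#include <algorithm>
#include <cmath>
#include <utility>

static std::pair<float, float> ancestral_step_sketch(float sigma_from, float sigma_to, float eta) {
    if (sigma_to == 0.0f || eta == 0.0f) {
        return {sigma_to, 0.0f};  // fully deterministic step
    }
    float sigma_up = std::min(sigma_to,
                              eta * std::sqrt(sigma_to * sigma_to *
                                              (sigma_from * sigma_from - sigma_to * sigma_to) /
                                              (sigma_from * sigma_from)));
    float sigma_down = std::sqrt(sigma_to * sigma_to - sigma_up * sigma_up);
    return {sigma_down, sigma_up};
}
```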
@@ -1413,10 +1305,7 @@ static sd::Tensor<float> sample_res_multistep(denoise_cb_t model,
             x = sigma_fn(h) * x + h * (b1 * denoised + b2 * old_denoised);
         }
 
-        if (sigma_to > 0.0f && sigma_up > 0.0f) {
-            if (is_flow_denoiser) {
-                x *= alpha_scale;
-            }
+        if (sigmas[i + 1] > 0 && sigma_up > 0.0f) {
             x += sd::Tensor<float>::randn_like(x, rng) * sigma_up;
         }
 
@@ -1431,7 +1320,6 @@ static sd::Tensor<float> sample_res_2s(denoise_cb_t model,
                                        sd::Tensor<float> x,
                                        const std::vector<float>& sigmas,
                                        std::shared_ptr<RNG> rng,
-                                       bool is_flow_denoiser,
                                        float eta) {
     const float c2 = 0.5f;
     auto t_fn = [](float sigma) -> float { return -logf(sigma); };
@@ -1454,13 +1342,13 @@ static sd::Tensor<float> sample_res_2s(denoise_cb_t model,
         float sigma_from = sigmas[i];
         float sigma_to = sigmas[i + 1];
 
-        auto denoised_opt = model(x, sigma_from, -(i + 1), nullptr);
+        auto denoised_opt = model(x, sigma_from, -(i + 1));
         if (denoised_opt.empty()) {
             return {};
         }
         sd::Tensor<float> denoised = std::move(denoised_opt);
 
-        auto [sigma_down, sigma_up, alpha_scale] = get_ancestral_step(sigma_from, sigma_to, eta, is_flow_denoiser);
+        auto [sigma_down, sigma_up] = get_ancestral_step(sigma_from, sigma_to, eta);
 
         sd::Tensor<float> x0 = x;
         if (sigma_down == 0.0f || sigma_from == 0.0f) {
@@ -1480,7 +1368,7 @@ static sd::Tensor<float> sample_res_2s(denoise_cb_t model,
             sd::Tensor<float> eps1 = denoised - x0;
             sd::Tensor<float> x2 = x0 + eps1 * (h * a21);
 
-            auto denoised2_opt = model(x2, sigma_c2, i + 1, nullptr);
+            auto denoised2_opt = model(x2, sigma_c2, i + 1);
             if (denoised2_opt.empty()) {
                 return {};
             }
@@ -1489,10 +1377,7 @@ static sd::Tensor<float> sample_res_2s(denoise_cb_t model,
             x = x0 + h * (b1 * eps1 + b2 * eps2);
         }
 
-        if (sigma_to > 0.0f && sigma_up > 0.0f) {
-            if (is_flow_denoiser) {
-                x *= alpha_scale;
-            }
+        if (sigmas[i + 1] > 0 && sigma_up > 0.0f) {
             x += sd::Tensor<float>::randn_like(x, rng) * sigma_up;
         }
     }
@@ -1557,7 +1442,7 @@ static sd::Tensor<float> sample_er_sde(denoise_cb_t model,
 
     int steps = static_cast<int>(sigmas.size()) - 1;
     for (int i = 0; i < steps; i++) {
-        sd::Tensor<float> denoised = model(x, sigmas[i], i + 1, nullptr);
+        sd::Tensor<float> denoised = model(x, sigmas[i], i + 1);
         if (denoised.empty()) {
             return {};
         }
@@ -1643,7 +1528,7 @@ static sd::Tensor<float> sample_ddim_trailing(denoise_cb_t model,
         float sigma = sigmas[i];
         float sigma_to = sigmas[i + 1];
 
-        auto model_output_opt = model(x, sigma, i + 1, nullptr);
+        auto model_output_opt = model(x, sigma, i + 1);
         if (model_output_opt.empty()) {
             return {};
         }
@@ -1715,7 +1600,7 @@ static sd::Tensor<float> sample_tcd(denoise_cb_t model,
         int timestep_s = (int)floor((1 - eta) * prev_timestep);
         float sigma = sigmas[i];
 
-        auto model_output_opt = model(x, sigma, i + 1, nullptr);
+        auto model_output_opt = model(x, sigma, i + 1);
         if (model_output_opt.empty()) {
             return {};
         }
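The TCD context line above is worth a second look: `timestep_s = floor((1 - eta) * prev_timestep)` is how Trajectory Consistency Distillation splits each step into a deterministic jump and a stochastic remainder, with `eta` controlling the stochastic share. A worked instance with illustrative numbers (not taken from the source):

```cpp
// Illustrative arithmetic only; prev_timestep and eta are assumed values.
// eta = 0 -> fully deterministic (timestep_s == prev_timestep);
// eta = 1 -> fully stochastic (timestep_s == 0).
#include <cmath>
#include <cstdio>

int main() {
    int prev_timestep = 800;
    float eta = 0.25f;
    int timestep_s = (int)std::floor((1 - eta) * prev_timestep);
    std::printf("timestep_s = %d\n", timestep_s);  // floor(0.75 * 800) = 600
    return 0;
}
```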
@@ -1743,56 +1628,6 @@ static sd::Tensor<float> sample_tcd(denoise_cb_t model,
     return x;
 }
 
-static sd::Tensor<float> sample_euler_cfg_pp(denoise_cb_t model,
-                                             sd::Tensor<float> x,
-                                             const std::vector<float>& sigmas) {
-    int steps = static_cast<int>(sigmas.size()) - 1;
-    for (int i = 0; i < steps; i++) {
-        float sigma = sigmas[i];
-        sd::Tensor<float> uncond_denoised;
-
-        auto denoised_opt = model(x, sigma, i + 1, &uncond_denoised);
-        if (denoised_opt.empty() || uncond_denoised.empty()) {
-            return {};
-        }
-
-        sd::Tensor<float> denoised = std::move(denoised_opt);
-        sd::Tensor<float> d = (x - uncond_denoised) / sigma;
-
-        x = denoised + d * sigmas[i + 1];
-    }
-    return x;
-}
-
-static sd::Tensor<float> sample_euler_ancestral_cfg_pp(denoise_cb_t model,
-                                                       sd::Tensor<float> x,
-                                                       const std::vector<float>& sigmas,
-                                                       std::shared_ptr<RNG> rng,
-                                                       float eta) {
-    int steps = static_cast<int>(sigmas.size()) - 1;
-    for (int i = 0; i < steps; i++) {
-        float sigma = sigmas[i];
-        sd::Tensor<float> uncond_denoised;
-
-        auto denoised_opt = model(x, sigma, i + 1, &uncond_denoised);
-        if (denoised_opt.empty() || uncond_denoised.empty()) {
-            return {};
-        }
-
-        sd::Tensor<float> denoised = std::move(denoised_opt);
-        sd::Tensor<float> d = (x - uncond_denoised) / sigma;
-
-        auto [sigma_down, sigma_up] = get_ancestral_step(sigmas[i], sigmas[i + 1], eta);
-
-        x = denoised + d * sigma_down;
-
-        if (sigmas[i + 1] > 0) {
-            x += sd::Tensor<float>::randn_like(x, rng) * sigma_up;
-        }
-    }
-    return x;
-}
-
 // k diffusion reverse ODE: dx = (x - D(x;\sigma)) / \sigma dt; \sigma(t) = t
 static sd::Tensor<float> sample_k_diffusion(sample_method_t method,
                                             denoise_cb_t model,
@@ -1800,8 +1635,7 @@ static sd::Tensor<float> sample_k_diffusion(sample_method_t method,
                                             std::vector<float> sigmas,
                                             std::shared_ptr<RNG> rng,
                                             float eta,
-                                            bool is_flow_denoiser,
-                                            const char* extra_sample_args) {
+                                            bool is_flow_denoiser) {
     switch (method) {
         case EULER_A_SAMPLE_METHOD:
             if (is_flow_denoiser)
@@ -1824,25 +1658,21 @@ static sd::Tensor<float> sample_k_diffusion(sample_method_t method,
         case DPMPP2Mv2_SAMPLE_METHOD:
             return sample_dpmpp_2m_v2(model, std::move(x), sigmas);
         case LCM_SAMPLE_METHOD:
-            return sample_lcm(model, std::move(x), sigmas, rng, is_flow_denoiser, extra_sample_args);
+            return sample_lcm(model, std::move(x), sigmas, rng, is_flow_denoiser);
         case IPNDM_SAMPLE_METHOD:
             return sample_ipndm(model, std::move(x), sigmas);
         case IPNDM_V_SAMPLE_METHOD:
             return sample_ipndm_v(model, std::move(x), sigmas);
         case RES_MULTISTEP_SAMPLE_METHOD:
-            return sample_res_multistep(model, std::move(x), sigmas, rng, is_flow_denoiser, eta);
+            return sample_res_multistep(model, std::move(x), sigmas, rng, eta);
         case RES_2S_SAMPLE_METHOD:
-            return sample_res_2s(model, std::move(x), sigmas, rng, is_flow_denoiser, eta);
+            return sample_res_2s(model, std::move(x), sigmas, rng, eta);
         case ER_SDE_SAMPLE_METHOD:
             return sample_er_sde(model, std::move(x), sigmas, rng, is_flow_denoiser, eta);
         case DDIM_TRAILING_SAMPLE_METHOD:
             return sample_ddim_trailing(model, std::move(x), sigmas, rng, eta);
         case TCD_SAMPLE_METHOD:
             return sample_tcd(model, std::move(x), sigmas, rng, eta);
-        case EULER_CFG_PP_SAMPLE_METHOD:
-            return sample_euler_cfg_pp(model, std::move(x), sigmas);
-        case EULER_A_CFG_PP_SAMPLE_METHOD:
-            return sample_euler_ancestral_cfg_pp(model, std::move(x), sigmas, rng, eta);
         default:
             return {};
     }
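The two removed `_cfg_pp` samplers are the only callers that use the callback's fourth argument, which explains the `nullptr` churn across the rest of this file. Their core update steps toward the conditional prediction `denoised` while measuring the noise direction `d` against the unconditional prediction. A minimal scalar sketch of the removed Euler CFG++ update, with floats standing in for tensors and all values illustrative:

```cpp
// Same algebra as the removed sample_euler_cfg_pp inner loop, scalarized.
#include <cstdio>

int main() {
    float x = 2.0f;                 // current latent (scalar stand-in)
    float sigma = 1.0f;             // current noise level
    float sigma_next = 0.5f;        // next noise level
    float denoised = 1.2f;          // conditional prediction D_cond(x, sigma)
    float uncond_denoised = 0.8f;   // unconditional prediction D_uncond(x, sigma)

    float d = (x - uncond_denoised) / sigma;  // noise direction vs. the uncond branch
    x = denoised + d * sigma_next;            // 1.2 + 1.2 * 0.5 = 1.8
    std::printf("x = %.2f\n", x);
    return 0;
}
```

The ancestral variant is identical except that it steps to `sigma_down` and re-injects `sigma_up` worth of fresh noise, exactly as in the plain Euler-ancestral sampler.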
@@ -5,7 +5,6 @@
 #include "anima.hpp"
 #include "ernie_image.hpp"
 #include "flux.hpp"
-#include "hidream_o1.hpp"
 #include "mmdit.hpp"
 #include "qwen_image.hpp"
 #include "tensor_ggml.hpp"
@@ -23,12 +22,6 @@ struct DiffusionParams {
     const sd::Tensor<float>* t5_weights = nullptr;
     const sd::Tensor<float>* guidance = nullptr;
     const std::vector<sd::Tensor<float>>* ref_latents = nullptr;
-    const sd::Tensor<int32_t>* input_ids = nullptr;
-    const sd::Tensor<int32_t>* input_pos = nullptr;
-    const sd::Tensor<int32_t>* token_types = nullptr;
-    const sd::Tensor<int32_t>* vinput_mask = nullptr;
-    const std::vector<sd::Tensor<float>>* vlm_images = nullptr;
-    const std::vector<std::pair<int, sd::Tensor<float>>>* image_embeds = nullptr;
     bool increase_ref_index = false;
     int num_video_frames = -1;
     const std::vector<sd::Tensor<float>>* controls = nullptr;
@@ -56,7 +49,6 @@ struct DiffusionModel {
     virtual void set_weight_adapter(const std::shared_ptr<WeightAdapter>& adapter){};
     virtual int64_t get_adm_in_channels() = 0;
     virtual void set_flash_attention_enabled(bool enabled) = 0;
-    virtual void set_max_graph_vram_bytes(size_t max_vram_bytes) = 0;
     virtual void set_circular_axes(bool circular_x, bool circular_y) = 0;
 };
 
@@ -106,10 +98,6 @@ struct UNetModel : public DiffusionModel {
         unet.set_flash_attention_enabled(enabled);
     }
 
-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        unet.set_max_graph_vram_bytes(max_vram_bytes);
-    }
-
     void set_circular_axes(bool circular_x, bool circular_y) override {
         unet.set_circular_axes(circular_x, circular_y);
     }
@@ -176,10 +164,6 @@ struct MMDiTModel : public DiffusionModel {
         mmdit.set_flash_attention_enabled(enabled);
     }
 
-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        mmdit.set_max_graph_vram_bytes(max_vram_bytes);
-    }
-
     void set_circular_axes(bool circular_x, bool circular_y) override {
         mmdit.set_circular_axes(circular_x, circular_y);
     }
@@ -245,10 +229,6 @@ struct FluxModel : public DiffusionModel {
         flux.set_flash_attention_enabled(enabled);
     }
 
-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        flux.set_max_graph_vram_bytes(max_vram_bytes);
-    }
-
     void set_circular_axes(bool circular_x, bool circular_y) override {
         flux.set_circular_axes(circular_x, circular_y);
     }
@@ -319,10 +299,6 @@ struct AnimaModel : public DiffusionModel {
         anima.set_flash_attention_enabled(enabled);
     }
 
-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        anima.set_max_graph_vram_bytes(max_vram_bytes);
-    }
-
     void set_circular_axes(bool circular_x, bool circular_y) override {
         anima.set_circular_axes(circular_x, circular_y);
     }
@@ -388,10 +364,6 @@ struct WanModel : public DiffusionModel {
         wan.set_flash_attention_enabled(enabled);
     }
 
-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        wan.set_max_graph_vram_bytes(max_vram_bytes);
-    }
-
     void set_circular_axes(bool circular_x, bool circular_y) override {
         wan.set_circular_axes(circular_x, circular_y);
     }
@@ -461,10 +433,6 @@ struct QwenImageModel : public DiffusionModel {
         qwen_image.set_flash_attention_enabled(enabled);
     }
 
-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        qwen_image.set_max_graph_vram_bytes(max_vram_bytes);
-    }
-
     void set_circular_axes(bool circular_x, bool circular_y) override {
         qwen_image.set_circular_axes(circular_x, circular_y);
     }
@@ -483,82 +451,6 @@ struct QwenImageModel : public DiffusionModel {
     }
 };
 
-struct HiDreamO1Model : public DiffusionModel {
-    std::string prefix;
-    HiDreamO1::HiDreamO1Runner hidream_o1;
-
-    HiDreamO1Model(ggml_backend_t backend,
-                   bool offload_params_to_cpu,
-                   const String2TensorStorage& tensor_storage_map = {},
-                   const std::string& prefix = "model")
-        : prefix(prefix), hidream_o1(backend, offload_params_to_cpu, tensor_storage_map, prefix) {
-    }
-
-    std::string get_desc() override {
-        return hidream_o1.get_desc();
-    }
-
-    void alloc_params_buffer() override {
-        hidream_o1.alloc_params_buffer();
-    }
-
-    void free_params_buffer() override {
-        hidream_o1.free_params_buffer();
-    }
-
-    void free_compute_buffer() override {
-        hidream_o1.free_compute_buffer();
-    }
-
-    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
-        hidream_o1.get_param_tensors(tensors, prefix);
-    }
-
-    size_t get_params_buffer_size() override {
-        return hidream_o1.get_params_buffer_size();
-    }
-
-    void set_weight_adapter(const std::shared_ptr<WeightAdapter>& adapter) override {
-        hidream_o1.set_weight_adapter(adapter);
-    }
-
-    int64_t get_adm_in_channels() override {
-        return 0;
-    }
-
-    void set_flash_attention_enabled(bool enabled) {
-        hidream_o1.set_flash_attention_enabled(enabled);
-    }
-
-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        hidream_o1.set_max_graph_vram_bytes(max_vram_bytes);
-    }
-
-    void set_circular_axes(bool circular_x, bool circular_y) override {
-        hidream_o1.set_circular_axes(circular_x, circular_y);
-    }
-
-    sd::Tensor<float> compute(int n_threads,
-                              const DiffusionParams& diffusion_params) override {
-        GGML_ASSERT(diffusion_params.x != nullptr);
-        GGML_ASSERT(diffusion_params.timesteps != nullptr);
-        GGML_ASSERT(diffusion_params.input_ids != nullptr);
-        GGML_ASSERT(diffusion_params.input_pos != nullptr);
-        GGML_ASSERT(diffusion_params.token_types != nullptr);
-        static const std::vector<sd::Tensor<float>> empty_images;
-        static const std::vector<std::pair<int, sd::Tensor<float>>> empty_image_embeds;
-        return hidream_o1.compute(n_threads,
-                                  *diffusion_params.x,
-                                  *diffusion_params.timesteps,
-                                  *diffusion_params.input_ids,
-                                  *diffusion_params.input_pos,
-                                  *diffusion_params.token_types,
-                                  tensor_or_empty(diffusion_params.vinput_mask),
-                                  diffusion_params.image_embeds ? *diffusion_params.image_embeds : empty_image_embeds,
-                                  diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_images);
-    }
-};
-
 struct ZImageModel : public DiffusionModel {
     std::string prefix;
     ZImage::ZImageRunner z_image;
@@ -607,10 +499,6 @@ struct ZImageModel : public DiffusionModel {
         z_image.set_flash_attention_enabled(enabled);
     }
 
-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        z_image.set_max_graph_vram_bytes(max_vram_bytes);
-    }
-
     void set_circular_axes(bool circular_x, bool circular_y) override {
         z_image.set_circular_axes(circular_x, circular_y);
     }
@@ -676,10 +564,6 @@ struct ErnieImageModel : public DiffusionModel {
         ernie_image.set_flash_attention_enabled(enabled);
     }
 
-    void set_max_graph_vram_bytes(size_t max_vram_bytes) override {
-        ernie_image.set_max_graph_vram_bytes(max_vram_bytes);
-    }
-
     void set_circular_axes(bool circular_x, bool circular_y) override {
         ernie_image.set_circular_axes(circular_x, circular_y);
     }
@@ -295,8 +295,6 @@ namespace ErnieImage {
         auto c = time_embedding->forward(ctx, sample);  // [N, hidden_size]
 
         auto mod_params = adaLN_mod->forward(ctx, ggml_silu(ctx->ggml_ctx, c));  // [N, 6 * hidden_size]
-        sd::ggml_graph_cut::mark_graph_cut(hidden_states, "ernie_image.prelude", "hidden_states");
-        // sd::ggml_graph_cut::mark_graph_cut(mod_params, "ernie_image.prelude", "mod_params");
         auto chunks = ggml_ext_chunk(ctx->ggml_ctx, mod_params, 6, 0);
         std::vector<ggml_tensor*> temb;
         temb.reserve(6);
@@ -307,7 +305,6 @@ namespace ErnieImage {
         for (int i = 0; i < params.num_layers; i++) {
             auto layer = std::dynamic_pointer_cast<ErnieImageSharedAdaLNBlock>(blocks["layers." + std::to_string(i)]);
             hidden_states = layer->forward(ctx, hidden_states, pe, temb);
-            sd::ggml_graph_cut::mark_graph_cut(hidden_states, "ernie_image.layers." + std::to_string(i), "hidden_states");
         }
 
         hidden_states = final_norm->forward(ctx, hidden_states, c);
@@ -125,32 +125,26 @@ public:
         auto conv_last = std::dynamic_pointer_cast<Conv2d>(blocks["conv_last"]);
 
         auto feat = conv_first->forward(ctx, x);
-        sd::ggml_graph_cut::mark_graph_cut(feat, "esrgan.prelude", "feat");
         auto body_feat = feat;
         for (int i = 0; i < num_block; i++) {
             std::string name = "body." + std::to_string(i);
             auto block = std::dynamic_pointer_cast<RRDB>(blocks[name]);
 
             body_feat = block->forward(ctx, body_feat);
-            sd::ggml_graph_cut::mark_graph_cut(body_feat, "esrgan.body." + std::to_string(i), "feat");
         }
         body_feat = conv_body->forward(ctx, body_feat);
         feat = ggml_add(ctx->ggml_ctx, feat, body_feat);
-        sd::ggml_graph_cut::mark_graph_cut(feat, "esrgan.body.out", "feat");
         // upsample
         if (scale >= 2) {
             auto conv_up1 = std::dynamic_pointer_cast<Conv2d>(blocks["conv_up1"]);
             feat = lrelu(ctx, conv_up1->forward(ctx, ggml_upscale(ctx->ggml_ctx, feat, 2, GGML_SCALE_MODE_NEAREST)));
-            sd::ggml_graph_cut::mark_graph_cut(feat, "esrgan.up1", "feat");
             if (scale == 4) {
                 auto conv_up2 = std::dynamic_pointer_cast<Conv2d>(blocks["conv_up2"]);
                 feat = lrelu(ctx, conv_up2->forward(ctx, ggml_upscale(ctx->ggml_ctx, feat, 2, GGML_SCALE_MODE_NEAREST)));
-                sd::ggml_graph_cut::mark_graph_cut(feat, "esrgan.up2", "feat");
             }
         }
         // for all scales
         auto out = conv_last->forward(ctx, lrelu(ctx, conv_hr->forward(ctx, feat)));
-        sd::ggml_graph_cut::mark_graph_cut(out, "esrgan.final", "out");
         return out;
     }
 };

@@ -928,9 +928,6 @@ namespace Flux {
             }
 
             txt = txt_in->forward(ctx, txt);
-            sd::ggml_graph_cut::mark_graph_cut(img, "flux.prelude", "img");
-            sd::ggml_graph_cut::mark_graph_cut(txt, "flux.prelude", "txt");
-            sd::ggml_graph_cut::mark_graph_cut(vec, "flux.prelude", "vec");
 
             for (int i = 0; i < params.depth; i++) {
                 if (skip_layers.size() > 0 && std::find(skip_layers.begin(), skip_layers.end(), i) != skip_layers.end()) {
@@ -942,8 +939,6 @@ namespace Flux {
                 auto img_txt = block->forward(ctx, img, txt, vec, pe, txt_img_mask, ds_img_mods, ds_txt_mods);
                 img = img_txt.first;   // [N, n_img_token, hidden_size]
                 txt = img_txt.second;  // [N, n_txt_token, hidden_size]
-                sd::ggml_graph_cut::mark_graph_cut(img, "flux.double_blocks." + std::to_string(i), "img");
-                sd::ggml_graph_cut::mark_graph_cut(txt, "flux.double_blocks." + std::to_string(i), "txt");
             }
 
             auto txt_img = ggml_concat(ctx->ggml_ctx, txt, img, 1);  // [N, n_txt_token + n_img_token, hidden_size]
@@ -954,7 +949,6 @@ namespace Flux {
                 auto block = std::dynamic_pointer_cast<SingleStreamBlock>(blocks["single_blocks." + std::to_string(i)]);
 
                 txt_img = block->forward(ctx, txt_img, vec, pe, txt_img_mask, ss_mods);
-                sd::ggml_graph_cut::mark_graph_cut(txt_img, "flux.single_blocks." + std::to_string(i), "txt_img");
             }
 
             img = ggml_view_3d(ctx->ggml_ctx,
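All of these removed `mark_graph_cut` calls feed the graph-cut machinery deleted at the bottom of this compare (the `sd::ggml_graph_cut` translation unit). The naming scheme is simple: `mark_graph_cut(t, group, output)` renames the tensor to a prefixed `group|output` string, and `is_graph_cut_tensor` later recognizes marked tensors by that prefix. A standalone sketch; the actual `GGML_RUNNER_CUT_PREFIX` value is not shown anywhere in this diff, so the one below is assumed:

```cpp
#include <cstring>
#include <string>

// Assumed value; the real GGML_RUNNER_CUT_PREFIX lives in a header that this
// compare does not include.
static const char* GGML_RUNNER_CUT_PREFIX = "__cut__:";

std::string make_graph_cut_name(const std::string& group, const std::string& output) {
    return std::string(GGML_RUNNER_CUT_PREFIX) + group + "|" + output;
}

bool is_graph_cut_name(const char* name) {
    return name != nullptr &&
           std::strncmp(name, GGML_RUNNER_CUT_PREFIX, std::strlen(GGML_RUNNER_CUT_PREFIX)) == 0;
}

// make_graph_cut_name("flux.double_blocks.0", "img")
//   -> "__cut__:flux.double_blocks.0|img"
```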
(File diff suppressed because it is too large)

@@ -1,298 +0,0 @@
-#ifndef __GGML_EXTEND_BACKEND_HPP__
-#define __GGML_EXTEND_BACKEND_HPP__
-
-#include <cstring>
-#include <mutex>
-
-#include "ggml-backend.h"
-#include "ggml.h"
-
-#ifndef __STATIC_INLINE__
-#define __STATIC_INLINE__ static inline
-#endif
-
-inline void ggml_backend_load_all_once() {
-    // If the registry already has devices and the CPU backend is present,
-    // assume either static registration or explicit host-side preloading has
-    // completed and avoid rescanning the default paths.
-    if (ggml_backend_dev_count() > 0 && ggml_backend_reg_by_name("CPU") != nullptr) {
-        return;
-    }
-    // In dynamic-backend mode the backend modules are discovered at runtime,
-    // so we must load them before asking for the CPU backend or its proc table.
-    // If the host preloaded only a subset of backends, allow one default-path
-    // scan so missing modules can still be discovered.
-    static std::once_flag once;
-    std::call_once(once, []() {
-        if (ggml_backend_dev_count() > 0 && ggml_backend_reg_by_name("CPU") != nullptr) {
-            return;
-        }
-        ggml_backend_load_all();
-    });
-}
-
-// Do not gate this branch on GGML_CPU or GGML_CPU_ALL_VARIANTS:
-// those are CMake options used to configure ggml itself, but they are not
-// exported as PUBLIC compile definitions to stable-diffusion in backend-DL mode.
-// In practice, this target can reliably see GGML_BACKEND_DL, but not whether
-// the CPU backend was compiled as a loadable module. We therefore use runtime
-// backend discovery instead of compile-time assumptions.
-
-__STATIC_INLINE__ ggml_backend_reg_t ggml_backend_cpu_reg() {
-    ggml_backend_reg_t reg = ggml_backend_reg_by_name("CPU");
-    if (reg != nullptr) {
-        return reg;
-    }
-
-    ggml_backend_load_all_once();
-    return ggml_backend_reg_by_name("CPU");
-}
-
-__STATIC_INLINE__ ggml_backend_reg_t ggml_backend_reg_from_backend(ggml_backend_t backend) {
-    if (backend != nullptr) {
-        ggml_backend_dev_t device = ggml_backend_get_device(backend);
-        if (device != nullptr) {
-            return ggml_backend_dev_backend_reg(device);
-        }
-    }
-
-    return ggml_backend_cpu_reg();
-}
-
-__STATIC_INLINE__ ggml_backend_t ggml_backend_cpu_init() {
-    ggml_backend_t backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
-    if (backend != nullptr) {
-        return backend;
-    }
-
-    ggml_backend_load_all_once();
-    return ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
-}
-
-__STATIC_INLINE__ bool ggml_backend_is_cpu(ggml_backend_t backend) {
-    if (backend == nullptr) {
-        return false;
-    }
-
-    ggml_backend_dev_t device = ggml_backend_get_device(backend);
-    if (device != nullptr) {
-        return ggml_backend_dev_type(device) == GGML_BACKEND_DEVICE_TYPE_CPU;
-    }
-
-    const char* backend_name = ggml_backend_name(backend);
-    return backend_name != nullptr && std::strcmp(backend_name, "CPU") == 0;
-}
-
-__STATIC_INLINE__ void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) {
-    ggml_backend_reg_t reg = ggml_backend_reg_from_backend(backend_cpu);
-    if (reg == nullptr) {
-        return;
-    }
-
-    auto fn = reinterpret_cast<ggml_backend_set_n_threads_t>(ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads"));
-    if (fn != nullptr) {
-        fn(backend_cpu, n_threads);
-    }
-}
-
-using __ggml_backend_cpu_set_threadpool_t = void (*)(ggml_backend_t backend_cpu, ggml_threadpool_t threadpool);
-
-__STATIC_INLINE__ void ggml_backend_cpu_set_threadpool(ggml_backend_t backend_cpu, ggml_threadpool_t threadpool) {
-    ggml_backend_reg_t reg = ggml_backend_reg_from_backend(backend_cpu);
-    if (reg == nullptr) {
-        return;
-    }
-
-    auto fn = reinterpret_cast<__ggml_backend_cpu_set_threadpool_t>(ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_set_threadpool"));
-    if (fn != nullptr) {
-        fn(backend_cpu, threadpool);
-    }
-}
-
-__STATIC_INLINE__ void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void* abort_callback_data) {
-    ggml_backend_reg_t reg = ggml_backend_reg_from_backend(backend_cpu);
-    if (reg == nullptr) {
-        return;
-    }
-
-    auto fn = reinterpret_cast<ggml_backend_set_abort_callback_t>(ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback"));
-    if (fn != nullptr) {
-        fn(backend_cpu, abort_callback, abort_callback_data);
-    }
-}
-
-__STATIC_INLINE__ ggml_backend_buffer_t ggml_backend_tensor_buffer(const struct ggml_tensor* tensor) {
-    if (tensor == nullptr) {
-        return nullptr;
-    }
-
-    return tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
-}
-
-__STATIC_INLINE__ bool ggml_backend_tensor_is_host_accessible(const struct ggml_tensor* tensor) {
-    if (tensor == nullptr || tensor->data == nullptr) {
-        return false;
-    }
-
-    ggml_backend_buffer_t buffer = ggml_backend_tensor_buffer(tensor);
-    return buffer == nullptr || ggml_backend_buffer_is_host(buffer);
-}
-
-__STATIC_INLINE__ size_t ggml_backend_tensor_offset(const struct ggml_tensor* tensor, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
-    return (size_t)(i0 * tensor->nb[0] + i1 * tensor->nb[1] + i2 * tensor->nb[2] + i3 * tensor->nb[3]);
-}
-
-template <typename T>
-__STATIC_INLINE__ void ggml_backend_tensor_write_scalar(const struct ggml_tensor* tensor, int64_t i0, int64_t i1, int64_t i2, int64_t i3, T value) {
-    const size_t offset = ggml_backend_tensor_offset(tensor, i0, i1, i2, i3);
-
-    if (ggml_backend_tensor_is_host_accessible(tensor)) {
-        auto* dst = reinterpret_cast<T*>(reinterpret_cast<char*>(tensor->data) + offset);
-        *dst = value;
-        return;
-    }
-
-    ggml_backend_tensor_set(const_cast<struct ggml_tensor*>(tensor), &value, offset, sizeof(T));
-}
-
-__STATIC_INLINE__ void ggml_set_f32_nd(const struct ggml_tensor* tensor, int64_t i0, int64_t i1, int64_t i2, int64_t i3, float value) {
-    switch (tensor->type) {
-        case GGML_TYPE_I8:
-            ggml_backend_tensor_write_scalar(tensor, i0, i1, i2, i3, static_cast<int8_t>(value));
-            break;
-        case GGML_TYPE_I16:
-            ggml_backend_tensor_write_scalar(tensor, i0, i1, i2, i3, static_cast<int16_t>(value));
-            break;
-        case GGML_TYPE_I32:
-            ggml_backend_tensor_write_scalar(tensor, i0, i1, i2, i3, static_cast<int32_t>(value));
-            break;
-        case GGML_TYPE_F16:
-            ggml_backend_tensor_write_scalar(tensor, i0, i1, i2, i3, ggml_fp32_to_fp16(value));
-            break;
-        case GGML_TYPE_BF16:
-            ggml_backend_tensor_write_scalar(tensor, i0, i1, i2, i3, ggml_fp32_to_bf16(value));
-            break;
-        case GGML_TYPE_F32:
-            ggml_backend_tensor_write_scalar(tensor, i0, i1, i2, i3, value);
-            break;
-        default:
-            GGML_ABORT("fatal error");
-    }
-}
-
-__STATIC_INLINE__ void ggml_set_f32_1d(const struct ggml_tensor* tensor, int i, float value) {
-    if (!ggml_is_contiguous(tensor)) {
-        int64_t id[4] = {0, 0, 0, 0};
-        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
-        ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
-        return;
-    }
-
-    switch (tensor->type) {
-        case GGML_TYPE_I8:
-            ggml_backend_tensor_write_scalar(tensor, i, 0, 0, 0, static_cast<int8_t>(value));
-            break;
-        case GGML_TYPE_I16:
-            ggml_backend_tensor_write_scalar(tensor, i, 0, 0, 0, static_cast<int16_t>(value));
-            break;
-        case GGML_TYPE_I32:
-            ggml_backend_tensor_write_scalar(tensor, i, 0, 0, 0, static_cast<int32_t>(value));
-            break;
-        case GGML_TYPE_F16:
-            ggml_backend_tensor_write_scalar(tensor, i, 0, 0, 0, ggml_fp32_to_fp16(value));
-            break;
-        case GGML_TYPE_BF16:
-            ggml_backend_tensor_write_scalar(tensor, i, 0, 0, 0, ggml_fp32_to_bf16(value));
-            break;
-        case GGML_TYPE_F32:
-            ggml_backend_tensor_write_scalar(tensor, i, 0, 0, 0, value);
-            break;
-        default:
-            GGML_ABORT("fatal error");
-    }
-}
-
-__STATIC_INLINE__ enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context* ctx, struct ggml_cgraph* cgraph, int n_threads) {
-    (void)ctx;
-
-    // The legacy ggml_graph_compute_with_ctx() symbol lives in ggml-cpu, but
-    // the backend proc table does not expose it in GGML_BACKEND_DL mode.
-    // Recreate the old behavior by initializing the CPU backend explicitly and
-    // executing the graph through the generic backend API.
-    ggml_backend_t backend = ggml_backend_cpu_init();
-    if (backend == nullptr) {
-        return GGML_STATUS_ALLOC_FAILED;
-    }
-
-    ggml_backend_cpu_set_n_threads(backend, n_threads);
-
-    const enum ggml_status status = ggml_backend_graph_compute(backend, cgraph);
-    ggml_backend_free(backend);
-
-    return status;
-}
-
-__STATIC_INLINE__ ggml_tensor* ggml_set_f32(struct ggml_tensor* tensor, float value) {
-    GGML_ASSERT(tensor != nullptr);
-
-    if (ggml_backend_tensor_is_host_accessible(tensor) && ggml_is_contiguous(tensor)) {
-        const int64_t nelements = ggml_nelements(tensor);
-
-        switch (tensor->type) {
-            case GGML_TYPE_I8: {
-                auto* data = reinterpret_cast<int8_t*>(tensor->data);
-                const int8_t v = static_cast<int8_t>(value);
-                for (int64_t i = 0; i < nelements; ++i) {
-                    data[i] = v;
-                }
-            } break;
-            case GGML_TYPE_I16: {
-                auto* data = reinterpret_cast<int16_t*>(tensor->data);
-                const int16_t v = static_cast<int16_t>(value);
-                for (int64_t i = 0; i < nelements; ++i) {
-                    data[i] = v;
-                }
-            } break;
-            case GGML_TYPE_I32: {
-                auto* data = reinterpret_cast<int32_t*>(tensor->data);
-                const int32_t v = static_cast<int32_t>(value);
-                for (int64_t i = 0; i < nelements; ++i) {
-                    data[i] = v;
-                }
-            } break;
-            case GGML_TYPE_F16: {
-                auto* data = reinterpret_cast<ggml_fp16_t*>(tensor->data);
-                const ggml_fp16_t v = ggml_fp32_to_fp16(value);
-                for (int64_t i = 0; i < nelements; ++i) {
-                    data[i] = v;
-                }
-            } break;
-            case GGML_TYPE_BF16: {
-                auto* data = reinterpret_cast<ggml_bf16_t*>(tensor->data);
-                const ggml_bf16_t v = ggml_fp32_to_bf16(value);
-                for (int64_t i = 0; i < nelements; ++i) {
-                    data[i] = v;
-                }
-            } break;
-            case GGML_TYPE_F32: {
-                auto* data = reinterpret_cast<float*>(tensor->data);
-                for (int64_t i = 0; i < nelements; ++i) {
-                    data[i] = value;
-                }
-            } break;
-            default:
-                GGML_ABORT("fatal error");
-        }
-
-        return tensor;
-    }
-
-    const int64_t nelements = ggml_nelements(tensor);
-    for (int64_t i = 0; i < nelements; ++i) {
-        ggml_set_f32_1d(tensor, static_cast<int>(i), value);
-    }
-
-    return tensor;
-}
-
-#endif
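The deleted header above is a compatibility shim for GGML_BACKEND_DL builds: it resolves the CPU backend through the runtime registry instead of linking ggml-cpu symbols directly. A hypothetical host-side usage compressing the pattern that its `ggml_graph_compute_with_ctx` replacement implements:

```cpp
// Sketch under the assumption that the deleted header is still on the include
// path; `gf` is a ggml_cgraph built and allocated elsewhere.
void run_on_cpu_sketch(struct ggml_cgraph* gf) {
    ggml_backend_load_all_once();                  // one-time dynamic backend discovery
    ggml_backend_t cpu = ggml_backend_cpu_init();  // resolves CPU via the registry
    if (cpu == nullptr) {
        return;
    }
    ggml_backend_cpu_set_n_threads(cpu, 8);        // proc-table lookup under the hood
    ggml_backend_graph_compute(cpu, gf);           // generic backend API, no ggml-cpu symbols
    ggml_backend_free(cpu);
}
```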
@ -1,700 +0,0 @@
|
|||||||
#include "ggml_graph_cut.h"
|
|
||||||
|
|
||||||
#include <algorithm>
|
|
||||||
#include <cstring>
|
|
||||||
#include <map>
|
|
||||||
#include <set>
|
|
||||||
#include <sstream>
|
|
||||||
#include <stack>
|
|
||||||
#include <unordered_map>
|
|
||||||
|
|
||||||
#include "ggml-alloc.h"
|
|
||||||
#include "ggml-backend.h"
|
|
||||||
#include "util.h"
|
|
||||||
|
|
||||||
#include "../ggml/src/ggml-impl.h"
|
|
||||||
|
|
||||||
namespace sd::ggml_graph_cut {
|
|
||||||
|
|
||||||
static std::string graph_cut_tensor_display_name(const ggml_tensor* tensor) {
|
|
||||||
if (tensor == nullptr) {
|
|
||||||
return "<null>";
|
|
||||||
}
|
|
||||||
if (tensor->name[0] != '\0') {
|
|
||||||
return tensor->name;
|
|
||||||
}
|
|
||||||
return sd_format("<tensor@%p>", (const void*)tensor);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int graph_leaf_index(ggml_cgraph* gf, const ggml_tensor* tensor) {
|
|
||||||
GGML_ASSERT(gf != nullptr);
|
|
||||||
GGML_ASSERT(tensor != nullptr);
|
|
||||||
for (int i = 0; i < gf->n_leafs; ++i) {
|
|
||||||
if (gf->leafs[i] == tensor) {
|
|
||||||
return i;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool is_params_tensor(const std::unordered_set<const ggml_tensor*>& params_tensor_set,
|
|
||||||
const ggml_tensor* tensor) {
|
|
||||||
if (tensor == nullptr) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return params_tensor_set.find(tensor) != params_tensor_set.end();
|
|
||||||
}
|
|
||||||
|
|
||||||
static int graph_node_index_by_name(ggml_cgraph* gf, const char* name) {
|
|
||||||
GGML_ASSERT(gf != nullptr);
|
|
||||||
if (name == nullptr || name[0] == '\0') {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
const int n_nodes = ggml_graph_n_nodes(gf);
|
|
||||||
for (int i = 0; i < n_nodes; ++i) {
|
|
||||||
ggml_tensor* node = ggml_graph_node(gf, i);
|
|
||||||
if (node != nullptr && std::strcmp(node->name, name) == 0) {
|
|
||||||
return i;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
static Plan::InputShape input_shape(const ggml_tensor* tensor) {
|
|
||||||
Plan::InputShape shape;
|
|
||||||
if (tensor == nullptr) {
|
|
||||||
return shape;
|
|
||||||
}
|
|
||||||
shape.type = tensor->type;
|
|
||||||
for (int i = 0; i < GGML_MAX_DIMS; ++i) {
|
|
||||||
shape.ne[static_cast<size_t>(i)] = tensor->ne[i];
|
|
||||||
}
|
|
||||||
return shape;
|
|
||||||
}
|
|
||||||
|
|
||||||
static size_t graph_cut_segment_vram_bytes(const Segment& segment) {
|
|
||||||
return segment.compute_buffer_size +
|
|
||||||
segment.input_param_bytes +
|
|
||||||
segment.input_previous_cut_bytes +
|
|
||||||
segment.output_bytes;
|
|
||||||
}
|
|
||||||
|
|
||||||
static Segment make_segment_seed(const Plan& plan,
|
|
||||||
size_t start_segment_index,
|
|
||||||
size_t end_segment_index) {
|
|
||||||
GGML_ASSERT(start_segment_index < plan.segments.size());
|
|
||||||
GGML_ASSERT(end_segment_index < plan.segments.size());
|
|
||||||
GGML_ASSERT(start_segment_index <= end_segment_index);
|
|
||||||
|
|
||||||
Segment seed;
|
|
||||||
const auto& start_segment = plan.segments[start_segment_index];
|
|
||||||
const auto& target_segment = plan.segments[end_segment_index];
|
|
||||||
std::unordered_set<int> seen_output_node_indices;
|
|
||||||
for (size_t seg_idx = start_segment_index; seg_idx <= end_segment_index; ++seg_idx) {
|
|
||||||
for (int output_node_index : plan.segments[seg_idx].output_node_indices) {
|
|
||||||
if (seen_output_node_indices.insert(output_node_index).second) {
|
|
||||||
seed.output_node_indices.push_back(output_node_index);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (start_segment_index == end_segment_index) {
|
|
||||||
seed.group_name = target_segment.group_name;
|
|
||||||
} else {
|
|
||||||
seed.group_name = sd_format("%s..%s",
|
|
||||||
start_segment.group_name.c_str(),
|
|
||||||
target_segment.group_name.c_str());
|
|
||||||
}
|
|
||||||
return seed;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void build_segment(ggml_cgraph* gf,
|
|
||||||
Plan& plan,
|
|
||||||
Segment& segment,
|
|
||||||
const std::unordered_map<const ggml_tensor*, int>& producer_index,
|
|
||||||
std::unordered_set<int>& available_cut_output_node_indices,
|
|
||||||
ggml_backend_t backend,
|
|
||||||
const std::unordered_set<const ggml_tensor*>& params_tensor_set,
|
|
||||||
const char* log_desc) {
|
|
||||||
std::set<int> internal_nodes;
|
|
||||||
std::unordered_set<const ggml_tensor*> input_seen;
|
|
||||||
std::vector<Segment::InputRef> input_refs;
|
|
||||||
|
|
||||||
std::stack<ggml_tensor*> work_stack;
|
|
||||||
for (int output_node_index : segment.output_node_indices) {
|
|
||||||
ggml_tensor* output = ggml_graph_node(gf, output_node_index);
|
|
||||||
if (output != nullptr) {
|
|
||||||
work_stack.push(output);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
while (!work_stack.empty()) {
|
|
||||||
ggml_tensor* tensor = work_stack.top();
|
|
||||||
work_stack.pop();
|
|
||||||
|
|
||||||
if (tensor == nullptr) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto producer_it = producer_index.find(tensor);
|
|
||||||
if (producer_it == producer_index.end()) {
|
|
||||||
if (input_seen.insert(tensor).second) {
|
|
||||||
Segment::InputRef input_ref;
|
|
||||||
input_ref.type = is_params_tensor(params_tensor_set, tensor) ? Segment::INPUT_PARAM : Segment::INPUT_EXTERNAL;
|
|
||||||
input_ref.display_name = graph_cut_tensor_display_name(tensor);
|
|
||||||
input_ref.leaf_index = graph_leaf_index(gf, tensor);
|
|
||||||
input_refs.push_back(std::move(input_ref));
|
|
||||||
}
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
int node_idx = producer_it->second;
|
|
||||||
if (available_cut_output_node_indices.find(node_idx) != available_cut_output_node_indices.end()) {
|
|
||||||
if (input_seen.insert(tensor).second) {
|
|
||||||
Segment::InputRef input_ref;
|
|
||||||
input_ref.type = Segment::INPUT_PREVIOUS_CUT;
|
|
||||||
input_ref.display_name = graph_cut_tensor_display_name(tensor);
|
|
||||||
input_ref.node_index = node_idx;
|
|
||||||
input_refs.push_back(std::move(input_ref));
|
|
||||||
}
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!internal_nodes.insert(node_idx).second) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
ggml_tensor* node = ggml_graph_node(gf, node_idx);
|
|
||||||
for (int src_idx = 0; src_idx < GGML_MAX_SRC; ++src_idx) {
|
|
||||||
if (node->src[src_idx] != nullptr) {
|
|
||||||
work_stack.push(node->src[src_idx]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!internal_nodes.empty()) {
|
|
||||||
segment.internal_node_indices.assign(internal_nodes.begin(), internal_nodes.end());
|
|
||||||
}
|
|
||||||
|
|
||||||
std::sort(input_refs.begin(),
|
|
||||||
input_refs.end(),
|
|
||||||
[](const Segment::InputRef& a, const Segment::InputRef& b) {
|
|
||||||
if (a.type != b.type) {
|
|
||||||
return a.type < b.type;
|
|
||||||
}
|
|
||||||
return a.display_name < b.display_name;
|
|
||||||
});
|
|
||||||
segment.input_refs = input_refs;
|
|
||||||
for (const auto& input : input_refs) {
|
|
||||||
ggml_tensor* current_input = input_tensor(gf, input);
|
|
||||||
size_t tensor_bytes = current_input == nullptr
|
|
||||||
? 0
|
|
||||||
: (input.type == Segment::INPUT_PREVIOUS_CUT
|
|
||||||
? cache_tensor_bytes(current_input)
|
|
||||||
: ggml_nbytes(current_input));
|
|
||||||
switch (input.type) {
|
|
||||||
case Segment::INPUT_PREVIOUS_CUT:
|
|
||||||
segment.input_previous_cut_bytes += tensor_bytes;
|
|
||||||
break;
|
|
||||||
case Segment::INPUT_PARAM:
|
|
||||||
segment.input_param_bytes += tensor_bytes;
|
|
||||||
break;
|
|
||||||
case Segment::INPUT_EXTERNAL:
|
|
||||||
default:
|
|
||||||
segment.input_external_bytes += tensor_bytes;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for (int output_node_index : segment.output_node_indices) {
|
|
||||||
ggml_tensor* output = ggml_graph_node(gf, output_node_index);
|
|
||||||
segment.output_bytes += cache_tensor_bytes(output);
|
|
||||||
}
|
|
||||||
segment.compute_buffer_size = measure_segment_compute_buffer(backend, gf, segment, log_desc);
|
|
||||||
|
|
||||||
for (int output_node_index : segment.output_node_indices) {
|
|
||||||
available_cut_output_node_indices.insert(output_node_index);
|
|
||||||
}
|
|
||||||
plan.segments.push_back(std::move(segment));
|
|
||||||
}
|
|
||||||
|
|
||||||

bool is_graph_cut_tensor(const ggml_tensor* tensor) {
    if (tensor == nullptr || tensor->name[0] == '\0') {
        return false;
    }
    return std::strncmp(tensor->name, GGML_RUNNER_CUT_PREFIX, std::strlen(GGML_RUNNER_CUT_PREFIX)) == 0;
}

std::string make_graph_cut_name(const std::string& group, const std::string& output) {
    return std::string(GGML_RUNNER_CUT_PREFIX) + group + "|" + output;
}

void mark_graph_cut(ggml_tensor* tensor, const std::string& group, const std::string& output) {
    if (tensor == nullptr) {
        return;
    }
    auto name = make_graph_cut_name(group, output);
    ggml_set_name(tensor, name.c_str());
}
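
// Illustrative sketch (not part of the diff): a runner tags a segment
// boundary by renaming the boundary tensor with the cut prefix; the group
// id before '|' is what build_plan() later groups outputs by. The tensor,
// group and output names below are hypothetical.
//
//   ggml_tensor* cut = some_block_output;  // hypothetical tensor
//   mark_graph_cut(cut, "dit.block_07", "hidden");
//   // cut->name == "ggml_runner_cut:dit.block_07|hidden"
//   // note: ggml_set_name() truncates to GGML_MAX_NAME characters, so very
//   // long group/output ids can collide.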

int leaf_count(ggml_cgraph* gf) {
    GGML_ASSERT(gf != nullptr);
    return gf->n_leafs;
}

ggml_tensor* leaf_tensor(ggml_cgraph* gf, int leaf_index) {
    GGML_ASSERT(gf != nullptr);
    if (leaf_index < 0 || leaf_index >= gf->n_leafs) {
        return nullptr;
    }
    return gf->leafs[leaf_index];
}

ggml_backend_buffer_t tensor_buffer(const ggml_tensor* tensor) {
    if (tensor == nullptr) {
        return nullptr;
    }
    return tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
}

ggml_tensor* cache_source_tensor(ggml_tensor* tensor) {
    if (tensor == nullptr) {
        return nullptr;
    }
    if (tensor_buffer(tensor) == nullptr && tensor->src[0] != nullptr &&
        ggml_nelements(tensor->src[0]) == ggml_nelements(tensor) &&
        ggml_nbytes(tensor->src[0]) == ggml_nbytes(tensor)) {
        return cache_source_tensor(tensor->src[0]);
    }
    return tensor->view_src ? tensor->view_src : tensor;
}

size_t cache_tensor_bytes(const ggml_tensor* tensor) {
    if (tensor == nullptr) {
        return 0;
    }
    const ggml_tensor* cache_src = tensor->view_src ? tensor->view_src : tensor;
    return ggml_nbytes(cache_src);
}
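
// Minimal sketch (assumption, not from the diff): cache_source_tensor()
// walks past byte-preserving nodes that own no buffer, so cache accounting
// lands on the tensor that actually holds the data.
//
//   ggml_tensor* a = ggml_mul_mat(ctx, w, x);        // owns its data
//   ggml_tensor* b = ggml_reshape_2d(ctx, a, n, m);  // view of a, no own buffer
//   // cache_source_tensor(b) -> a
//   // cache_tensor_bytes(b)  == ggml_nbytes(a)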

bool plan_matches_graph(ggml_cgraph* gf, const Plan& plan) {
    GGML_ASSERT(gf != nullptr);
    if (ggml_graph_n_nodes(gf) != plan.n_nodes || gf->n_leafs != plan.n_leafs) {
        return false;
    }
    for (const auto& input_shape_ref : plan.input_shapes) {
        if (input_shape_ref.leaf_index < 0 || input_shape_ref.leaf_index >= gf->n_leafs) {
            return false;
        }
        ggml_tensor* leaf = gf->leafs[input_shape_ref.leaf_index];
        if (leaf == nullptr || input_shape_ref.type != leaf->type) {
            return false;
        }
        for (int d = 0; d < GGML_MAX_DIMS; ++d) {
            if (input_shape_ref.ne[static_cast<size_t>(d)] != leaf->ne[d]) {
                return false;
            }
        }
    }
    return true;
}

ggml_tensor* output_tensor(ggml_cgraph* gf, const Segment& segment, size_t output_index) {
    GGML_ASSERT(gf != nullptr);
    if (output_index >= segment.output_node_indices.size()) {
        return nullptr;
    }
    int node_index = segment.output_node_indices[output_index];
    if (node_index < 0 || node_index >= ggml_graph_n_nodes(gf)) {
        return nullptr;
    }
    return ggml_graph_node(gf, node_index);
}

ggml_tensor* input_tensor(ggml_cgraph* gf, const Segment::InputRef& input_ref) {
    GGML_ASSERT(gf != nullptr);
    if (input_ref.type == Segment::INPUT_PREVIOUS_CUT) {
        if (input_ref.node_index < 0 || input_ref.node_index >= ggml_graph_n_nodes(gf)) {
            return nullptr;
        }
        return ggml_graph_node(gf, input_ref.node_index);
    }
    if (input_ref.leaf_index < 0 || input_ref.leaf_index >= gf->n_leafs) {
        return nullptr;
    }
    return leaf_tensor(gf, input_ref.leaf_index);
}

std::vector<ggml_tensor*> param_tensors(ggml_cgraph* gf, const Segment& segment) {
    GGML_ASSERT(gf != nullptr);
    std::vector<ggml_tensor*> tensors;
    std::unordered_set<ggml_tensor*> seen_tensors;
    tensors.reserve(segment.input_refs.size());
    seen_tensors.reserve(segment.input_refs.size());
    for (const auto& input_ref : segment.input_refs) {
        if (input_ref.type != Segment::INPUT_PARAM) {
            continue;
        }
        ggml_tensor* tensor = input_tensor(gf, input_ref);
        if (tensor == nullptr) {
            continue;
        }
        if (seen_tensors.insert(tensor).second) {
            tensors.push_back(tensor);
        }
    }
    return tensors;
}

std::vector<ggml_tensor*> runtime_param_tensors(ggml_cgraph* gf, const Segment& segment, const char* log_desc) {
    std::vector<ggml_tensor*> tensors = param_tensors(gf, segment);
    std::vector<ggml_tensor*> filtered_tensors;
    filtered_tensors.reserve(tensors.size());
    for (ggml_tensor* tensor : tensors) {
        if (tensor_buffer(tensor) == nullptr) {
            LOG_WARN("%s graph cut skipping param input without buffer: segment=%s tensor=%s",
                     log_desc == nullptr ? "unknown" : log_desc,
                     segment.group_name.c_str(),
                     tensor->name);
            continue;
        }
        filtered_tensors.push_back(tensor);
    }
    return filtered_tensors;
}

std::unordered_set<std::string> collect_future_input_names(ggml_cgraph* gf,
                                                           const Plan& plan,
                                                           size_t current_segment_index) {
    GGML_ASSERT(gf != nullptr);
    std::unordered_set<std::string> future_input_names;
    for (size_t seg_idx = current_segment_index + 1; seg_idx < plan.segments.size(); ++seg_idx) {
        const auto& segment = plan.segments[seg_idx];
        for (const auto& input_ref : segment.input_refs) {
            if (input_ref.type != Segment::INPUT_PREVIOUS_CUT) {
                continue;
            }
            ggml_tensor* current_input = input_tensor(gf, input_ref);
            if (current_input != nullptr && current_input->name[0] != '\0') {
                future_input_names.insert(current_input->name);
            }
        }
    }
    return future_input_names;
}

ggml_cgraph* build_segment_graph(ggml_cgraph* gf,
                                 const Segment& segment,
                                 ggml_context** graph_ctx_out) {
    GGML_ASSERT(gf != nullptr);
    GGML_ASSERT(graph_ctx_out != nullptr);

    const size_t graph_size = segment.internal_node_indices.size() + segment.input_refs.size() + 8;
    ggml_init_params params = {
        /*.mem_size   =*/ggml_graph_overhead_custom(graph_size, false) + 1024,
        /*.mem_buffer =*/nullptr,
        /*.no_alloc   =*/true,
    };
    ggml_context* graph_ctx = ggml_init(params);
    GGML_ASSERT(graph_ctx != nullptr);
    ggml_cgraph* segment_graph = ggml_new_graph_custom(graph_ctx, graph_size, false);
    GGML_ASSERT(segment_graph != nullptr);

    for (const auto& input : segment.input_refs) {
        ggml_tensor* current_input = input_tensor(gf, input);
        if (current_input == nullptr) {
            continue;
        }
        GGML_ASSERT(segment_graph->n_leafs < segment_graph->size);
        segment_graph->leafs[segment_graph->n_leafs++] = current_input;
    }

    for (int output_node_index : segment.output_node_indices) {
        ggml_tensor* output = ggml_graph_node(gf, output_node_index);
        if (output == nullptr) {
            continue;
        }
        ggml_set_output(output);
    }
    for (int node_idx : segment.internal_node_indices) {
        ggml_graph_add_node(segment_graph, ggml_graph_node(gf, node_idx));
    }
    *graph_ctx_out = graph_ctx;
    return segment_graph;
}

size_t measure_segment_compute_buffer(ggml_backend_t backend,
                                      ggml_cgraph* gf,
                                      const Segment& segment,
                                      const char* log_desc) {
    GGML_ASSERT(backend != nullptr);
    GGML_ASSERT(gf != nullptr);
    if (segment.internal_node_indices.empty()) {
        return 0;
    }

    ggml_context* graph_ctx    = nullptr;
    ggml_cgraph* segment_graph = build_segment_graph(gf, segment, &graph_ctx);
    ggml_gallocr_t allocr      = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend));

    size_t sizes[1] = {0};
    ggml_gallocr_reserve_n_size(
        allocr,
        segment_graph,
        nullptr,
        nullptr,
        sizes);
    size_t buffer_size = sizes[0];

    ggml_gallocr_free(allocr);
    ggml_free(graph_ctx);
    return buffer_size;
}

Plan build_plan(ggml_backend_t backend,
                ggml_cgraph* gf,
                const std::unordered_set<const ggml_tensor*>& params_tensor_set,
                const char* log_desc) {
    GGML_ASSERT(backend != nullptr);
    GGML_ASSERT(gf != nullptr);
    Plan plan;
    plan.available    = true;
    const int n_nodes = ggml_graph_n_nodes(gf);
    if (n_nodes <= 0) {
        return plan;
    }
    plan.n_nodes = n_nodes;
    plan.n_leafs = gf->n_leafs;
    for (int i = 0; i < gf->n_leafs; ++i) {
        ggml_tensor* leaf = gf->leafs[i];
        if (is_params_tensor(params_tensor_set, leaf)) {
            continue;
        }
        auto shape       = input_shape(leaf);
        shape.leaf_index = i;
        plan.input_shapes.push_back(shape);
    }

    std::unordered_map<const ggml_tensor*, int> producer_index;
    producer_index.reserve(static_cast<size_t>(n_nodes));
    for (int i = 0; i < n_nodes; ++i) {
        producer_index[ggml_graph_node(gf, i)] = i;
    }

    std::vector<Segment> grouped_segments;
    std::unordered_map<std::string, size_t> group_to_segment;
    for (int i = 0; i < n_nodes; ++i) {
        ggml_tensor* node = ggml_graph_node(gf, i);
        if (!is_graph_cut_tensor(node)) {
            continue;
        }

        plan.has_cuts = true;
        std::string full_name(node->name);
        std::string payload = full_name.substr(std::strlen(GGML_RUNNER_CUT_PREFIX));
        size_t sep          = payload.find('|');
        std::string group   = sep == std::string::npos ? payload : payload.substr(0, sep);

        auto it = group_to_segment.find(group);
        if (it == group_to_segment.end()) {
            Segment segment;
            segment.group_name = group;
            segment.output_node_indices.push_back(i);
            group_to_segment[group] = grouped_segments.size();
            grouped_segments.push_back(std::move(segment));
        } else {
            auto& segment = grouped_segments[it->second];
            segment.output_node_indices.push_back(i);
        }
    }

    if (!plan.has_cuts) {
        return plan;
    }

    std::unordered_set<int> available_cut_output_node_indices;
    available_cut_output_node_indices.reserve(static_cast<size_t>(n_nodes));
    for (auto& segment : grouped_segments) {
        build_segment(gf,
                      plan,
                      segment,
                      producer_index,
                      available_cut_output_node_indices,
                      backend,
                      params_tensor_set,
                      log_desc);
    }

    int final_output_index = graph_node_index_by_name(gf, "ggml_runner_final_result_tensor");
    if (final_output_index < 0) {
        final_output_index = n_nodes - 1;
    }
    ggml_tensor* final_output = final_output_index >= 0 ? ggml_graph_node(gf, final_output_index) : nullptr;
    if (final_output != nullptr && available_cut_output_node_indices.find(final_output_index) == available_cut_output_node_indices.end()) {
        Segment final_segment;
        final_segment.group_name = "ggml_runner.final";
        final_segment.output_node_indices.push_back(final_output_index);
        build_segment(gf,
                      plan,
                      final_segment,
                      producer_index,
                      available_cut_output_node_indices,
                      backend,
                      params_tensor_set,
                      log_desc);
    }

    return plan;
}

Plan apply_max_vram_budget(ggml_cgraph* gf,
                           const Plan& base_plan,
                           size_t max_graph_vram_bytes,
                           ggml_backend_t backend,
                           const std::unordered_set<const ggml_tensor*>& params_tensor_set,
                           const char* log_desc) {
    GGML_ASSERT(backend != nullptr);
    GGML_ASSERT(gf != nullptr);
    int64_t t_budget_begin = ggml_time_ms();
    if (max_graph_vram_bytes == 0 || !base_plan.has_cuts || base_plan.segments.size() <= 1) {
        return base_plan;
    }

    const int n_nodes = ggml_graph_n_nodes(gf);
    std::unordered_map<const ggml_tensor*, int> producer_index;
    producer_index.reserve(static_cast<size_t>(n_nodes));
    for (int i = 0; i < n_nodes; ++i) {
        producer_index[ggml_graph_node(gf, i)] = i;
    }

    Plan merged_plan;
    merged_plan.available = true;
    merged_plan.has_cuts  = base_plan.has_cuts;
    merged_plan.valid     = base_plan.valid;
    merged_plan.n_nodes   = base_plan.n_nodes;
    merged_plan.n_leafs   = base_plan.n_leafs;

    std::unordered_set<int> available_cut_output_node_indices;
    available_cut_output_node_indices.reserve(static_cast<size_t>(n_nodes));

    size_t start_segment_index = 0;
    while (start_segment_index < base_plan.segments.size()) {
        Plan single_plan;
        auto single_available_cut_output_node_indices = available_cut_output_node_indices;
        auto single_seed = make_segment_seed(base_plan,
                                             start_segment_index,
                                             start_segment_index);
        build_segment(gf,
                      single_plan,
                      single_seed,
                      producer_index,
                      single_available_cut_output_node_indices,
                      backend,
                      params_tensor_set,
                      log_desc);
        GGML_ASSERT(!single_plan.segments.empty());

        size_t best_end_segment_index = start_segment_index;
        bool can_merge_next_segment   = graph_cut_segment_vram_bytes(single_plan.segments.back()) <= max_graph_vram_bytes;

        while (can_merge_next_segment && best_end_segment_index + 1 < base_plan.segments.size()) {
            const size_t next_end_segment_index = best_end_segment_index + 1;
            Plan candidate_plan;
            auto candidate_available_cut_output_node_indices = available_cut_output_node_indices;
            auto candidate_seed = make_segment_seed(base_plan,
                                                    start_segment_index,
                                                    next_end_segment_index);
            build_segment(gf,
                          candidate_plan,
                          candidate_seed,
                          producer_index,
                          candidate_available_cut_output_node_indices,
                          backend,
                          params_tensor_set,
                          log_desc);
            GGML_ASSERT(!candidate_plan.segments.empty());

            const auto& candidate_segment = candidate_plan.segments.back();
            if (graph_cut_segment_vram_bytes(candidate_segment) > max_graph_vram_bytes) {
                break;
            }

            best_end_segment_index = next_end_segment_index;
        }

        auto best_seed = make_segment_seed(base_plan,
                                           start_segment_index,
                                           best_end_segment_index);
        build_segment(gf,
                      merged_plan,
                      best_seed,
                      producer_index,
                      available_cut_output_node_indices,
                      backend,
                      params_tensor_set,
                      log_desc);
        start_segment_index = best_end_segment_index + 1;
    }

    if (log_desc != nullptr && merged_plan.segments.size() != base_plan.segments.size()) {
        LOG_INFO("%s graph cut max_vram=%.2f MB merged %zu segments -> %zu segments",
                 log_desc,
                 max_graph_vram_bytes / 1024.0 / 1024.0,
                 base_plan.segments.size(),
                 merged_plan.segments.size());
    }

    if (log_desc != nullptr) {
        LOG_INFO("%s graph cut max_vram budget merge took %lld ms",
                 log_desc,
                 ggml_time_ms() - t_budget_begin);
    }

    return merged_plan;
}

Plan resolve_plan(ggml_backend_t backend,
                  ggml_cgraph* gf,
                  PlanCache* cache,
                  size_t max_graph_vram_bytes,
                  const std::unordered_set<const ggml_tensor*>& params_tensor_set,
                  const char* log_desc) {
    GGML_ASSERT(backend != nullptr);
    GGML_ASSERT(gf != nullptr);
    GGML_ASSERT(cache != nullptr);

    Plan base_plan;
    int64_t t_plan_begin = ggml_time_ms();
    if (cache->graph_cut_plan.available && plan_matches_graph(gf, cache->graph_cut_plan)) {
        base_plan = cache->graph_cut_plan;
    } else {
        base_plan                                = build_plan(backend, gf, params_tensor_set, log_desc);
        cache->graph_cut_plan                    = base_plan;
        cache->graph_cut_plan.available          = true;
        cache->budgeted_graph_cut_plan.available = false;
        if (log_desc != nullptr) {
            LOG_INFO("%s build cached graph cut plan done (taking %lld ms)", log_desc, ggml_time_ms() - t_plan_begin);
        }
    }

    Plan resolved_plan = base_plan;
    if (max_graph_vram_bytes > 0 && base_plan.has_cuts) {
        if (cache->budgeted_graph_cut_plan.available &&
            cache->budgeted_graph_cut_plan_max_vram_bytes == max_graph_vram_bytes &&
            plan_matches_graph(gf, cache->budgeted_graph_cut_plan)) {
            resolved_plan = cache->budgeted_graph_cut_plan;
        } else {
            resolved_plan = apply_max_vram_budget(gf,
                                                  base_plan,
                                                  max_graph_vram_bytes,
                                                  backend,
                                                  params_tensor_set,
                                                  log_desc);
            cache->budgeted_graph_cut_plan                = resolved_plan;
            cache->budgeted_graph_cut_plan.available      = true;
            cache->budgeted_graph_cut_plan_max_vram_bytes = max_graph_vram_bytes;
        }
    }
    return resolved_plan;
}

}  // namespace sd::ggml_graph_cut
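
// Illustrative usage sketch (assumed caller, not part of this diff): a runner
// would keep one PlanCache alive across compute() calls so the plan is only
// rebuilt when the graph topology or input shapes change (see
// plan_matches_graph above). collect_param_tensors() is a hypothetical helper.
//
//   static sd::ggml_graph_cut::PlanCache cache;
//   std::unordered_set<const ggml_tensor*> params = collect_param_tensors();
//   sd::ggml_graph_cut::Plan plan =
//       sd::ggml_graph_cut::resolve_plan(backend, gf, &cache,
//                                        max_graph_vram_bytes, params, "unet");
//   if (plan.has_cuts) {
//       // run plan.segments one at a time, caching cut outputs between them
//   }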
@ -1,104 +0,0 @@
#ifndef __SD_GGML_GRAPH_CUT_H__
#define __SD_GGML_GRAPH_CUT_H__

#include <array>
#include <string>
#include <unordered_set>
#include <vector>

#include "ggml-backend.h"
#include "ggml.h"

namespace sd::ggml_graph_cut {

struct Segment {
    enum InputType {
        INPUT_EXTERNAL = 0,
        INPUT_PREVIOUS_CUT,
        INPUT_PARAM,
    };

    struct InputRef {
        InputType type = INPUT_EXTERNAL;
        std::string display_name;
        int leaf_index = -1;
        int node_index = -1;
    };

    size_t compute_buffer_size      = 0;
    size_t output_bytes             = 0;
    size_t input_external_bytes     = 0;
    size_t input_previous_cut_bytes = 0;
    size_t input_param_bytes        = 0;
    std::string group_name;
    std::vector<int> internal_node_indices;
    std::vector<int> output_node_indices;
    std::vector<InputRef> input_refs;
};

struct Plan {
    struct InputShape {
        int leaf_index = -1;
        ggml_type type = GGML_TYPE_COUNT;
        std::array<int64_t, GGML_MAX_DIMS> ne = {0, 0, 0, 0};
    };

    bool available = false;
    bool has_cuts  = false;
    bool valid     = true;
    int n_nodes    = 0;
    int n_leafs    = 0;
    std::vector<InputShape> input_shapes;
    std::vector<Segment> segments;
};

struct PlanCache {
    Plan graph_cut_plan;
    Plan budgeted_graph_cut_plan;
    size_t budgeted_graph_cut_plan_max_vram_bytes = 0;
};

static constexpr const char* GGML_RUNNER_CUT_PREFIX = "ggml_runner_cut:";

bool is_graph_cut_tensor(const ggml_tensor* tensor);
std::string make_graph_cut_name(const std::string& group, const std::string& output);
void mark_graph_cut(ggml_tensor* tensor, const std::string& group, const std::string& output);
int leaf_count(ggml_cgraph* gf);
ggml_tensor* leaf_tensor(ggml_cgraph* gf, int leaf_index);
ggml_backend_buffer_t tensor_buffer(const ggml_tensor* tensor);
ggml_tensor* cache_source_tensor(ggml_tensor* tensor);
size_t cache_tensor_bytes(const ggml_tensor* tensor);
bool plan_matches_graph(ggml_cgraph* gf, const Plan& plan);
ggml_tensor* output_tensor(ggml_cgraph* gf, const Segment& segment, size_t output_index);
ggml_tensor* input_tensor(ggml_cgraph* gf, const Segment::InputRef& input_ref);
std::vector<ggml_tensor*> param_tensors(ggml_cgraph* gf, const Segment& segment);
std::vector<ggml_tensor*> runtime_param_tensors(ggml_cgraph* gf, const Segment& segment, const char* log_desc);
std::unordered_set<std::string> collect_future_input_names(ggml_cgraph* gf,
                                                           const Plan& plan,
                                                           size_t current_segment_index);
ggml_cgraph* build_segment_graph(ggml_cgraph* gf,
                                 const Segment& segment,
                                 ggml_context** graph_ctx_out);
size_t measure_segment_compute_buffer(ggml_backend_t backend,
                                      ggml_cgraph* gf,
                                      const Segment& segment,
                                      const char* log_desc);
Plan build_plan(ggml_backend_t backend,
                ggml_cgraph* gf,
                const std::unordered_set<const ggml_tensor*>& params_tensor_set,
                const char* log_desc);
Plan apply_max_vram_budget(ggml_cgraph* gf,
                           const Plan& base_plan,
                           size_t max_graph_vram_bytes,
                           ggml_backend_t backend,
                           const std::unordered_set<const ggml_tensor*>& params_tensor_set,
                           const char* log_desc);
Plan resolve_plan(ggml_backend_t backend,
                  ggml_cgraph* gf,
                  PlanCache* cache,
                  size_t max_graph_vram_bytes,
                  const std::unordered_set<const ggml_tensor*>& params_tensor_set,
                  const char* log_desc);
}  // namespace sd::ggml_graph_cut

#endif
@ -1,653 +0,0 @@
#ifndef __SD_HIDREAM_O1_H__
#define __SD_HIDREAM_O1_H__

#include <algorithm>
#include <array>
#include <cmath>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "common_dit.hpp"
#include "conditioner.hpp"
#include "llm.hpp"
#include "util.h"

namespace HiDreamO1 {
constexpr int HIDREAM_O1_GRAPH_SIZE = 32768;
constexpr int PATCH_SIZE            = 32;
constexpr int TIMESTEP_TOKEN_NUM    = 1;
constexpr int IMAGE_TOKEN_ID        = 151655;
constexpr int VISION_START_TOKEN_ID = 151652;

static inline std::string repeat_special_token(const std::string& token, int64_t count) {
    std::string out;
    out.reserve(static_cast<size_t>(count) * token.size());
    for (int64_t i = 0; i < count; ++i) {
        out += token;
    }
    return out;
}

static inline std::pair<int, int> calculate_dimensions(int max_size, double ratio) {
    int width  = static_cast<int>(std::sqrt(max_size * max_size * ratio));
    int height = static_cast<int>(width / ratio);
    width      = (width / PATCH_SIZE) * PATCH_SIZE;
    height     = (height / PATCH_SIZE) * PATCH_SIZE;
    width      = std::max(width, PATCH_SIZE);
    height     = std::max(height, PATCH_SIZE);
    return {width, height};
}
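
// Worked example (illustrative numbers): calculate_dimensions(384, 1.5)
//   width  = sqrt(384 * 384 * 1.5) ~= 470 -> snapped down to 448 (14 * 32)
//   height = 470 / 1.5             ~= 313 -> snapped down to 288 (9 * 32)
// so the result is {448, 288}: a PATCH_SIZE-aligned box with roughly the
// requested aspect ratio whose area stays near 384 * 384.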

static inline sd::Tensor<float> resize_to_area(const sd::Tensor<float>& image, int image_size) {
    int64_t width  = image.shape()[0];
    int64_t height = image.shape()[1];
    int64_t s_max  = static_cast<int64_t>(image_size) * image_size;
    double scale   = std::sqrt(static_cast<double>(s_max) / static_cast<double>(width * height));

    std::vector<std::pair<int64_t, int64_t>> sizes = {
        {(static_cast<int64_t>(std::llround(width * scale)) / PATCH_SIZE) * PATCH_SIZE, (static_cast<int64_t>(std::llround(height * scale)) / PATCH_SIZE) * PATCH_SIZE},
        {(static_cast<int64_t>(std::llround(width * scale)) / PATCH_SIZE) * PATCH_SIZE, (static_cast<int64_t>(std::floor(height * scale)) / PATCH_SIZE) * PATCH_SIZE},
        {(static_cast<int64_t>(std::floor(width * scale)) / PATCH_SIZE) * PATCH_SIZE, (static_cast<int64_t>(std::llround(height * scale)) / PATCH_SIZE) * PATCH_SIZE},
        {(static_cast<int64_t>(std::floor(width * scale)) / PATCH_SIZE) * PATCH_SIZE, (static_cast<int64_t>(std::floor(height * scale)) / PATCH_SIZE) * PATCH_SIZE},
    };
    std::sort(sizes.begin(), sizes.end(), [](const auto& a, const auto& b) {
        return a.first * a.second > b.first * b.second;
    });

    std::pair<int64_t, int64_t> new_size = sizes.back();
    for (const auto& size : sizes) {
        if (size.first > 0 && size.second > 0 && size.first * size.second <= s_max) {
            new_size = size;
            break;
        }
    }

    double s1 = static_cast<double>(width) / static_cast<double>(new_size.first);
    double s2 = static_cast<double>(height) / static_cast<double>(new_size.second);
    sd::Tensor<float> resized;
    if (s1 < s2) {
        int64_t resized_h = static_cast<int64_t>(std::llround(height / s1));
        resized           = sd::ops::interpolate(image,
                                                 {new_size.first, resized_h, image.shape()[2], image.shape()[3]},
                                                 sd::ops::InterpolateMode::Bicubic);
        int64_t top = (resized_h - new_size.second) / 2;
        resized     = sd::ops::slice(resized, 1, top, top + new_size.second);
    } else {
        int64_t resized_w = static_cast<int64_t>(std::llround(width / s2));
        resized           = sd::ops::interpolate(image,
                                                 {resized_w, new_size.second, image.shape()[2], image.shape()[3]},
                                                 sd::ops::InterpolateMode::Bicubic);
        int64_t left = (resized_w - new_size.first) / 2;
        resized      = sd::ops::slice(resized, 0, left, left + new_size.first);
    }
    return resized;
}
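
// Worked example (illustrative numbers): a 1024x768 input with
// image_size = 512 gives scale = sqrt(512^2 / (1024 * 768)) ~= 0.577, and
// all four rounding candidates collapse to 576x416 (both multiples of 32,
// 576 * 416 <= 512^2). Since 1024/576 < 768/416, the image is resized to
// 576x432 and then center-cropped by 8 rows on each side, yielding 576x416.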

static inline std::vector<int32_t> build_position_ids(const std::vector<int32_t>& input_ids,
                                                      const std::vector<std::array<int32_t, 3>>& image_grids,
                                                      const std::vector<int32_t>& skip_vision_start_token) {
    std::vector<int32_t> position_ids(4 * input_ids.size(), 0);
    int image_index = 0;
    int st          = 0;
    int fix_point   = 4096;
    std::vector<int32_t> out_t;
    std::vector<int32_t> out_h;
    std::vector<int32_t> out_w;

    while (st < static_cast<int>(input_ids.size())) {
        int ed = st;
        while (ed < static_cast<int>(input_ids.size()) && input_ids[ed] != IMAGE_TOKEN_ID) {
            ed++;
        }

        if (ed >= static_cast<int>(input_ids.size())) {
            int st_idx = out_t.empty() ? 0 : (*std::max_element(out_t.begin(), out_t.end()) + 1);
            for (int i = 0; i < static_cast<int>(input_ids.size()) - st; ++i) {
                out_t.push_back(st_idx + i);
                out_h.push_back(st_idx + i);
                out_w.push_back(st_idx + i);
            }
            break;
        }

        int text_len = std::max(0, ed - st - skip_vision_start_token[image_index]);
        int st_idx   = out_t.empty() ? 0 : (*std::max_element(out_t.begin(), out_t.end()) + 1);
        for (int i = 0; i < text_len; ++i) {
            out_t.push_back(st_idx + i);
            out_h.push_back(st_idx + i);
            out_w.push_back(st_idx + i);
        }

        auto grid = image_grids[image_index];
        int base;
        if (skip_vision_start_token[image_index]) {
            if (fix_point > 0) {
                base      = fix_point;
                fix_point = 0;
            } else {
                base = st_idx;
            }
        } else {
            base = text_len + st_idx;
        }
        for (int32_t ti = 0; ti < grid[0]; ++ti) {
            for (int32_t hi = 0; hi < grid[1]; ++hi) {
                for (int32_t wi = 0; wi < grid[2]; ++wi) {
                    out_t.push_back(base + ti);
                    out_h.push_back(base + hi);
                    out_w.push_back(base + wi);
                }
            }
        }

        st = ed + grid[0] * grid[1] * grid[2];
        image_index++;
    }

    GGML_ASSERT(out_t.size() == input_ids.size());
    for (size_t i = 0; i < input_ids.size(); ++i) {
        // ggml IMROPE consumes 4 flattened position streams:
        // [t, h, w, e]
        // llama.cpp's generic Qwen-VL fallback expands text positions as
        // [pos, pos, pos, 0]. Keep the extra stream zeroed here too.
        position_ids[i]                        = out_t[i];
        position_ids[input_ids.size() + i]     = out_h[i];
        position_ids[input_ids.size() * 2 + i] = out_w[i];
        position_ids[input_ids.size() * 3 + i] = 0;
    }
    return position_ids;
}
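
// Worked example (illustrative token ids): for input_ids = [t0, t1, VS, I, I, I]
// with image_grids = {{1, 2, 2}} and skip_vision_start_token = {1}:
//   - text_len = 3 - 0 - 1 = 2, so t0/t1 get (t, h, w) = (0,0,0) and (1,1,1);
//   - the 1x2x2 image block starts at base = fix_point = 4096, producing
//     (4096,4096,4096), (4096,4096,4097), (4096,4097,4096), (4096,4097,4097);
//   - the fourth stream is all zeros.
// The flat layout is [all t | all h | all w | all e], 4 * input_ids.size() ints.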

struct TimestepEmbedder : public GGMLBlock {
    int frequency_embedding_size = 256;

    TimestepEmbedder(int64_t hidden_size) {
        blocks["mlp.0"] = std::make_shared<Linear>(frequency_embedding_size, hidden_size, true);
        blocks["mlp.2"] = std::make_shared<Linear>(hidden_size, hidden_size, true);
    }

    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* t) {
        auto mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["mlp.0"]);
        auto mlp_2 = std::dynamic_pointer_cast<Linear>(blocks["mlp.2"]);
        auto emb   = ggml_ext_timestep_embedding(ctx->ggml_ctx, t, frequency_embedding_size, 10000, 1000.0f);
        emb        = mlp_0->forward(ctx, emb);
        emb        = ggml_silu_inplace(ctx->ggml_ctx, emb);
        emb        = mlp_2->forward(ctx, emb);
        return emb;
    }
};
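
// Sketch of the embedding, assuming ggml_ext_timestep_embedding follows the
// usual DDPM-style sinusoidal scheme (not confirmed by this diff): with
// d = frequency_embedding_size, max_period = 10000 and time scale 1000,
//   freq_i = max_period^(-i / (d/2)),  i = 0 .. d/2 - 1
//   emb(t) = concat of cos(1000 * t * freq_i) and sin(1000 * t * freq_i)
// (exact cos/sin ordering per implementation), then Linear -> SiLU -> Linear.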

struct BottleneckPatchEmbed : public GGMLBlock {
    BottleneckPatchEmbed(int64_t in_dim, int64_t pca_dim, int64_t embed_dim) {
        blocks["proj1"] = std::make_shared<Linear>(in_dim, pca_dim, false);
        blocks["proj2"] = std::make_shared<Linear>(pca_dim, embed_dim, true);
    }

    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
        auto proj1 = std::dynamic_pointer_cast<Linear>(blocks["proj1"]);
        auto proj2 = std::dynamic_pointer_cast<Linear>(blocks["proj2"]);
        return proj2->forward(ctx, proj1->forward(ctx, x));
    }
};

struct FinalLayer : public GGMLBlock {
    FinalLayer(int64_t hidden_size, int64_t out_dim) {
        blocks["linear"] = std::make_shared<Linear>(hidden_size, out_dim, true);
    }

    ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) {
        auto linear = std::dynamic_pointer_cast<Linear>(blocks["linear"]);
        return linear->forward(ctx, x);
    }
};

struct HiDreamO1Params {
    LLM::LLMParams llm;
    int patch_size = PATCH_SIZE;
};

static inline HiDreamO1Params make_hidream_o1_params() {
    HiDreamO1Params params;
    params.llm.arch                           = LLM::LLMArch::QWEN3_VL;
    params.llm.hidden_size                    = 4096;
    params.llm.intermediate_size              = 12288;
    params.llm.num_layers                     = 36;
    params.llm.num_heads                      = 32;
    params.llm.num_kv_heads                   = 8;
    params.llm.head_dim                       = 128;
    params.llm.qkv_bias                       = false;
    params.llm.qk_norm                        = true;
    params.llm.vocab_size                     = 151936;
    params.llm.rms_norm_eps                   = 1e-6f;
    params.llm.vision.arch                    = LLM::LLMVisionArch::QWEN3_VL;
    params.llm.vision.num_layers              = 27;
    params.llm.vision.hidden_size             = 1152;
    params.llm.vision.intermediate_size       = 4304;
    params.llm.vision.num_heads               = 16;
    params.llm.vision.out_hidden_size         = 4096;
    params.llm.vision.patch_size              = 16;
    params.llm.vision.spatial_merge_size      = 2;
    params.llm.vision.temporal_patch_size     = 2;
    params.llm.vision.num_position_embeddings = 2304;
    return params;
}

struct HiDreamO1Model : public GGMLBlock {
    HiDreamO1Params params;

    HiDreamO1Model() = default;
    explicit HiDreamO1Model(HiDreamO1Params params)
        : params(std::move(params)) {
        blocks["language_model"] = std::make_shared<LLM::TextModel>(this->params.llm);
        blocks["t_embedder1"]    = std::make_shared<TimestepEmbedder>(this->params.llm.hidden_size);
        blocks["x_embedder"]     = std::make_shared<BottleneckPatchEmbed>(this->params.patch_size * this->params.patch_size * 3,
                                                                          this->params.llm.hidden_size / 4,
                                                                          this->params.llm.hidden_size);
        blocks["final_layer2"]   = std::make_shared<FinalLayer>(this->params.llm.hidden_size,
                                                                this->params.patch_size * this->params.patch_size * 3);
    }

    std::shared_ptr<LLM::TextModel> text_model() {
        return std::dynamic_pointer_cast<LLM::TextModel>(blocks["language_model"]);
    }

    std::shared_ptr<TimestepEmbedder> timestep_embedder() {
        return std::dynamic_pointer_cast<TimestepEmbedder>(blocks["t_embedder1"]);
    }

    std::shared_ptr<BottleneckPatchEmbed> patch_embedder() {
        return std::dynamic_pointer_cast<BottleneckPatchEmbed>(blocks["x_embedder"]);
    }

    std::shared_ptr<FinalLayer> final_layer() {
        return std::dynamic_pointer_cast<FinalLayer>(blocks["final_layer2"]);
    }
};

struct HiDreamO1VisionRunner : public GGMLRunner {
    HiDreamO1Params params;
    std::shared_ptr<LLM::VisionModel> model;

    std::vector<int> window_index_vec;
    std::vector<int> window_inverse_index_vec;
    std::vector<float> window_mask_vec;
    std::vector<float> pe_vec;
    std::array<std::vector<int32_t>, 4> pos_embed_idx_data_;
    std::array<std::vector<float>, 4> pos_embed_weight_data_;

    HiDreamO1VisionRunner(ggml_backend_t backend,
                          bool offload_params_to_cpu,
                          const String2TensorStorage& tensor_storage_map = {},
                          const std::string& prefix = "model.visual")
        : GGMLRunner(backend, offload_params_to_cpu),
          params(make_hidream_o1_params()),
          model(std::make_shared<LLM::VisionModel>(false, params.llm.vision)) {
        model->init(params_ctx, tensor_storage_map, prefix);
    }

    std::string get_desc() override {
        return "hidream_o1_vision";
    }

    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string& prefix = "model.visual") {
        model->get_param_tensors(tensors, prefix);
    }

    ggml_tensor* encode_image(GGMLRunnerContext* runner_ctx, ggml_tensor* image) {
        return LLM::LLMRunner::encode_image_common(this,
                                                   compute_ctx,
                                                   runner_ctx,
                                                   image,
                                                   params.llm.vision,
                                                   model,
                                                   window_index_vec,
                                                   window_inverse_index_vec,
                                                   window_mask_vec,
                                                   pe_vec,
                                                   pos_embed_idx_data_,
                                                   pos_embed_weight_data_);
    }

    ggml_cgraph* build_graph(const sd::Tensor<float>& image_tensor) {
        ggml_cgraph* gf    = new_graph_custom(HIDREAM_O1_GRAPH_SIZE);
        ggml_tensor* image = make_input(image_tensor);
        auto runner_ctx    = get_context();
        auto image_embeds  = encode_image(&runner_ctx, image);
        ggml_build_forward_expand(gf, image_embeds);
        return gf;
    }

    sd::Tensor<float> compute(int n_threads, const sd::Tensor<float>& image) {
        auto get_graph = [&]() {
            return build_graph(image);
        };
        auto output = GGMLRunner::compute<float>(get_graph, n_threads, false);
        return output.has_value() ? std::move(output.value()) : sd::Tensor<float>();
    }
};

struct HiDreamO1Runner : public GGMLRunner {
    HiDreamO1Params params;
    HiDreamO1Model model;

    std::vector<float> attention_mask_vec;

    HiDreamO1Runner(ggml_backend_t backend,
                    bool offload_params_to_cpu,
                    const String2TensorStorage& tensor_storage_map = {},
                    const std::string& prefix = "model")
        : GGMLRunner(backend, offload_params_to_cpu),
          params(make_hidream_o1_params()) {
        model = HiDreamO1Model(params);
        model.init(params_ctx, tensor_storage_map, prefix);
    }

    std::string get_desc() override {
        return "hidream_o1";
    }

    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string& prefix) {
        model.get_param_tensors(tensors, prefix);
    }

    ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
                             const sd::Tensor<float>& timestep_tensor,
                             const sd::Tensor<int32_t>& input_ids_tensor,
                             const sd::Tensor<int32_t>& input_pos_tensor,
                             const sd::Tensor<int32_t>& token_types_tensor,
                             const sd::Tensor<int32_t>& vinput_mask_tensor,
                             const std::vector<std::pair<int, sd::Tensor<float>>>& image_embeds_tensor,
                             const std::vector<sd::Tensor<float>>& ref_images) {
        ggml_cgraph* gf        = new_graph_custom(HIDREAM_O1_GRAPH_SIZE);
        ggml_tensor* x         = make_input(x_tensor);
        ggml_tensor* timestep  = make_input(timestep_tensor);
        ggml_tensor* input_ids = make_input(input_ids_tensor);
        ggml_tensor* input_pos = make_input(input_pos_tensor);

        auto text_model   = model.text_model();
        auto t_embedder1  = model.timestep_embedder();
        auto x_embedder   = model.patch_embedder();
        auto final_layer2 = model.final_layer();

        std::vector<ggml_tensor*> ref_image_tensors;
        for (const auto& image : ref_images) {
            ref_image_tensors.push_back(make_input(image));
        }

        attention_mask_vec    = std::vector<float>(static_cast<size_t>(token_types_tensor.shape()[0] * token_types_tensor.shape()[0]), 0.0f);
        int64_t total_seq_len = token_types_tensor.shape()[0];
        for (int64_t query = 0; query < total_seq_len; ++query) {
            bool is_gen = token_types_tensor.values()[static_cast<size_t>(query)] > 0;
            for (int64_t key = 0; key < total_seq_len; ++key) {
                if (!is_gen && key > query) {
                    attention_mask_vec[static_cast<size_t>(query * total_seq_len + key)] = -INFINITY;
                }
            }
        }
        auto attention_mask = ggml_new_tensor_2d(compute_ctx, GGML_TYPE_F32, total_seq_len, total_seq_len);
        set_backend_tensor_data(attention_mask, attention_mask_vec.data());

        auto runner_ctx = get_context();
        auto txt        = text_model->embed(&runner_ctx, input_ids);
        std::vector<std::pair<int, ggml_tensor*>> image_embeds;
        image_embeds.reserve(image_embeds_tensor.size());
        for (const auto& image_embed : image_embeds_tensor) {
            image_embeds.emplace_back(image_embed.first, make_input(image_embed.second));
        }
        txt = LLM::splice_image_embeds(&runner_ctx, txt, image_embeds);

        auto t_emb          = t_embedder1->forward(&runner_ctx, timestep);
        int64_t txt_seq_len = input_ids->ne[0];
        if (txt_seq_len > 1) {
            auto prefix = ggml_ext_slice(compute_ctx, txt, 1, 0, txt_seq_len - 1);
            txt         = ggml_concat(compute_ctx, prefix, ggml_reshape_3d(compute_ctx, t_emb, t_emb->ne[0], 1, 1), 1);
        } else {
            txt = ggml_reshape_3d(compute_ctx, t_emb, t_emb->ne[0], 1, 1);
        }

        auto vinputs          = DiT::pad_and_patchify(&runner_ctx, x, PATCH_SIZE, PATCH_SIZE);
        int64_t target_tokens = vinputs->ne[1];
        for (ggml_tensor* ref_image : ref_image_tensors) {
            auto ref = DiT::pad_and_patchify(&runner_ctx, ref_image, PATCH_SIZE, PATCH_SIZE);
            vinputs  = ggml_concat(compute_ctx, vinputs, ref, 1);
        }
        auto vis = x_embedder->forward(&runner_ctx, vinputs);

        auto inputs_embeds = ggml_concat(compute_ctx, txt, vis, 1);
        auto hidden_states = text_model->forward_embeds(&runner_ctx, inputs_embeds, input_pos, attention_mask, {});
        auto x_pred_all    = final_layer2->forward(&runner_ctx, hidden_states);

        int64_t x_pred_start = txt_seq_len;
        if (!vinput_mask_tensor.empty()) {
            int64_t seq_len      = static_cast<int64_t>(vinput_mask_tensor.shape()[0]);
            int64_t first_vinput = 0;
            while (first_vinput < seq_len && vinput_mask_tensor.values()[static_cast<size_t>(first_vinput)] == 0) {
                first_vinput++;
            }
            x_pred_start = first_vinput;
        }
        auto x_pred = ggml_ext_slice(compute_ctx, x_pred_all, 1, x_pred_start, x_pred_start + target_tokens);
        x_pred      = DiT::unpatchify_and_crop(compute_ctx, x_pred, x->ne[1], x->ne[0], PATCH_SIZE, PATCH_SIZE);

        float sigma = 1.0f - timestep_tensor.values()[0];
        sigma       = std::max(1e-6f, sigma);
        auto out    = ggml_scale(compute_ctx, ggml_sub(compute_ctx, x, x_pred), 1.0f / sigma);

        ggml_build_forward_expand(gf, out);
        return gf;
    }
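
    // Illustrative mask pattern derived from the loop above: with
    // token_types = [0, 0, 1, 1] (two text tokens, two generated-image tokens),
    //   text row q=0: [0, -inf, -inf, -inf]   // causal over all keys
    //   text row q=1: [0,    0, -inf, -inf]
    //   gen  row q=2: [0,    0,    0,    0]   // full attention
    //   gen  row q=3: [0,    0,    0,    0]
    // i.e. text queries stay causal while generation queries attend everywhere.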

    sd::Tensor<float> compute(int n_threads,
                              const sd::Tensor<float>& x,
                              const sd::Tensor<float>& timestep,
                              const sd::Tensor<int32_t>& input_ids,
                              const sd::Tensor<int32_t>& input_pos,
                              const sd::Tensor<int32_t>& token_types,
                              const sd::Tensor<int32_t>& vinput_mask,
                              const std::vector<std::pair<int, sd::Tensor<float>>>& image_embeds,
                              const std::vector<sd::Tensor<float>>& ref_images) {
        auto get_graph = [&]() {
            return build_graph(x, timestep, input_ids, input_pos, token_types, vinput_mask, image_embeds, ref_images);
        };
        return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
    }
};

struct HiDreamO1Conditioner : public Conditioner {
    Qwen2Tokenizer tokenizer;
    std::shared_ptr<HiDreamO1VisionRunner> vision_runner;

    HiDreamO1Conditioner(ggml_backend_t backend,
                         bool offload_params_to_cpu,
                         const String2TensorStorage& tensor_storage_map = {})
        : vision_runner(std::make_shared<HiDreamO1VisionRunner>(backend, offload_params_to_cpu, tensor_storage_map)) {}

    void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors) override {
        vision_runner->get_param_tensors(tensors);
    }

    void alloc_params_buffer() override {
        vision_runner->alloc_params_buffer();
    }

    void free_params_buffer() override {
        vision_runner->free_params_buffer();
    }

    size_t get_params_buffer_size() override {
        return vision_runner->get_params_buffer_size();
    }

    void set_max_graph_vram_bytes(size_t max_graph_vram_bytes) override {
        vision_runner->set_max_graph_vram_bytes(max_graph_vram_bytes);
    }

    void set_flash_attention_enabled(bool enabled) override {
        vision_runner->set_flash_attention_enabled(enabled);
    }

    void set_weight_adapter(const std::shared_ptr<WeightAdapter>& adapter) override {
        vision_runner->set_weight_adapter(adapter);
    }

    SDCondition get_learned_condition(int n_threads,
                                      const ConditionerParams& conditioner_params) override {
        SDCondition result;

        int width                = conditioner_params.width;
        int height               = conditioner_params.height;
        int64_t target_image_len = static_cast<int64_t>(width / PATCH_SIZE) * static_cast<int64_t>(height / PATCH_SIZE);

        std::vector<sd::Tensor<float>> ref_images;
        if (conditioner_params.ref_images != nullptr) {
            ref_images = *conditioner_params.ref_images;
        }

        std::vector<std::pair<int, sd::Tensor<float>>> vlm_images;
        std::vector<std::array<int32_t, 3>> image_grids;
        std::vector<int32_t> skip_vision_start;

        std::string prompt = "<|im_start|>user\n";

        if (ref_images.empty()) {
            prompt += conditioner_params.text;
            prompt += "<|im_end|>\n<|im_start|>assistant\n<|boi_token|><|tms_token|>";
            auto input_ids = tokenizer.encode(prompt, nullptr);

            std::vector<int32_t> input_ids_pad = input_ids;
            input_ids_pad.push_back(VISION_START_TOKEN_ID);
            input_ids_pad.insert(input_ids_pad.end(), target_image_len - 1, IMAGE_TOKEN_ID);

            image_grids.push_back({1, static_cast<int32_t>(height / PATCH_SIZE), static_cast<int32_t>(width / PATCH_SIZE)});
            skip_vision_start.push_back(1);

            std::vector<int32_t> token_types(input_ids_pad.size(), 0);
            int txt_seq_len = static_cast<int>(input_ids.size());
            int bgn         = txt_seq_len - TIMESTEP_TOKEN_NUM;
            for (int i = bgn; i < static_cast<int>(token_types.size()); ++i) {
                token_types[i] = 1;
            }

            auto position_ids = build_position_ids(input_ids_pad, image_grids, skip_vision_start);

            std::vector<int64_t> input_shape{static_cast<int64_t>(input_ids.size())};
            std::vector<int64_t> position_shape{static_cast<int64_t>(input_ids_pad.size() * 4)};
            std::vector<int64_t> token_type_shape{static_cast<int64_t>(token_types.size())};
            std::vector<int32_t> vinput_mask(token_types.size(), 0);
            for (int64_t i = txt_seq_len; i < static_cast<int64_t>(vinput_mask.size()); ++i) {
                vinput_mask[static_cast<size_t>(i)] = 1;
            }
            std::vector<int64_t> vinput_mask_shape{static_cast<int64_t>(vinput_mask.size())};

            result.c_input_ids    = sd::Tensor<int32_t>(input_shape, std::move(input_ids));
            result.c_position_ids = sd::Tensor<int32_t>(position_shape, position_ids);
            result.c_token_types  = sd::Tensor<int32_t>(token_type_shape, std::move(token_types));
            result.c_vinput_mask  = sd::Tensor<int32_t>(vinput_mask_shape, std::move(vinput_mask));
            return result;
        }

        int K = static_cast<int>(ref_images.size());
        int max_size;
        if (K == 1) {
            max_size = std::max(height, width);
        } else if (K == 2) {
            max_size = std::max(height, width) * 48 / 64;
        } else if (K <= 4) {
            max_size = std::max(height, width) / 2;
        } else if (K <= 8) {
            max_size = std::max(height, width) * 24 / 64;
        } else {
            max_size = std::max(height, width) / 4;
        }

        int cond_img_size;
        if (K <= 4) {
            cond_img_size = 384;
        } else if (K <= 8) {
            cond_img_size = 384 * 48 / 64;
        } else {
            cond_img_size = 384 / 2;
        }

        for (const auto& ref_image : ref_images) {
            auto resized_ref = resize_to_area(ref_image, max_size);
            resized_ref      = sd::ops::clamp(resized_ref, 0.0f, 1.0f);

            // VLM image: Qwen3-VL expects mean=[0.5]/std=[0.5] (i.e. range [-1,1]),
            // not CLIP normalization. Resize the already-resized ref directly to
            // (cond_w, cond_h) to match the Python pipeline's pil_r.resize().
            auto dims = calculate_dimensions(cond_img_size,
                                             static_cast<double>(resized_ref.shape()[0]) / static_cast<double>(resized_ref.shape()[1]));
            sd::Tensor<float> vlm_image = sd::ops::interpolate(
                resized_ref,
                {dims.first, dims.second, resized_ref.shape()[2], resized_ref.shape()[3]});
            vlm_image            = vlm_image * 2.0f - 1.0f;
            int64_t image_tokens = static_cast<int64_t>(dims.first / PATCH_SIZE) * static_cast<int64_t>(dims.second / PATCH_SIZE);

            auto patch_img = resized_ref * 2.0f - 1.0f;
            result.c_ref_images.push_back(std::move(patch_img));
            int64_t prompt_start = static_cast<int64_t>(tokenizer.encode(prompt + "<|vision_start|>", nullptr).size());
            prompt += "<|vision_start|>";
            prompt += repeat_special_token("<|image_pad|>", image_tokens);
            prompt += "<|vision_end|>";
            vlm_images.emplace_back(static_cast<int>(prompt_start), std::move(vlm_image));
            image_grids.push_back({1, dims.second / PATCH_SIZE, dims.first / PATCH_SIZE});
            skip_vision_start.push_back(0);
        }

        prompt += conditioner_params.text;
        prompt += "<|im_end|>\n<|im_start|>assistant\n<|boi_token|><|tms_token|>";
        auto input_ids = tokenizer.encode(prompt, nullptr);

        std::vector<int32_t> input_ids_pad = input_ids;
        input_ids_pad.push_back(VISION_START_TOKEN_ID);
        input_ids_pad.insert(input_ids_pad.end(), target_image_len - 1, IMAGE_TOKEN_ID);
        image_grids.push_back({1, static_cast<int32_t>(height / PATCH_SIZE), static_cast<int32_t>(width / PATCH_SIZE)});
        skip_vision_start.push_back(1);

        for (const auto& ref_image : result.c_ref_images) {
            int64_t ref_len = static_cast<int64_t>(ref_image.shape()[0] / PATCH_SIZE) * static_cast<int64_t>(ref_image.shape()[1] / PATCH_SIZE);
            input_ids_pad.push_back(VISION_START_TOKEN_ID);
            input_ids_pad.insert(input_ids_pad.end(), ref_len - 1, IMAGE_TOKEN_ID);
            image_grids.push_back({1, static_cast<int32_t>(ref_image.shape()[1] / PATCH_SIZE), static_cast<int32_t>(ref_image.shape()[0] / PATCH_SIZE)});
            skip_vision_start.push_back(1);
        }

        std::vector<int32_t> token_types(input_ids_pad.size(), 0);
        int txt_seq_len = static_cast<int>(input_ids.size());
        int bgn         = txt_seq_len - TIMESTEP_TOKEN_NUM;
        for (int i = bgn; i < static_cast<int>(token_types.size()); ++i) {
            token_types[i] = 1;
        }

        std::vector<int64_t> input_shape{static_cast<int64_t>(input_ids.size())};
        std::vector<int64_t> position_shape{static_cast<int64_t>(input_ids_pad.size() * 4)};
        std::vector<int64_t> token_type_shape{static_cast<int64_t>(token_types.size())};
        std::vector<int32_t> vinput_mask(token_types.size(), 0);
        for (int i = txt_seq_len; i < static_cast<int>(vinput_mask.size()); ++i) {
            vinput_mask[static_cast<size_t>(i)] = 1;
        }
        std::vector<int64_t> vinput_mask_shape{static_cast<int64_t>(vinput_mask.size())};

        result.c_input_ids    = sd::Tensor<int32_t>(input_shape, std::move(input_ids));
        result.c_position_ids = sd::Tensor<int32_t>(position_shape, build_position_ids(input_ids_pad, image_grids, skip_vision_start));
        result.c_token_types  = sd::Tensor<int32_t>(token_type_shape, std::move(token_types));
        result.c_vinput_mask  = sd::Tensor<int32_t>(vinput_mask_shape, std::move(vinput_mask));
        result.c_image_embeds.reserve(vlm_images.size());
        for (const auto& vlm_image : vlm_images) {
            auto image_embed = vision_runner->compute(n_threads, vlm_image.second);
            if (image_embed.empty()) {
                LOG_ERROR("hidream_o1 conditioner: encode VLM image failed");
                return SDCondition();
            }
            result.c_image_embeds.emplace_back(vlm_image.first, std::move(image_embed));
        }
        return result;
    }
};
}  // namespace HiDreamO1

#endif  // __SD_HIDREAM_O1_H__
777
src/llm.hpp
File diff suppressed because it is too large
74
src/lora.hpp
@ -129,7 +129,7 @@ struct LoraModel : public GGMLRunner {
}
}

-ggml_tensor* get_lora_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) {
+ggml_tensor* get_lora_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) {
ggml_tensor* updown = nullptr;
int index = 0;
while (true) {
@ -152,17 +152,17 @@ struct LoraModel : public GGMLRunner {

auto iter = lora_tensors.find(lora_up_name);
if (iter != lora_tensors.end()) {
-lora_up = ggml_ext_cast_f32(ctx, backend, iter->second);
+lora_up = ggml_ext_cast_f32(ctx, iter->second);
}

iter = lora_tensors.find(lora_mid_name);
if (iter != lora_tensors.end()) {
-lora_mid = ggml_ext_cast_f32(ctx, backend, iter->second);
+lora_mid = ggml_ext_cast_f32(ctx, iter->second);
}

iter = lora_tensors.find(lora_down_name);
if (iter != lora_tensors.end()) {
-lora_down = ggml_ext_cast_f32(ctx, backend, iter->second);
+lora_down = ggml_ext_cast_f32(ctx, iter->second);
}

if (lora_up == nullptr || lora_down == nullptr) {
@ -208,7 +208,7 @@ struct LoraModel : public GGMLRunner {
return updown;
}

-ggml_tensor* get_raw_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) {
+ggml_tensor* get_raw_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) {
ggml_tensor* updown = nullptr;
int index = 0;
while (true) {
@ -225,7 +225,7 @@ struct LoraModel : public GGMLRunner {

auto iter = lora_tensors.find(diff_name);
if (iter != lora_tensors.end()) {
-curr_updown = ggml_ext_cast_f32(ctx, backend, iter->second);
+curr_updown = ggml_ext_cast_f32(ctx, iter->second);
} else {
break;
}
@ -248,7 +248,7 @@ struct LoraModel : public GGMLRunner {
return updown;
}

-ggml_tensor* get_loha_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) {
+ggml_tensor* get_loha_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) {
ggml_tensor* updown = nullptr;
int index = 0;
while (true) {
@ -276,33 +276,33 @@ struct LoraModel : public GGMLRunner {

auto iter = lora_tensors.find(hada_1_down_name);
if (iter != lora_tensors.end()) {
-hada_1_down = ggml_ext_cast_f32(ctx, backend, iter->second);
+hada_1_down = ggml_ext_cast_f32(ctx, iter->second);
}

iter = lora_tensors.find(hada_1_up_name);
if (iter != lora_tensors.end()) {
-hada_1_up = ggml_ext_cast_f32(ctx, backend, iter->second);
+hada_1_up = ggml_ext_cast_f32(ctx, iter->second);
}

iter = lora_tensors.find(hada_1_mid_name);
if (iter != lora_tensors.end()) {
-hada_1_mid = ggml_ext_cast_f32(ctx, backend, iter->second);
+hada_1_mid = ggml_ext_cast_f32(ctx, iter->second);
hada_1_up = ggml_cont(ctx, ggml_transpose(ctx, hada_1_up));
}

iter = lora_tensors.find(hada_2_down_name);
if (iter != lora_tensors.end()) {
-hada_2_down = ggml_ext_cast_f32(ctx, backend, iter->second);
+hada_2_down = ggml_ext_cast_f32(ctx, iter->second);
}

iter = lora_tensors.find(hada_2_up_name);
if (iter != lora_tensors.end()) {
-hada_2_up = ggml_ext_cast_f32(ctx, backend, iter->second);
+hada_2_up = ggml_ext_cast_f32(ctx, iter->second);
}

iter = lora_tensors.find(hada_2_mid_name);
if (iter != lora_tensors.end()) {
-hada_2_mid = ggml_ext_cast_f32(ctx, backend, iter->second);
+hada_2_mid = ggml_ext_cast_f32(ctx, iter->second);
hada_2_up = ggml_cont(ctx, ggml_transpose(ctx, hada_2_up));
}

@ -351,7 +351,7 @@ struct LoraModel : public GGMLRunner {
return updown;
}

-ggml_tensor* get_lokr_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) {
+ggml_tensor* get_lokr_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) {
ggml_tensor* updown = nullptr;
int index = 0;
while (true) {
@ -378,24 +378,24 @@ struct LoraModel : public GGMLRunner {

auto iter = lora_tensors.find(lokr_w1_name);
if (iter != lora_tensors.end()) {
-lokr_w1 = ggml_ext_cast_f32(ctx, backend, iter->second);
+lokr_w1 = ggml_ext_cast_f32(ctx, iter->second);
}

iter = lora_tensors.find(lokr_w2_name);
if (iter != lora_tensors.end()) {
-lokr_w2 = ggml_ext_cast_f32(ctx, backend, iter->second);
+lokr_w2 = ggml_ext_cast_f32(ctx, iter->second);
}

int64_t rank = 1;
if (lokr_w1 == nullptr) {
iter = lora_tensors.find(lokr_w1_a_name);
if (iter != lora_tensors.end()) {
-lokr_w1_a = ggml_ext_cast_f32(ctx, backend, iter->second);
+lokr_w1_a = ggml_ext_cast_f32(ctx, iter->second);
}

iter = lora_tensors.find(lokr_w1_b_name);
if (iter != lora_tensors.end()) {
-lokr_w1_b = ggml_ext_cast_f32(ctx, backend, iter->second);
+lokr_w1_b = ggml_ext_cast_f32(ctx, iter->second);
}

if (lokr_w1_a == nullptr || lokr_w1_b == nullptr) {
@ -410,12 +410,12 @@ struct LoraModel : public GGMLRunner {
if (lokr_w2 == nullptr) {
iter = lora_tensors.find(lokr_w2_a_name);
if (iter != lora_tensors.end()) {
-lokr_w2_a = ggml_ext_cast_f32(ctx, backend, iter->second);
+lokr_w2_a = ggml_ext_cast_f32(ctx, iter->second);
}

iter = lora_tensors.find(lokr_w2_b_name);
if (iter != lora_tensors.end()) {
-lokr_w2_b = ggml_ext_cast_f32(ctx, backend, iter->second);
+lokr_w2_b = ggml_ext_cast_f32(ctx, iter->second);
}

if (lokr_w2_a == nullptr || lokr_w2_b == nullptr) {
@ -468,23 +468,23 @@ struct LoraModel : public GGMLRunner {
return updown;
}

-ggml_tensor* get_weight_diff(const std::string& model_tensor_name, ggml_backend_t backend, ggml_context* ctx, ggml_tensor* model_tensor, bool with_lora_and_lokr = true) {
+ggml_tensor* get_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_tensor* model_tensor, bool with_lora_and_lokr = true) {
// lora
ggml_tensor* diff = nullptr;
if (with_lora_and_lokr) {
-diff = get_lora_weight_diff(model_tensor_name, ctx, backend);
+diff = get_lora_weight_diff(model_tensor_name, ctx);
}
// diff
if (diff == nullptr) {
-diff = get_raw_weight_diff(model_tensor_name, ctx, backend);
+diff = get_raw_weight_diff(model_tensor_name, ctx);
}
// loha
if (diff == nullptr) {
-diff = get_loha_weight_diff(model_tensor_name, ctx, backend);
+diff = get_loha_weight_diff(model_tensor_name, ctx);
}
// lokr
if (diff == nullptr && with_lora_and_lokr) {
-diff = get_lokr_weight_diff(model_tensor_name, ctx, backend);
+diff = get_lokr_weight_diff(model_tensor_name, ctx);
}
if (diff != nullptr) {
if (ggml_nelements(diff) < ggml_nelements(model_tensor)) {
@ -502,7 +502,6 @@ struct LoraModel : public GGMLRunner {
}

ggml_tensor* get_out_diff(ggml_context* ctx,
-ggml_backend_t backend,
ggml_tensor* x,
WeightAdapter::ForwardParams forward_params,
const std::string& model_tensor_name) {
@ -591,7 +590,7 @@ struct LoraModel : public GGMLRunner {
}
scale_value *= multiplier;

-auto curr_out_diff = ggml_ext_lokr_forward(ctx, backend, x, lokr_w1, lokr_w1_a, lokr_w1_b, lokr_w2, lokr_w2_a, lokr_w2_b, is_conv2d, forward_params.conv2d, scale_value);
+auto curr_out_diff = ggml_ext_lokr_forward(ctx, x, lokr_w1, lokr_w1_a, lokr_w1_b, lokr_w2, lokr_w2_a, lokr_w2_b, is_conv2d, forward_params.conv2d, scale_value);
if (out_diff == nullptr) {
out_diff = curr_out_diff;
} else {
@ -762,7 +761,7 @@ struct LoraModel : public GGMLRunner {
ggml_tensor* model_tensor = it.second;

// lora
-ggml_tensor* diff = get_weight_diff(model_tensor_name, runtime_backend, compute_ctx, model_tensor);
+ggml_tensor* diff = get_weight_diff(model_tensor_name, compute_ctx, model_tensor);
if (diff == nullptr) {
continue;
}
@ -775,7 +774,7 @@ struct LoraModel : public GGMLRunner {

ggml_tensor* final_tensor;
if (model_tensor->type != GGML_TYPE_F32 && model_tensor->type != GGML_TYPE_F16) {
-final_tensor = ggml_ext_cast_f32(compute_ctx, runtime_backend, model_tensor);
+final_tensor = ggml_ext_cast_f32(compute_ctx, model_tensor);
final_tensor = ggml_add_inplace(compute_ctx, final_tensor, diff);
final_tensor = ggml_cpy(compute_ctx, final_tensor, model_tensor);
} else {
@ -842,35 +841,34 @@ public:
: lora_models(lora_models) {
}

-ggml_tensor* patch_weight(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* weight, const std::string& weight_name, bool with_lora_and_lokr) {
+ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name, bool with_lora_and_lokr) {
for (auto& lora_model : lora_models) {
-ggml_tensor* diff = lora_model->get_weight_diff(weight_name, backend, ctx, weight, with_lora_and_lokr);
+ggml_tensor* diff = lora_model->get_weight_diff(weight_name, ctx, weight, with_lora_and_lokr);
if (diff == nullptr) {
continue;
}

if (weight->type != GGML_TYPE_F32 && weight->type != GGML_TYPE_F16) {
-weight = ggml_ext_cast_f32(ctx, backend, weight);
+weight = ggml_ext_cast_f32(ctx, weight);
}
weight = ggml_add(ctx, weight, diff);
}
return weight;
}

-ggml_tensor* patch_weight(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* weight, const std::string& weight_name) override {
+ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name) override {
-return patch_weight(ctx, backend, weight, weight_name, true);
+return patch_weight(ctx, weight, weight_name, true);
}

ggml_tensor* forward_with_lora(ggml_context* ctx,
-ggml_backend_t backend,
ggml_tensor* x,
ggml_tensor* w,
ggml_tensor* b,
const std::string& prefix,
WeightAdapter::ForwardParams forward_params) override {
-w = patch_weight(ctx, backend, w, prefix + "weight", false);
+w = patch_weight(ctx, w, prefix + "weight", false);
if (b) {
-b = patch_weight(ctx, backend, b, prefix + "bias", false);
+b = patch_weight(ctx, b, prefix + "bias", false);
}
ggml_tensor* out;
if (forward_params.op_type == ForwardParams::op_type_t::OP_LINEAR) {
@ -892,7 +890,7 @@ public:
forward_params.conv2d.scale);
}
for (auto& lora_model : lora_models) {
-ggml_tensor* out_diff = lora_model->get_out_diff(ctx, backend, x, forward_params, prefix + "weight");
+ggml_tensor* out_diff = lora_model->get_out_diff(ctx, x, forward_params, prefix + "weight");
if (out_diff == nullptr) {
continue;
}
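The src/lora.hpp hunks above are mostly one mechanical change: the explicit ggml_backend_t argument is dropped from the weight-diff helpers and from ggml_ext_cast_f32. Independent of that signature change, get_weight_diff resolves an adapter by trying formats in a fixed order. A minimal sketch of that fallback chain (the helpers stand in for the class methods above, shown with the backend-free signatures):

    // Sketch of the fallback order used by get_weight_diff above.
    ggml_tensor* weight_diff(const std::string& name, ggml_context* ctx, bool with_lora_and_lokr) {
        ggml_tensor* diff = nullptr;
        if (with_lora_and_lokr)
            diff = get_lora_weight_diff(name, ctx);   // classic up/down (optionally mid) LoRA
        if (diff == nullptr)
            diff = get_raw_weight_diff(name, ctx);    // precomputed raw ".diff" tensors
        if (diff == nullptr)
            diff = get_loha_weight_diff(name, ctx);   // Hadamard-product (LoHa) factors
        if (diff == nullptr && with_lora_and_lokr)
            diff = get_lokr_weight_diff(name, ctx);   // Kronecker-product (LoKr) factors
        return diff;                                  // nullptr when no adapter matches
    }

Whichever helper first returns a non-null tensor wins; callers such as patch_weight treat nullptr as "no adapter touches this weight" and skip the add.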
@ -767,8 +767,6 @@ public:
auto context_x = block->forward(ctx, context, x, c_mod);
context = context_x.first;
x = context_x.second;
-sd::ggml_graph_cut::mark_graph_cut(context, "mmdit.joint_blocks." + std::to_string(i), "context");
-sd::ggml_graph_cut::mark_graph_cut(x, "mmdit.joint_blocks." + std::to_string(i), "x");
}

x = final_layer->forward(ctx, x, c_mod); // (N, T, patch_size ** 2 * out_channels)
@ -811,11 +809,6 @@ public:

context = context_embedder->forward(ctx, context); // [N, L, D] aka [N, L, 1536]
}
-sd::ggml_graph_cut::mark_graph_cut(x, "mmdit.prelude", "x");
-sd::ggml_graph_cut::mark_graph_cut(c, "mmdit.prelude", "c");
-if (context != nullptr) {
-sd::ggml_graph_cut::mark_graph_cut(context, "mmdit.prelude", "context");
-}

x = forward_core_with_concat(ctx, x, c, context, skip_layers); // (N, H*W, patch_size ** 2 * out_channels)
352 src/model.cpp
@ -23,11 +23,24 @@

#include "ggml-alloc.h"
#include "ggml-backend.h"
+#include "ggml-cpu.h"
#include "ggml.h"
-#include "ggml_extend_backend.hpp"
#include "zip.h"

#include "name_conversion.h"
+#include "stable-diffusion.h"
+
+#ifdef SD_USE_METAL
+#include "ggml-metal.h"
+#endif
+
+#ifdef SD_USE_VULKAN
+#include "ggml-vulkan.h"
+#endif
+
+#ifdef SD_USE_OPENCL
+#include "ggml-opencl.h"
+#endif

/*================================================= Preprocess ==================================================*/

@ -68,7 +81,7 @@ const char* unused_tensors[] = {
"first_stage_model.bn.",
};

-bool is_unused_tensor(const std::string& name) {
+bool is_unused_tensor(std::string name) {
for (size_t i = 0; i < sizeof(unused_tensors) / sizeof(const char*); i++) {
if (starts_with(name, unused_tensors[i])) {
return true;
@ -437,10 +450,6 @@ SDVersion ModelLoader::get_sd_version() {
if (tensor_storage.name.find("model.diffusion_model.joint_blocks.") != std::string::npos) {
return VERSION_SD3;
}
-if (tensor_storage.name.find("model.x_embedder.proj1.weight") != std::string::npos &&
-tensor_storage_map.find("model.language_model.layers.0.self_attn.q_proj.weight") != tensor_storage_map.end()) {
-return VERSION_HIDREAM_O1;
-}
if (tensor_storage.name.find("model.diffusion_model.transformer_blocks.0.img_mod.1.weight") != std::string::npos) {
return VERSION_QWEN_IMAGE;
}
@ -678,8 +687,8 @@ std::map<ggml_type, uint32_t> ModelLoader::get_vae_wtype_stat() {
return wtype_stat;
}

-TensorTypeRules parse_tensor_type_rules(const std::string& tensor_type_rules) {
+static std::vector<std::pair<std::string, ggml_type>> parse_tensor_type_rules(const std::string& tensor_type_rules) {
-TensorTypeRules result;
+std::vector<std::pair<std::string, ggml_type>> result;
for (const auto& item : split_string(tensor_type_rules, ',')) {
if (item.size() == 0)
continue;
@ -734,168 +743,8 @@ void ModelLoader::set_wtype_override(ggml_type wtype, std::string tensor_type_ru
}
}

-void ModelLoader::process_model_files(bool enable_mmap, bool writable_mmap) {
-if (model_files_processed) {
-return;
-}
-
-int64_t start_time = ggml_time_ms();
-
-std::vector<TensorStorage> processed_tensor_storages;
-for (const auto& [name, tensor_storage] : tensor_storage_map) {
-if (is_unused_tensor(tensor_storage.name)) {
-continue;
-}
-processed_tensor_storages.push_back(tensor_storage);
-}
-
-for (size_t file_index = 0; file_index < file_paths_.size(); file_index++) {
-std::string file_path = file_paths_[file_index];
-
-std::vector<TensorStorage> file_tensors;
-for (const auto& ts : processed_tensor_storages) {
-if (ts.file_index == file_index) {
-file_tensors.push_back(ts);
-}
-}
-if (file_tensors.empty()) {
-continue;
-}
-
-bool is_zip = false;
-for (auto const& ts : file_tensors) {
-if (ts.index_in_zip >= 0) {
-is_zip = true;
-break;
-}
-}
-
-ModelFileData fdata = {};
-fdata.path = file_path;
-fdata.is_zip = is_zip;
-fdata.tensors = std::move(file_tensors);
-
-if (enable_mmap && !is_zip) {
-LOG_DEBUG("using mmap for I/O");
-std::unique_ptr<MmapWrapper> mmapped = MmapWrapper::create(file_path, writable_mmap);
-if (mmapped) {
-uint8_t* mmap_data = static_cast<uint8_t*>(mmapped->writable_data());
-ggml_backend_buffer_t buf_mmap = ggml_backend_cpu_buffer_from_ptr(mmap_data, mmapped->size());
-if (buf_mmap) {
-LOG_INFO("using mmap for '%s'", file_path.c_str());
-fdata.mmbuffer = std::shared_ptr<struct ggml_backend_buffer>(buf_mmap, ggml_backend_buffer_free);
-} else {
-LOG_WARN("mmap: failed to create backend buffer for file %s", fdata.path.c_str());
-}
-fdata.mmapped = std::shared_ptr<MmapWrapper>(std::move(mmapped));
-} else {
-LOG_WARN("failed to memory-map '%s' (falling back to read())", file_path.c_str());
-}
-} else if (!is_zip) {
-LOG_INFO("NOT using mmap for '%s' (mmap disabled by caller)",
-file_path.c_str());
-}
-
-file_data.push_back(std::move(fdata));
-}
-
-model_files_processed = true;
-
-int64_t end_time = ggml_time_ms();
-int64_t process_time_ms = end_time - start_time;
-
-LOG_INFO("model files processing completed in %.2fs", process_time_ms / 1000.f);
-}
-
-std::vector<MmapTensorStore> ModelLoader::mmap_tensors(std::map<std::string, ggml_tensor*>& tensors,
-std::set<std::string> ignore_tensors,
-bool writable_mmap) {
-process_model_files(true, writable_mmap);
-
-std::vector<MmapTensorStore> result;
-uint64_t mapped_bytes = 0;
-size_t mapped_tensors = 0;
-
-LOG_DEBUG("memory-mapping tensors...");
-
-int64_t t_start = ggml_time_ms();
-
-for (auto& fdata : file_data) {
-if (!fdata.mmbuffer)
-continue;
-
-const std::vector<TensorStorage>& file_tensors = fdata.tensors;
-
-size_t file_mapped_bytes = 0;
-size_t file_mapped_tensors = 0;
-
-for (const auto& tensor_storage : file_tensors) {
-const std::string& name = tensor_storage.name;
-
-bool is_ignored = false;
-for (const auto& ignore_prefix : ignore_tensors) {
-if (starts_with(name, ignore_prefix)) {
-is_ignored = true;
-break;
-}
-}
-if (is_ignored)
-continue;
-
-auto it = tensors.find(name);
-if (it == tensors.end())
-continue;
-
-ggml_tensor* dst_tensor = it->second;
-if (dst_tensor == nullptr)
-continue;
-
-if (tensor_storage.type != dst_tensor->type)
-continue;
-
-size_t tensor_size = tensor_storage.nbytes();
-size_t tensor_offset = tensor_storage.offset;
-
-if (tensor_storage.ne[0] != dst_tensor->ne[0] ||
-tensor_storage.ne[1] != dst_tensor->ne[1] ||
-tensor_storage.ne[2] != dst_tensor->ne[2] ||
-tensor_storage.ne[3] != dst_tensor->ne[3] ||
-tensor_size != ggml_nbytes(dst_tensor)) {
-// let load_tensors worry about this
-continue;
-}
-
-ggml_backend_buffer_t buf_mmap = fdata.mmbuffer.get();
-uint8_t* mmap_data = static_cast<uint8_t*>(ggml_backend_buffer_get_base(buf_mmap));
-dst_tensor->buffer = buf_mmap;
-dst_tensor->data = mmap_data + tensor_offset;
-
-file_mapped_bytes += tensor_size;
-file_mapped_tensors++;
-}
-
-if (file_mapped_bytes > 0) {
-mapped_tensors += file_mapped_tensors;
-mapped_bytes += file_mapped_bytes;
-result.push_back({fdata.mmapped, fdata.mmbuffer});
-}
-}
-
-int64_t t_end = ggml_time_ms();
-int64_t duration_ms = t_end - t_start;
-
-LOG_INFO("memory-mapped %zu tensors in %zu files (%.2f MB), taking %.2fs",
-mapped_tensors,
-result.size(),
-mapped_bytes / (1024.0 * 1024.0),
-duration_ms / 1000.0);
-
-return result;
-}
-
bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads_p, bool enable_mmap) {
-process_model_files(enable_mmap, false);
+int64_t process_time_ms = 0;

std::atomic<int64_t> read_time_ms(0);
std::atomic<int64_t> memcpy_time_ms(0);
std::atomic<int64_t> copy_to_backend_time_ms(0);
@ -907,25 +756,52 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread

int64_t start_time = ggml_time_ms();

-size_t total_tensors_to_process = 0;
+std::vector<TensorStorage> processed_tensor_storages;
-for (const auto& fdata : file_data) {
+for (const auto& [name, tensor_storage] : tensor_storage_map) {
-total_tensors_to_process += fdata.tensors.size();
+if (is_unused_tensor(tensor_storage.name)) {
+continue;
+}
}
+processed_tensor_storages.push_back(tensor_storage);
+}

+process_time_ms = ggml_time_ms() - start_time;
+
bool success = true;
size_t total_tensors_processed = 0;
-const int64_t t_start = start_time;
+const size_t total_tensors_to_process = processed_tensor_storages.size();
+const int64_t t_start = ggml_time_ms();
int last_n_threads = 1;

-for (auto& fdata : file_data) {
+for (size_t file_index = 0; file_index < file_paths_.size(); file_index++) {
-const std::string& file_path = fdata.path;
+std::string file_path = file_paths_[file_index];
LOG_DEBUG("loading tensors from %s", file_path.c_str());

-const std::vector<TensorStorage>& file_tensors = fdata.tensors;
+std::vector<const TensorStorage*> file_tensors;
+for (const auto& ts : processed_tensor_storages) {
+if (ts.file_index == file_index) {
+file_tensors.push_back(&ts);
+}
+}
+if (file_tensors.empty()) {
+continue;
+}

-bool is_zip = fdata.is_zip;
+bool is_zip = false;
+for (auto const& ts : file_tensors) {
+if (ts->index_in_zip >= 0) {
+is_zip = true;
+break;
+}
+}

-std::shared_ptr<MmapWrapper> mmapped = fdata.mmapped;
+std::unique_ptr<MmapWrapper> mmapped;
+if (enable_mmap && !is_zip) {
+LOG_DEBUG("using mmap for I/O");
+mmapped = MmapWrapper::create(file_path);
+if (!mmapped) {
+LOG_WARN("failed to memory-map '%s'", file_path.c_str());
+}
+}

int n_threads = is_zip ? 1 : std::min(num_threads_to_use, (int)file_tensors.size());
if (n_threads < 1) {
@ -967,7 +843,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
break;
}

-const TensorStorage& tensor_storage = file_tensors[idx];
+const TensorStorage& tensor_storage = *file_tensors[idx];
ggml_tensor* dst_tensor = nullptr;

t0 = ggml_time_ms();
@ -984,11 +860,6 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
continue;
}

-// skip mmapped tensors
-if (dst_tensor->buffer != nullptr && dst_tensor->buffer == fdata.mmbuffer.get()) {
-continue;
-}
-
size_t nbytes_to_read = tensor_storage.nbytes_to_read();

auto read_data = [&](char* buf, size_t n) {
@ -1132,8 +1003,9 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
}

int64_t end_time = ggml_time_ms();
-LOG_INFO("loading tensors completed, taking %.2fs (read: %.2fs, memcpy: %.2fs, convert: %.2fs, copy_to_backend: %.2fs)",
+LOG_INFO("loading tensors completed, taking %.2fs (process: %.2fs, read: %.2fs, memcpy: %.2fs, convert: %.2fs, copy_to_backend: %.2fs)",
(end_time - start_time) / 1000.f,
+process_time_ms / 1000.f,
(read_time_ms.load() / (float)last_n_threads) / 1000.f,
(memcpy_time_ms.load() / (float)last_n_threads) / 1000.f,
(convert_time_ms.load() / (float)last_n_threads) / 1000.f,
@ -1249,6 +1121,91 @@ bool ModelLoader::tensor_should_be_converted(const TensorStorage& tensor_storage
return false;
}

+bool ModelLoader::save_to_gguf_file(const std::string& file_path, ggml_type type, const std::string& tensor_type_rules_str) {
+auto tensor_type_rules = parse_tensor_type_rules(tensor_type_rules_str);
+auto get_tensor_type = [&](const TensorStorage& tensor_storage) -> ggml_type {
+const std::string& name = tensor_storage.name;
+ggml_type tensor_type = tensor_storage.type;
+ggml_type dst_type = type;
+
+for (const auto& tensor_type_rule : tensor_type_rules) {
+std::regex pattern(tensor_type_rule.first);
+if (std::regex_search(name, pattern)) {
+dst_type = tensor_type_rule.second;
+break;
+}
+}
+
+if (tensor_should_be_converted(tensor_storage, dst_type)) {
+tensor_type = dst_type;
+}
+
+return tensor_type;
+};
+
+auto backend = ggml_backend_cpu_init();
+size_t mem_size = 1 * 1024 * 1024; // for padding
+mem_size += tensor_storage_map.size() * ggml_tensor_overhead();
+mem_size += get_params_mem_size(backend, type);
+LOG_INFO("model tensors mem size: %.2fMB", mem_size / 1024.f / 1024.f);
+ggml_context* ggml_ctx = ggml_init({mem_size, nullptr, false});
+
+if (ggml_ctx == nullptr) {
+LOG_ERROR("ggml_init failed for GGUF writer");
+ggml_backend_free(backend);
+return false;
+}
+
+std::vector<ggml_tensor*> tensors;
+std::mutex tensor_mutex;
+auto on_new_tensor_cb = [&](const TensorStorage& tensor_storage, ggml_tensor** dst_tensor) -> bool {
+const std::string& name = tensor_storage.name;
+ggml_type tensor_type = get_tensor_type(tensor_storage);
+
+std::lock_guard<std::mutex> lock(tensor_mutex);
+ggml_tensor* tensor = ggml_new_tensor(ggml_ctx, tensor_type, tensor_storage.n_dims, tensor_storage.ne);
+if (tensor == nullptr) {
+LOG_ERROR("ggml_new_tensor failed");
+return false;
+}
+ggml_set_name(tensor, name.c_str());
+
+// LOG_DEBUG("%s %d %s %d[%d %d %d %d] %d[%d %d %d %d]", name.c_str(),
+// ggml_nbytes(tensor), ggml_type_name(tensor_type),
+// tensor_storage.n_dims,
+// tensor_storage.ne[0], tensor_storage.ne[1], tensor_storage.ne[2], tensor_storage.ne[3],
+// tensor->n_dims, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
+
+if (!tensor->data) {
+GGML_ASSERT(ggml_nelements(tensor) == 0);
+// avoid crashing the gguf writer by setting a dummy pointer for zero-sized tensors
+LOG_DEBUG("setting dummy pointer for zero-sized tensor %s", name.c_str());
+tensor->data = ggml_get_mem_buffer(ggml_ctx);
+}
+
+*dst_tensor = tensor;
+tensors.push_back(tensor);
+
+return true;
+};
+
+bool success = load_tensors(on_new_tensor_cb);
+ggml_backend_free(backend);
+LOG_INFO("load tensors done");
+
+std::string error;
+if (success) {
+success = write_gguf_file(file_path, tensors, &error);
+}
+
+if (!success && !error.empty()) {
+LOG_ERROR("%s", error.c_str());
+}
+
+ggml_free(ggml_ctx);
+return success;
+}
+
int64_t ModelLoader::get_params_mem_size(ggml_backend_t backend, ggml_type type) {
size_t alignment = 128;
if (backend != nullptr) {
@ -1268,3 +1225,28 @@ int64_t ModelLoader::get_params_mem_size(ggml_backend_t backend, ggml_type type)

return mem_size;
}
+
+bool convert(const char* input_path,
+const char* vae_path,
+const char* output_path,
+sd_type_t output_type,
+const char* tensor_type_rules,
+bool convert_name) {
+ModelLoader model_loader;
+
+if (!model_loader.init_from_file(input_path)) {
+LOG_ERROR("init model loader from file failed: '%s'", input_path);
+return false;
+}
+
+if (vae_path != nullptr && strlen(vae_path) > 0) {
+if (!model_loader.init_from_file(vae_path, "vae.")) {
+LOG_ERROR("init model loader from file failed: '%s'", vae_path);
+return false;
+}
+}
+if (convert_name) {
+model_loader.convert_tensors_name();
+}
+return model_loader.save_to_gguf_file(output_path, (ggml_type)output_type, tensor_type_rules);
+}
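The added save_to_gguf_file/convert path turns any loadable checkpoint into a GGUF file, optionally re-typing tensors via the regex rules matched by the get_tensor_type lambda above. A hypothetical call site, purely for illustration (the file names are placeholders, and SD_TYPE_Q8_0 assumes sd_type_t mirrors ggml_type in stable-diffusion.h):

    // Hypothetical usage of the convert() entry point added above.
    bool ok = convert("sd_v1-5.safetensors", /*vae_path=*/nullptr,
                      "sd_v1-5-q8_0.gguf", SD_TYPE_Q8_0,
                      /*tensor_type_rules=*/"", /*convert_name=*/true);
    if (!ok) {
        fprintf(stderr, "conversion failed\n");
    }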
27 src/model.h
@ -42,7 +42,6 @@ enum SDVersion {
VERSION_ANIMA,
VERSION_FLUX2,
VERSION_FLUX2_KLEIN,
-VERSION_HIDREAM_O1,
VERSION_Z_IMAGE,
VERSION_OVIS_IMAGE,
VERSION_ERNIE_IMAGE,
@ -164,7 +163,6 @@ static inline bool sd_version_is_dit(SDVersion version) {
sd_version_is_sd3(version) ||
sd_version_is_wan(version) ||
sd_version_is_qwen_image(version) ||
-version == VERSION_HIDREAM_O1 ||
sd_version_is_anima(version) ||
sd_version_is_z_image(version) ||
sd_version_is_ernie_image(version)) {
@ -191,31 +189,11 @@ enum PMVersion {
};

typedef OrderedMap<std::string, TensorStorage> String2TensorStorage;
-using TensorTypeRules = std::vector<std::pair<std::string, ggml_type>>;
-
-TensorTypeRules parse_tensor_type_rules(const std::string& tensor_type_rules);
-
-class MmapWrapper;
-
-struct ModelFileData {
-std::string path;
-std::vector<TensorStorage> tensors;
-std::shared_ptr<MmapWrapper> mmapped;
-std::shared_ptr<struct ggml_backend_buffer> mmbuffer;
-bool is_zip;
-};
-
-struct MmapTensorStore {
-std::shared_ptr<MmapWrapper> mmapped;
-std::shared_ptr<struct ggml_backend_buffer> mmbuffer;
-};

class ModelLoader {
protected:
SDVersion version_ = VERSION_COUNT;
std::vector<std::string> file_paths_;
-std::vector<ModelFileData> file_data;
-bool model_files_processed = false;
String2TensorStorage tensor_storage_map;

void add_tensor_storage(const TensorStorage& tensor_storage);
@ -239,10 +217,6 @@ public:
std::map<ggml_type, uint32_t> get_vae_wtype_stat();
String2TensorStorage& get_tensor_storage_map() { return tensor_storage_map; }
void set_wtype_override(ggml_type wtype, std::string tensor_type_rules = "");
-void process_model_files(bool enable_mmap = false, bool writable_mmap = true);
-std::vector<MmapTensorStore> mmap_tensors(std::map<std::string, ggml_tensor*>& tensors,
-std::set<std::string> ignore_tensors = {},
-bool writable = true);
bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads = 0, bool use_mmap = false);
bool load_tensors(std::map<std::string, ggml_tensor*>& tensors,
std::set<std::string> ignore_tensors = {},
@ -257,6 +231,7 @@ public:
return names;
}

+bool save_to_gguf_file(const std::string& file_path, ggml_type type, const std::string& tensor_type_rules);
bool tensor_should_be_converted(const TensorStorage& tensor_storage, ggml_type type);
int64_t get_params_mem_size(ggml_backend_t backend, ggml_type type = GGML_TYPE_COUNT);
~ModelLoader() = default;
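The removed ModelFileData/MmapTensorStore machinery is what lets one side of this diff alias model tensors directly into a memory-mapped file instead of copying them. The core of the trick, condensed from process_model_files and mmap_tensors in the model.cpp section above (MmapWrapper is the project's own helper; the two ggml calls are standard ggml API; names are as in that code):

    // Sketch of the zero-copy aliasing used by mmap_tensors above.
    void* base = mmapped->writable_data();  // mapping of the whole model file
    ggml_backend_buffer_t buf = ggml_backend_cpu_buffer_from_ptr(base, mmapped->size());
    // A destination tensor whose type, shape and byte size match the on-disk
    // layout can then point straight into the mapping; no read()/memcpy happens:
    dst_tensor->buffer = buf;
    dst_tensor->data   = static_cast<uint8_t*>(base) + tensor_offset;

Tensors that do not match exactly are left for the regular load_tensors path, which is why mmap_tensors skips any shape or size mismatch.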
@ -95,7 +95,7 @@ bool read_gguf_file(const std::string& file_path,
}

bool write_gguf_file(const std::string& file_path,
-const std::vector<TensorWriteInfo>& tensors,
+const std::vector<ggml_tensor*>& tensors,
std::string* error) {
gguf_context* gguf_ctx = gguf_init_empty();
if (gguf_ctx == nullptr) {
@ -103,8 +103,7 @@ bool write_gguf_file(const std::string& file_path,
return false;
}

-for (const TensorWriteInfo& write_tensor : tensors) {
+for (ggml_tensor* tensor : tensors) {
-ggml_tensor* tensor = write_tensor.tensor;
if (tensor == nullptr) {
set_error(error, "null tensor cannot be written to GGUF");
gguf_free(gguf_ctx);
@ -11,7 +11,7 @@ bool read_gguf_file(const std::string& file_path,
std::vector<TensorStorage>& tensor_storages,
std::string* error = nullptr);
bool write_gguf_file(const std::string& file_path,
-const std::vector<TensorWriteInfo>& tensors,
+const std::vector<ggml_tensor*>& tensors,
std::string* error = nullptr);

#endif // __SD_MODEL_IO_GGUF_IO_H__
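write_gguf_file now takes plain ggml_tensor pointers instead of TensorWriteInfo wrappers; in either form the writing itself rests on ggml's gguf API. A minimal sketch using those calls (error handling trimmed; the output path is illustrative):

    // Minimal GGUF writing sketch on top of ggml's gguf API.
    gguf_context* gctx = gguf_init_empty();
    for (ggml_tensor* t : tensors) {
        gguf_add_tensor(gctx, t);  // records name, type, shape and the data pointer
    }
    gguf_write_to_file(gctx, "out.gguf", /*only_meta=*/false);  // header + tensor data
    gguf_free(gctx);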
@ -8,7 +8,6 @@

#include "binary_io.h"
#include "json.hpp"
-#include "util.h"

static constexpr size_t ST_HEADER_SIZE_LEN = 8;

@ -61,7 +60,7 @@ bool is_safetensors_file(const std::string& file_path) {
return true;
}

-static ggml_type safetensors_dtype_to_ggml_type(const std::string& dtype) {
+static ggml_type str_to_ggml_type(const std::string& dtype) {
ggml_type ttype = GGML_TYPE_COUNT;
if (dtype == "F16") {
ttype = GGML_TYPE_F16;
@ -155,7 +154,7 @@ bool read_safetensors_file(const std::string& file_path,
size_t begin = tensor_info["data_offsets"][0].get<size_t>();
size_t end = tensor_info["data_offsets"][1].get<size_t>();

-ggml_type type = safetensors_dtype_to_ggml_type(dtype);
+ggml_type type = str_to_ggml_type(dtype);
if (type == GGML_TYPE_COUNT) {
set_error(error, "unsupported dtype '" + dtype + "' (tensor '" + name + "')");
return false;
@ -222,95 +221,3 @@ bool read_safetensors_file(const std::string& file_path,

return true;
}
-
-static bool ggml_type_to_safetensors_dtype(ggml_type type, std::string* dtype) {
-switch (type) {
-case GGML_TYPE_F16:
-*dtype = "F16";
-return true;
-case GGML_TYPE_BF16:
-*dtype = "BF16";
-return true;
-case GGML_TYPE_F32:
-*dtype = "F32";
-return true;
-case GGML_TYPE_I32:
-*dtype = "I32";
-return true;
-default:
-return false;
-}
-}
-
-bool write_safetensors_file(const std::string& file_path,
-const std::vector<TensorWriteInfo>& tensors,
-std::string* error) {
-nlohmann::ordered_json header = nlohmann::ordered_json::object();
-
-uint64_t data_offset = 0;
-for (const TensorWriteInfo& write_tensor : tensors) {
-ggml_tensor* tensor = write_tensor.tensor;
-if (tensor == nullptr) {
-set_error(error, "null tensor cannot be written to safetensors");
-return false;
-}
-
-const std::string name = ggml_get_name(tensor);
-std::string dtype;
-if (!ggml_type_to_safetensors_dtype(tensor->type, &dtype)) {
-set_error(error,
-"unsupported safetensors dtype '" + std::string(ggml_type_name(tensor->type)) +
-"' for tensor '" + name + "'");
-return false;
-}
-
-const uint64_t tensor_nbytes = ggml_nbytes(tensor);
-
-nlohmann::ordered_json json_tensor_info = nlohmann::ordered_json::object();
-json_tensor_info["dtype"] = dtype;
-
-nlohmann::ordered_json shape = nlohmann::ordered_json::array();
-for (int i = 0; i < write_tensor.n_dims; ++i) {
-shape.push_back(write_tensor.ne[write_tensor.n_dims - 1 - i]);
-}
-json_tensor_info["shape"] = shape;
-
-nlohmann::ordered_json data_offsets = nlohmann::ordered_json::array();
-data_offsets.push_back(data_offset);
-data_offsets.push_back(data_offset + tensor_nbytes);
-json_tensor_info["data_offsets"] = data_offsets;
-
-header[name] = json_tensor_info;
-data_offset += tensor_nbytes;
-}
-
-const std::string header_str = header.dump();
-
-std::ofstream file(file_path, std::ios::binary);
-if (!file.is_open()) {
-set_error(error, "failed to open '" + file_path + "' for writing");
-return false;
-}
-
-LOG_INFO("trying to save tensors to %s", file_path.c_str());
-model_io::write_u64(file, header_str.size());
-file.write(header_str.data(), header_str.size());
-if (!file) {
-set_error(error, "failed to write safetensors header to '" + file_path + "'");
-return false;
-}
-
-for (const TensorWriteInfo& write_tensor : tensors) {
-ggml_tensor* tensor = write_tensor.tensor;
-const std::string name = ggml_get_name(tensor);
-const size_t tensor_nbytes = ggml_nbytes(tensor);
-file.write((const char*)tensor->data, tensor_nbytes);
-if (!file) {
-set_error(error,
-"failed to write tensor '" + name + "' to '" + file_path + "'");
-return false;
-}
-}
-
-return true;
-}
@ -10,8 +10,5 @@ bool is_safetensors_file(const std::string& file_path);
bool read_safetensors_file(const std::string& file_path,
std::vector<TensorStorage>& tensor_storages,
std::string* error = nullptr);
-bool write_safetensors_file(const std::string& file_path,
-const std::vector<TensorWriteInfo>& tensors,
-std::string* error = nullptr);

#endif // __SD_MODEL_IO_SAFETENSORS_IO_H__
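The removed write_safetensors_file follows the standard safetensors layout: an 8-byte little-endian header length (ST_HEADER_SIZE_LEN above), a JSON header mapping each tensor name to dtype/shape/data_offsets, then the raw tensor bytes in header order. A sketch of just the framing, under the assumption of a little-endian host (the removed code delegates the integer write to model_io::write_u64 instead; tensor_list is a placeholder):

    // Sketch of the safetensors framing implemented by the removed writer:
    // [u64 header_len][JSON header][raw tensor data...]
    std::string header_str = header.dump();   // nlohmann::ordered_json, as above
    uint64_t header_len = header_str.size();  // stored little-endian on disk
    file.write(reinterpret_cast<const char*>(&header_len), sizeof(header_len));
    file.write(header_str.data(), header_str.size());
    for (ggml_tensor* t : tensor_list) {
        file.write(static_cast<const char*>(t->data), ggml_nbytes(t));
    }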
@ -121,12 +121,6 @@ struct TensorStorage {
}
};

-struct TensorWriteInfo {
-int64_t ne[SD_MAX_DIMS] = {1, 1, 1, 1, 1};
-int n_dims = 0;
-ggml_tensor* tensor = nullptr;
-};
-
typedef std::function<bool(const TensorStorage&, ggml_tensor**)> on_new_tensor_cb_t;

#endif // __SD_TENSOR_STORAGE_H__
@ -24,75 +24,6 @@ static inline void preprocessing_set_4d(sd::Tensor<float>& tensor, float value,
tensor.values()[static_cast<size_t>(preprocessing_offset_4d(tensor, i0, i1, i2, i3))] = value;
}

-static inline uint8_t preprocessing_float_to_u8(float value) {
-if (value <= 0.0f) {
-return 0;
-}
-if (value >= 1.0f) {
-return 255;
-}
-return static_cast<uint8_t>(value * 255.0f + 0.5f);
-}
-
-static inline void preprocessing_tensor_frame_to_sd_image(const sd::Tensor<float>& tensor, int frame_index, uint8_t* image_data) {
-const auto& shape = tensor.shape();
-GGML_ASSERT(shape.size() == 4 || shape.size() == 5);
-GGML_ASSERT(image_data != nullptr);
-
-const int width = static_cast<int>(shape[0]);
-const int height = static_cast<int>(shape[1]);
-const int channel = static_cast<int>(shape[shape.size() == 5 ? 3 : 2]);
-const size_t pixels = static_cast<size_t>(width) * static_cast<size_t>(height);
-const float* src = tensor.data();
-
-if (shape.size() == 4) {
-GGML_ASSERT(frame_index >= 0 && frame_index < shape[3]);
-const size_t frame_stride = pixels * static_cast<size_t>(channel);
-const float* frame_ptr = src + static_cast<size_t>(frame_index) * frame_stride;
-if (channel == 3) {
-const float* c0 = frame_ptr;
-const float* c1 = frame_ptr + pixels;
-const float* c2 = frame_ptr + pixels * 2;
-for (size_t i = 0; i < pixels; ++i) {
-image_data[i * 3 + 0] = preprocessing_float_to_u8(c0[i]);
-image_data[i * 3 + 1] = preprocessing_float_to_u8(c1[i]);
-image_data[i * 3 + 2] = preprocessing_float_to_u8(c2[i]);
-}
-return;
-}
-
-for (size_t i = 0; i < pixels; ++i) {
-for (int c = 0; c < channel; ++c) {
-image_data[i * static_cast<size_t>(channel) + static_cast<size_t>(c)] =
-preprocessing_float_to_u8(frame_ptr[i + pixels * static_cast<size_t>(c)]);
-}
-}
-return;
-}
-
-GGML_ASSERT(frame_index >= 0 && frame_index < shape[2]);
-const size_t channel_stride = pixels * static_cast<size_t>(shape[2]);
-const float* frame_ptr = src + static_cast<size_t>(frame_index) * pixels;
-if (channel == 3) {
-const float* c0 = frame_ptr;
-const float* c1 = frame_ptr + channel_stride;
-const float* c2 = frame_ptr + channel_stride * 2;
-for (size_t i = 0; i < pixels; ++i) {
-image_data[i * 3 + 0] = preprocessing_float_to_u8(c0[i]);
-image_data[i * 3 + 1] = preprocessing_float_to_u8(c1[i]);
-image_data[i * 3 + 2] = preprocessing_float_to_u8(c2[i]);
-}
-return;
-}
-
-for (size_t i = 0; i < pixels; ++i) {
-for (int c = 0; c < channel; ++c) {
-image_data[i * static_cast<size_t>(channel) + static_cast<size_t>(c)] =
-preprocessing_float_to_u8(frame_ptr[i + channel_stride * static_cast<size_t>(c)]);
-}
-}
-}
-
static inline sd::Tensor<float> sd_image_to_preprocessing_tensor(sd_image_t image) {
sd::Tensor<float> tensor({static_cast<int64_t>(image.width), static_cast<int64_t>(image.height), static_cast<int64_t>(image.channel), 1});
for (uint32_t y = 0; y < image.height; ++y) {
@ -108,7 +39,20 @@ static inline sd::Tensor<float> sd_image_to_preprocessing_tensor(sd_image_t imag
static inline void preprocessing_tensor_to_sd_image(const sd::Tensor<float>& tensor, uint8_t* image_data) {
GGML_ASSERT(tensor.dim() == 4);
GGML_ASSERT(tensor.shape()[3] == 1);
-preprocessing_tensor_frame_to_sd_image(tensor, 0, image_data);
+GGML_ASSERT(image_data != nullptr);
+
+int width = static_cast<int>(tensor.shape()[0]);
+int height = static_cast<int>(tensor.shape()[1]);
+int channel = static_cast<int>(tensor.shape()[2]);
+for (int y = 0; y < height; ++y) {
+for (int x = 0; x < width; ++x) {
+for (int c = 0; c < channel; ++c) {
+float value = preprocessing_get_4d(tensor, x, y, c, 0);
+value = std::min(1.0f, std::max(0.0f, value));
+image_data[(y * width + x) * channel + c] = static_cast<uint8_t>(std::round(value * 255.0f));
+}
+}
+}
}

static inline sd::Tensor<float> gaussian_kernel_tensor(int kernel_size) {
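Both sides of the preprocessing_tensor_to_sd_image change quantize the same way: clamp to [0, 1], scale by 255, round to nearest. The removed preprocessing_float_to_u8 folds the rounding into a +0.5f truncation, while the inlined replacement uses std::round; for non-negative inputs the two agree, e.g. both map 0.5f to 128 (0.5 * 255 = 127.5, which rounds up). The common quantizer, condensed as a sketch:

    // The common quantizer behind both variants above.
    static inline uint8_t float_to_u8(float v) {
        v = std::min(1.0f, std::max(0.0f, v));           // clamp to [0, 1]
        return static_cast<uint8_t>(v * 255.0f + 0.5f);  // round to nearest; 0.5f -> 128
    }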
@ -95,7 +95,9 @@ namespace Qwen {

float scale = 1.f / 32.f;
bool force_prec_f32 = false;
+#ifdef SD_USE_VULKAN
+force_prec_f32 = true;
+#endif
// The purpose of the scale here is to prevent NaN issues in certain situations.
// For example when using CUDA but the weights are k-quants (not all prompts).
blocks["to_out.0"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, out_dim, out_bias, false, force_prec_f32, scale));
@ -122,10 +124,6 @@ namespace Qwen {
auto to_v = std::dynamic_pointer_cast<Linear>(blocks["to_v"]);
auto to_out_0 = std::dynamic_pointer_cast<Linear>(blocks["to_out.0"]);

-if (sd_backend_is(ctx->backend, "Vulkan")) {
-to_out_0->set_force_prec_f32(true);
-}
-
auto norm_added_q = std::dynamic_pointer_cast<UnaryBlock>(blocks["norm_added_q"]);
auto norm_added_k = std::dynamic_pointer_cast<UnaryBlock>(blocks["norm_added_k"]);

@ -412,9 +410,6 @@ namespace Qwen {
auto img = img_in->forward(ctx, x);
auto txt = txt_norm->forward(ctx, context);
txt = txt_in->forward(ctx, txt);
-sd::ggml_graph_cut::mark_graph_cut(img, "qwen_image.prelude", "img");
-sd::ggml_graph_cut::mark_graph_cut(txt, "qwen_image.prelude", "txt");
-// sd::ggml_graph_cut::mark_graph_cut(t_emb, "qwen_image.prelude", "t_emb");

for (int i = 0; i < params.num_layers; i++) {
auto block = std::dynamic_pointer_cast<QwenImageTransformerBlock>(blocks["transformer_blocks." + std::to_string(i)]);
@ -422,8 +417,6 @@ namespace Qwen {
auto result = block->forward(ctx, img, txt, t_emb, pe, modulate_index);
img = result.first;
txt = result.second;
-sd::ggml_graph_cut::mark_graph_cut(img, "qwen_image.transformer_blocks." + std::to_string(i), "img");
-sd::ggml_graph_cut::mark_graph_cut(txt, "qwen_image.transformer_blocks." + std::to_string(i), "txt");
}

if (params.zero_cond_t) {
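The Qwen hunks above move the full-precision fallback for to_out.0 from a runtime backend check into a compile-time #ifdef at block construction. The two variants side by side, for reference:

    // Compile-time variant: applies to every backend in a Vulkan-enabled build.
    bool force_prec_f32 = false;
    #ifdef SD_USE_VULKAN
    force_prec_f32 = true;
    #endif
    // Runtime variant the other side uses, checked per compute context:
    // if (sd_backend_is(ctx->backend, "Vulkan")) to_out_0->set_force_prec_f32(true);

The trade-off is worth noting: the compile-time form needs no backend handle at construction, but it forces f32 precision even when a Vulkan-enabled build happens to run on another backend.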
File diff suppressed because it is too large
@@ -251,8 +251,7 @@ public:
                          ggml_tensor* x,
                          ggml_tensor* past_bias = nullptr,
                          ggml_tensor* attention_mask = nullptr,
-                         ggml_tensor* relative_position_bucket = nullptr,
-                         const std::string& graph_cut_prefix = "") {
+                         ggml_tensor* relative_position_bucket = nullptr) {
        // x: [N, n_token, model_dim]
        for (int i = 0; i < num_layers; i++) {
            auto block = std::dynamic_pointer_cast<T5Block>(blocks["block." + std::to_string(i)]);
@@ -260,9 +259,6 @@ public:
            auto ret = block->forward(ctx, x, past_bias, attention_mask, relative_position_bucket);
            x = ret.first;
            past_bias = ret.second;
-           if (!graph_cut_prefix.empty()) {
-               sd::ggml_graph_cut::mark_graph_cut(x, graph_cut_prefix + ".block." + std::to_string(i), "x");
-           }
        }
 
        auto final_layer_norm = std::dynamic_pointer_cast<T5LayerNorm>(blocks["final_layer_norm"]);
@@ -309,8 +305,7 @@ public:
        auto encoder = std::dynamic_pointer_cast<T5Stack>(blocks["encoder"]);
 
        auto x = shared->forward(ctx, input_ids);
-       sd::ggml_graph_cut::mark_graph_cut(x, "t5.prelude", "x");
-       x = encoder->forward(ctx, x, past_bias, attention_mask, relative_position_bucket, "t5");
+       x = encoder->forward(ctx, x, past_bias, attention_mask, relative_position_bucket);
        return x;
    }
};
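Aside: the removed lines in the two sections above are master's graph-cut instrumentation. Each sd::ggml_graph_cut::mark_graph_cut(tensor, section, name) call tags a tensor as a point where the ggml compute graph may be split, labelled with a section string and a tensor name. A minimal sketch of the calling pattern, assuming only the three-argument signature visible in this diff (the runner context type and per-block helper are hypothetical stand-ins):

    // Hypothetical block loop instrumented like the Qwen and T5 forwards above.
    ggml_tensor* forward_with_cuts(GGMLRunnerContext* ctx, ggml_tensor* x, int num_layers) {
        sd::ggml_graph_cut::mark_graph_cut(x, "model.prelude", "x");  // cut after the input embedding
        for (int i = 0; i < num_layers; i++) {
            x = forward_block(ctx, x, i);  // stand-in for the per-block forward
            sd::ggml_graph_cut::mark_graph_cut(x, "model.blocks." + std::to_string(i), "x");
        }
        return x;
    }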
263 src/tensor.hpp

@@ -815,202 +815,11 @@ namespace sd {
    namespace ops {
        enum class InterpolateMode {
            Nearest,
-           NearestExact,
            NearestMax,
            NearestMin,
            NearestAvg,
-           Bilinear,
-           Bicubic,
-           Lanczos,
        };
 
-       inline bool is_nearest_like_interpolate_mode(InterpolateMode mode) {
-           return mode == InterpolateMode::Nearest ||
-                  mode == InterpolateMode::NearestExact ||
-                  mode == InterpolateMode::NearestMax ||
-                  mode == InterpolateMode::NearestMin ||
-                  mode == InterpolateMode::NearestAvg;
-       }
-
-       inline bool is_2d_filter_interpolate_mode(InterpolateMode mode) {
-           return mode == InterpolateMode::Bilinear ||
-                  mode == InterpolateMode::Bicubic ||
-                  mode == InterpolateMode::Lanczos;
-       }
-
-       inline int64_t nearest_exact_interpolate_index(int64_t output_index,
-                                                      int64_t input_size,
-                                                      int64_t output_size) {
-           const double scale = static_cast<double>(input_size) / static_cast<double>(output_size);
-           const double center = (static_cast<double>(output_index) + 0.5) * scale - 0.5;
-           return std::min(std::max<int64_t>(static_cast<int64_t>(std::floor(center + 0.5)), 0), input_size - 1);
-       }
-
-       inline double linear_interpolate_weight(double x) {
-           x = std::abs(x);
-           return x < 1.0 ? 1.0 - x : 0.0;
-       }
-
-       inline double cubic_interpolate_weight(double x) {
-           constexpr double a = -0.75;  // Match PyTorch bicubic interpolation.
-           x = std::abs(x);
-           if (x <= 1.0) {
-               return ((a + 2.0) * x - (a + 3.0)) * x * x + 1.0;
-           }
-           if (x < 2.0) {
-               return ((a * x - 5.0 * a) * x + 8.0 * a) * x - 4.0 * a;
-           }
-           return 0.0;
-       }
-
-       inline double sinc(double x) {
-           constexpr double pi = 3.14159265358979323846;
-           if (std::abs(x) < 1e-12) {
-               return 1.0;
-           }
-           const double pix = pi * x;
-           return std::sin(pix) / pix;
-       }
-
-       inline double lanczos_interpolate_weight(double x) {
-           constexpr double radius = 3.0;
-           x = std::abs(x);
-           if (x >= radius) {
-               return 0.0;
-           }
-           return sinc(x) * sinc(x / radius);
-       }
-
-       struct InterpolateContributor {
-           int64_t index;
-           double weight;
-       };
-
-       inline std::vector<std::vector<InterpolateContributor>> make_interpolate_contributors(
-           int64_t input_size,
-           int64_t output_size,
-           InterpolateMode mode,
-           bool antialias) {
-           std::vector<std::vector<InterpolateContributor>> contributors(static_cast<size_t>(output_size));
-           const double scale = static_cast<double>(input_size) / static_cast<double>(output_size);
-           const double filter_scale = antialias ? std::max(1.0, scale) : 1.0;
-
-           for (int64_t out = 0; out < output_size; ++out) {
-               const double center = (static_cast<double>(out) + 0.5) * scale - 0.5;
-               int64_t start = 0;
-               int64_t end = 0;
-
-               if (mode == InterpolateMode::Bilinear) {
-                   const double support = filter_scale;
-                   start = static_cast<int64_t>(std::ceil(center - support));
-                   end = static_cast<int64_t>(std::floor(center + support));
-               } else if (mode == InterpolateMode::Bicubic) {
-                   const double support = 2.0 * filter_scale;
-                   start = static_cast<int64_t>(std::ceil(center - support));
-                   end = static_cast<int64_t>(std::floor(center + support));
-               } else if (mode == InterpolateMode::Lanczos) {
-                   const double support = 3.0 * filter_scale;
-                   start = static_cast<int64_t>(std::ceil(center - support));
-                   end = static_cast<int64_t>(std::floor(center + support));
-               } else {
-                   tensor_throw_invalid_argument("Unsupported 2D filter interpolate mode: mode=" +
-                                                 std::to_string(static_cast<int>(mode)));
-               }
-
-               double weight_sum = 0.0;
-               std::vector<InterpolateContributor>& axis_contributors = contributors[static_cast<size_t>(out)];
-               axis_contributors.reserve(static_cast<size_t>(end - start + 1));
-
-               for (int64_t in = start; in <= end; ++in) {
-                   double weight = 0.0;
-                   if (mode == InterpolateMode::Bilinear) {
-                       weight = linear_interpolate_weight((center - static_cast<double>(in)) / filter_scale);
-                   } else if (mode == InterpolateMode::Bicubic) {
-                       weight = cubic_interpolate_weight((center - static_cast<double>(in)) / filter_scale);
-                   } else {
-                       weight = lanczos_interpolate_weight((center - static_cast<double>(in)) / filter_scale);
-                   }
-
-                   if (weight == 0.0) {
-                       continue;
-                   }
-
-                   const int64_t clamped_index = std::min(std::max<int64_t>(in, 0), input_size - 1);
-                   axis_contributors.push_back({clamped_index, weight});
-                   weight_sum += weight;
-               }
-
-               if ((antialias || mode == InterpolateMode::Lanczos) &&
-                   std::abs(weight_sum) > 1e-12) {
-                   for (auto& contributor : axis_contributors) {
-                       contributor.weight /= weight_sum;
-                   }
-               }
-
-               if (axis_contributors.empty()) {
-                   const int64_t nearest = std::min(
-                       std::max<int64_t>(static_cast<int64_t>(std::floor(center + 0.5)), 0),
-                       input_size - 1);
-                   axis_contributors.push_back({nearest, 1.0});
-               }
-           }
-
-           return contributors;
-       }
-
-       template <typename T>
-       inline Tensor<T> interpolate_2d_filter(const Tensor<T>& input,
-                                              const std::vector<int64_t>& output_shape,
-                                              InterpolateMode mode,
-                                              bool antialias) {
-           if (input.dim() < 2) {
-               tensor_throw_invalid_argument("2D filter interpolate requires rank >= 2: input_shape=" +
-                                             tensor_shape_to_string(input.shape()) + ", output_shape=" +
-                                             tensor_shape_to_string(output_shape));
-           }
-           for (size_t i = 2; i < output_shape.size(); ++i) {
-               if (input.shape()[i] != output_shape[i]) {
-                   tensor_throw_invalid_argument("2D filter interpolate only supports resizing dimensions 0 and 1: input_shape=" +
-                                                 tensor_shape_to_string(input.shape()) + ", output_shape=" +
-                                                 tensor_shape_to_string(output_shape));
-               }
-           }
-
-           Tensor<T> output(output_shape);
-           const int64_t input_width = input.shape()[0];
-           const int64_t input_height = input.shape()[1];
-           const int64_t output_width = output_shape[0];
-           const int64_t output_height = output_shape[1];
-           const int64_t input_plane = input_width * input_height;
-           const int64_t output_plane = output_width * output_height;
-           const int64_t plane_count = input.numel() / input_plane;
-
-           auto x_contributors = make_interpolate_contributors(input_width, output_width, mode, antialias);
-           auto y_contributors = make_interpolate_contributors(input_height, output_height, mode, antialias);
-
-           for (int64_t plane = 0; plane < plane_count; ++plane) {
-               const int64_t input_plane_offset = plane * input_plane;
-               const int64_t output_plane_offset = plane * output_plane;
-               for (int64_t y = 0; y < output_height; ++y) {
-                   const auto& y_axis = y_contributors[static_cast<size_t>(y)];
-                   for (int64_t x = 0; x < output_width; ++x) {
-                       const auto& x_axis = x_contributors[static_cast<size_t>(x)];
-                       double value = 0.0;
-                       for (const auto& yc : y_axis) {
-                           const int64_t input_row_offset = input_plane_offset + yc.index * input_width;
-                           for (const auto& xc : x_axis) {
-                               value += static_cast<double>(input.data()[input_row_offset + xc.index]) *
-                                        xc.weight * yc.weight;
-                           }
-                       }
-                       output.data()[output_plane_offset + y * output_width + x] = static_cast<T>(value);
-                   }
-               }
-           }
-
-           return output;
-       }
-
        inline int64_t normalize_slice_bound(int64_t index, int64_t dim_size) {
            if (index < 0) {
                index += dim_size;
@@ -1205,20 +1014,17 @@ namespace sd {
        inline Tensor<T> interpolate(const Tensor<T>& input,
                                     std::vector<int64_t> output_shape,
                                     InterpolateMode mode = InterpolateMode::Nearest,
-                                    bool align_corners = false,
-                                    bool antialias = false) {
-           const bool is_nearest_like_mode = is_nearest_like_interpolate_mode(mode);
-           const bool is_2d_filter_mode = is_2d_filter_interpolate_mode(mode);
-           if (!is_nearest_like_mode && !is_2d_filter_mode) {
-               tensor_throw_invalid_argument("Unsupported interpolate mode: mode=" +
-                                             std::to_string(static_cast<int>(mode)));
-           }
-           if (antialias && !is_2d_filter_mode) {
-               tensor_throw_invalid_argument("Tensor interpolate antialias requires a 2D filter mode: mode=" +
+                                    bool align_corners = false) {
+           const bool is_nearest_like_mode = (mode == InterpolateMode::Nearest ||
+                                              mode == InterpolateMode::NearestMax ||
+                                              mode == InterpolateMode::NearestMin ||
+                                              mode == InterpolateMode::NearestAvg);
+           if (!is_nearest_like_mode) {
+               tensor_throw_invalid_argument("Only nearest-like interpolate modes are implemented, got mode=" +
                                              std::to_string(static_cast<int>(mode)));
            }
            if (align_corners) {
-               tensor_throw_invalid_argument("align_corners is not supported for tensor interpolate: input_shape=" +
+               tensor_throw_invalid_argument("align_corners is not supported for nearest-like interpolate: input_shape=" +
                                              tensor_shape_to_string(input.shape()) + ", output_shape=" +
                                              tensor_shape_to_string(output_shape));
            }
@@ -1245,10 +1051,6 @@ namespace sd {
                }
            }
 
-           if (is_2d_filter_mode) {
-               return interpolate_2d_filter(input, output_shape, mode, antialias);
-           }
-
            bool has_downsampling = false;
            for (int64_t i = 0; i < input.dim(); ++i) {
                if (input.shape()[i] > output_shape[i]) {
@@ -1258,21 +1060,13 @@ namespace sd {
            }
 
            Tensor<T> output(std::move(output_shape));
-           if (mode == InterpolateMode::Nearest ||
-               mode == InterpolateMode::NearestExact ||
-               !has_downsampling) {
+           if (mode == InterpolateMode::Nearest || !has_downsampling) {
                for (int64_t flat = 0; flat < output.numel(); ++flat) {
                    std::vector<int64_t> output_coord = tensor_unravel_index(flat, output.shape());
                    std::vector<int64_t> input_coord(static_cast<size_t>(input.dim()), 0);
                    for (size_t i = 0; i < static_cast<size_t>(input.dim()); ++i) {
-                       if (mode == InterpolateMode::NearestExact) {
-                           input_coord[i] = nearest_exact_interpolate_index(output_coord[i],
-                                                                            input.shape()[i],
-                                                                            output.shape()[i]);
-                       } else {
-                           input_coord[i] = output_coord[i] * input.shape()[i] / output.shape()[i];
-                       }
+                       input_coord[i] = output_coord[i] * input.shape()[i] / output.shape()[i];
                    }
                    output[flat] = input.index(input_coord);
                }
 
@@ -1289,12 +1083,6 @@ namespace sd {
                    return T(0);
                case InterpolateMode::Nearest:
                    return T(0);
-               case InterpolateMode::NearestExact:
-                   return T(0);
-               case InterpolateMode::Bilinear:
-               case InterpolateMode::Bicubic:
-               case InterpolateMode::Lanczos:
-                   break;
            }
 
            tensor_throw_invalid_argument("Unsupported interpolate mode: mode=" +
@@ -1314,12 +1102,6 @@ namespace sd {
                    break;
                case InterpolateMode::Nearest:
                    break;
-               case InterpolateMode::NearestExact:
-                   break;
-               case InterpolateMode::Bilinear:
-               case InterpolateMode::Bicubic:
-               case InterpolateMode::Lanczos:
-                   break;
            }
        };
 
@@ -1375,20 +1157,17 @@ namespace sd {
                                     const std::optional<std::vector<int64_t>>& size,
                                     const std::optional<std::vector<double>>& scale_factor,
                                     InterpolateMode mode = InterpolateMode::Nearest,
-                                    bool align_corners = false,
-                                    bool antialias = false) {
-           const bool is_nearest_like_mode = is_nearest_like_interpolate_mode(mode);
-           const bool is_2d_filter_mode = is_2d_filter_interpolate_mode(mode);
-           if (!is_nearest_like_mode && !is_2d_filter_mode) {
-               tensor_throw_invalid_argument("Unsupported interpolate mode: mode=" +
-                                             std::to_string(static_cast<int>(mode)));
-           }
-           if (antialias && !is_2d_filter_mode) {
-               tensor_throw_invalid_argument("Tensor interpolate antialias requires a 2D filter mode: mode=" +
+                                    bool align_corners = false) {
+           const bool is_nearest_like_mode = (mode == InterpolateMode::Nearest ||
+                                              mode == InterpolateMode::NearestMax ||
+                                              mode == InterpolateMode::NearestMin ||
+                                              mode == InterpolateMode::NearestAvg);
+           if (!is_nearest_like_mode) {
+               tensor_throw_invalid_argument("Only nearest-like interpolate modes are implemented, got mode=" +
                                              std::to_string(static_cast<int>(mode)));
            }
            if (align_corners) {
-               tensor_throw_invalid_argument("align_corners is not supported for tensor interpolate: input_shape=" +
+               tensor_throw_invalid_argument("align_corners is not supported for nearest-like interpolate: input_shape=" +
                                              tensor_shape_to_string(input.shape()));
            }
            if (size.has_value() == scale_factor.has_value()) {
@@ -1432,7 +1211,7 @@ namespace sd {
                }
            }
 
-           return interpolate(input, std::move(output_shape), mode, align_corners, antialias);
+           return interpolate(input, std::move(output_shape), mode, align_corners);
        }
 
        template <typename T>
@@ -1440,14 +1219,12 @@ namespace sd {
                                     const std::optional<std::vector<int64_t>>& size,
                                     double scale_factor,
                                     InterpolateMode mode = InterpolateMode::Nearest,
-                                    bool align_corners = false,
-                                    bool antialias = false) {
+                                    bool align_corners = false) {
            return interpolate(input,
                               size,
                               std::vector<double>(size.has_value() ? size->size() : input.dim(), scale_factor),
                               mode,
-                              align_corners,
-                              antialias);
+                              align_corners);
        }
 
        template <typename T>
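Aside: the tensor.hpp hunks above show that master-584 predates the 2D filter resampling path. On master, sd::ops::interpolate also accepts Bilinear, Bicubic (the PyTorch-compatible a = -0.75 kernel), and Lanczos (radius 3), plus an antialias flag that widens the filter support by the downscale factor and renormalizes the weights, PIL-style. A usage sketch of the master-side API, assuming the [width, height, channels, ...] shape layout used by interpolate_2d_filter above (the helper function itself is hypothetical):

    sd::Tensor<float> quarter_size(const sd::Tensor<float>& image) {
        std::vector<int64_t> out_shape = image.shape();
        out_shape[0] /= 4;  // width
        out_shape[1] /= 4;  // height
        // align_corners must stay false (the function throws otherwise);
        // antialias=true widens and renormalizes the kernel per output pixel.
        return sd::ops::interpolate(image, std::move(out_shape),
                                    sd::ops::InterpolateMode::Bicubic,
                                    /*align_corners=*/false,
                                    /*antialias=*/true);
    }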
@@ -62,7 +62,7 @@ void CLIPTokenizer::load_from_merges(const std::string& merges_utf8_str) {
    }
    vocab.push_back(utf8_to_utf32("<|startoftext|>"));
    vocab.push_back(utf8_to_utf32("<|endoftext|>"));
-   LOG_DEBUG("vocab size: %zu", vocab.size());
+   LOG_DEBUG("vocab size: %llu", vocab.size());
    int i = 0;
    for (const auto& token : vocab) {
        encoder[token] = i;

@@ -28,7 +28,7 @@ void MistralTokenizer::load_from_merges(const std::string& merges_utf8_str, cons
        byte_decoder[pair.second] = pair.first;
    }
    std::vector<std::u32string> merges = split_utf32(merges_utf8_str);
-   LOG_DEBUG("merges size %zu", merges.size());
+   LOG_DEBUG("merges size %llu", merges.size());
    std::vector<std::pair<std::u32string, std::u32string>> merge_pairs;
    for (const auto& merge : merges) {
        size_t space_pos = merge.find(' ');

@@ -11,7 +11,7 @@ void Qwen2Tokenizer::load_from_merges(const std::string& merges_utf8_str) {
    }
 
    std::vector<std::u32string> merges = split_utf32(merges_utf8_str);
-   LOG_DEBUG("merges size %zu", merges.size());
+   LOG_DEBUG("merges size %llu", merges.size());
    std::vector<std::pair<std::u32string, std::u32string>> merge_pairs;
    for (const auto& merge : merges) {
        size_t space_pos = merge.find(' ');
@@ -81,11 +81,6 @@ Qwen2Tokenizer::Qwen2Tokenizer(const std::string& merges_utf8_str) {
            "</tool_response>",
            "<think>",
            "</think>",
-           "<|boi_token|>",
-           "<|bor_token|>",
-           "<|eor_token|>",
-           "<|bot_token|>",
-           "<|tms_token|>",
        };
 
        if (merges_utf8_str.size() > 0) {
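Aside: the three LOG_DEBUG changes above are the same portability fix applied in three tokenizers. %zu is the standard length modifier for size_t, while %llu only happens to be correct on ABIs where size_t is unsigned long long and is undefined behavior elsewhere. The two portable spellings:

    size_t n = merges.size();
    LOG_DEBUG("merges size %zu", n);                       // standard since C99/C++11
    LOG_DEBUG("merges size %llu", (unsigned long long)n);  // explicit widening cast also works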
@@ -482,14 +482,12 @@ public:
 
            emb = ggml_add(ctx->ggml_ctx, emb, label_emb);  // [N, time_embed_dim]
        }
-       // sd::ggml_graph_cut::mark_graph_cut(emb, "unet.prelude", "emb");
 
        // input_blocks
        std::vector<ggml_tensor*> hs;
 
        // input block 0
        auto h = input_blocks_0_0->forward(ctx, x);
-       sd::ggml_graph_cut::mark_graph_cut(h, "unet.input_blocks.0", "h");
 
        ggml_set_name(h, "bench-start");
        hs.push_back(h);
@@ -507,7 +505,6 @@ public:
                    std::string name = "input_blocks." + std::to_string(input_block_idx) + ".1";
                    h = attention_layer_forward(name, ctx, h, context, num_video_frames);  // [N, mult*model_channels, h, w]
                }
-               sd::ggml_graph_cut::mark_graph_cut(h, "unet.input_blocks." + std::to_string(input_block_idx), "h");
                hs.push_back(h);
            }
            if (tiny_unet) {
@@ -521,7 +518,6 @@ public:
                auto block = std::dynamic_pointer_cast<DownSampleBlock>(blocks[name]);
 
                h = block->forward(ctx, h);  // [N, mult*model_channels, h/(2^(i+1)), w/(2^(i+1))]
-               // sd::ggml_graph_cut::mark_graph_cut(h, "unet.input_blocks." + std::to_string(input_block_idx), "h");
                hs.push_back(h);
            }
        }
@@ -535,7 +531,6 @@ public:
                h = resblock_forward("middle_block.2", ctx, h, emb, num_video_frames);  // [N, 4*model_channels, h/8, w/8]
            }
        }
-       sd::ggml_graph_cut::mark_graph_cut(h, "unet.middle_block", "h");
        if (controls.size() > 0) {
            auto cs = ggml_ext_scale(ctx->ggml_ctx, controls[controls.size() - 1], control_strength, true);
            h = ggml_add(ctx->ggml_ctx, h, cs);  // middle control
@@ -586,7 +581,6 @@ public:
                }
 
                output_block_idx += 1;
-               sd::ggml_graph_cut::mark_graph_cut(h, "unet.output_blocks." + std::to_string(output_block_idx - 1), "h");
            }
        }
 
@@ -1,31 +1,50 @@
-#include "upscaler.h"
+#include "esrgan.hpp"
 #include "ggml_extend.hpp"
 #include "model.h"
 #include "stable-diffusion.h"
 #include "util.h"
 
-UpscalerGGML::UpscalerGGML(int n_threads,
-                           bool direct,
-                           int tile_size)
-    : n_threads(n_threads),
-      direct(direct),
-      tile_size(tile_size) {
-}
-
-void UpscalerGGML::set_max_graph_vram_bytes(size_t max_vram_bytes) {
-    max_graph_vram_bytes = max_vram_bytes;
-    if (esrgan_upscaler) {
-        esrgan_upscaler->set_max_graph_vram_bytes(max_vram_bytes);
-    }
-}
-
-bool UpscalerGGML::load_from_file(const std::string& esrgan_path,
-                                  bool offload_params_to_cpu,
-                                  int n_threads) {
-    ggml_log_set(ggml_log_callback_default, nullptr);
-    backend = sd_get_default_backend();
+struct UpscalerGGML {
+    ggml_backend_t backend = nullptr;  // general backend
+    ggml_type model_data_type = GGML_TYPE_F16;
+    std::shared_ptr<ESRGAN> esrgan_upscaler;
+    std::string esrgan_path;
+    int n_threads;
+    bool direct = false;
+    int tile_size = 128;
+
+    UpscalerGGML(int n_threads,
+                 bool direct = false,
+                 int tile_size = 128)
+        : n_threads(n_threads),
+          direct(direct),
+          tile_size(tile_size) {
+    }
+
+    bool load_from_file(const std::string& esrgan_path,
+                        bool offload_params_to_cpu,
+                        int n_threads) {
+        ggml_log_set(ggml_log_callback_default, nullptr);
+#ifdef SD_USE_CUDA
+        LOG_DEBUG("Using CUDA backend");
+        backend = ggml_backend_cuda_init(0);
+#endif
+#ifdef SD_USE_METAL
+        LOG_DEBUG("Using Metal backend");
+        backend = ggml_backend_metal_init();
+#endif
+#ifdef SD_USE_VULKAN
+        LOG_DEBUG("Using Vulkan backend");
+        backend = ggml_backend_vk_init(0);
+#endif
+#ifdef SD_USE_OPENCL
+        LOG_DEBUG("Using OpenCL backend");
+        backend = ggml_backend_opencl_init();
+#endif
+#ifdef SD_USE_SYCL
+        LOG_DEBUG("Using SYCL backend");
+        backend = ggml_backend_sycl_init(0);
+#endif
        ModelLoader model_loader;
        if (!model_loader.init_from_file_and_convert_name(esrgan_path)) {
            LOG_ERROR("init model loader from file failed: '%s'", esrgan_path.c_str());
@@ -37,7 +56,6 @@ bool UpscalerGGML::load_from_file(const std::string& esrgan_path,
        }
        LOG_INFO("Upscaler weight type: %s", ggml_type_name(model_data_type));
        esrgan_upscaler = std::make_shared<ESRGAN>(backend, offload_params_to_cpu, tile_size, model_loader.get_tensor_storage_map());
-       esrgan_upscaler->set_max_graph_vram_bytes(max_graph_vram_bytes);
        if (direct) {
            esrgan_upscaler->set_conv2d_direct_enabled(true);
        }
@@ -47,7 +65,7 @@
        return true;
    }
 
-sd::Tensor<float> UpscalerGGML::upscale_tensor(const sd::Tensor<float>& input_tensor) {
+    sd::Tensor<float> upscale_tensor(const sd::Tensor<float>& input_tensor) {
        sd::Tensor<float> upscaled;
        if (tile_size <= 0 || (input_tensor.shape()[0] <= tile_size && input_tensor.shape()[1] <= tile_size)) {
            upscaled = esrgan_upscaler->compute(n_threads, input_tensor);
@@ -80,7 +98,7 @@ sd::Tensor<float> UpscalerGGML::upscale_tensor(const sd::Tensor<float>& input_te
        return upscaled;
    }
 
-sd_image_t UpscalerGGML::upscale(sd_image_t input_image, uint32_t upscale_factor) {
+    sd_image_t upscale(sd_image_t input_image, uint32_t upscale_factor) {
        // upscale_factor, unused for RealESRGAN_x4plus_anime_6B.pth
        sd_image_t upscaled_image = {0, 0, 0, nullptr};
        int output_width = (int)input_image.width * esrgan_upscaler->scale;
@@ -101,6 +119,7 @@ sd_image_t UpscalerGGML::upscale(sd_image_t input_image, uint32_t upscale_factor
        upscaled_image = upscaled_data;
        return upscaled_image;
    }
+};
 
struct upscaler_ctx_t {
    UpscalerGGML* upscaler = nullptr;
@@ -1,33 +0,0 @@
-#ifndef __SD_UPSCALER_H__
-#define __SD_UPSCALER_H__
-
-#include "esrgan.hpp"
-#include "stable-diffusion.h"
-#include "tensor.hpp"
-
-#include <memory>
-#include <string>
-
-struct UpscalerGGML {
-    ggml_backend_t backend = nullptr;  // general backend
-    ggml_type model_data_type = GGML_TYPE_F16;
-    std::shared_ptr<ESRGAN> esrgan_upscaler;
-    std::string esrgan_path;
-    int n_threads;
-    bool direct = false;
-    int tile_size = 128;
-    size_t max_graph_vram_bytes = 0;
-
-    UpscalerGGML(int n_threads,
-                 bool direct = false,
-                 int tile_size = 128);
-
-    bool load_from_file(const std::string& esrgan_path,
-                        bool offload_params_to_cpu,
-                        int n_threads);
-    void set_max_graph_vram_bytes(size_t max_vram_bytes);
-    sd::Tensor<float> upscale_tensor(const sd::Tensor<float>& input_tensor);
-    sd_image_t upscale(sd_image_t input_image, uint32_t upscale_factor);
-};
-
-#endif  // __SD_UPSCALER_H__
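Aside: on master, UpscalerGGML is split into upscaler.h (the file removed above when moving to master-584) and upscaler.cpp, and it picks its device at runtime via sd_get_default_backend() instead of the compile-time #ifdef ladder. A sketch of driving the master-side class, using only the declarations from the removed header; the model path is a placeholder:

    #include "upscaler.h"

    sd_image_t upscale_once(sd_image_t input) {
        UpscalerGGML upscaler(/*n_threads=*/4, /*direct=*/false, /*tile_size=*/128);
        if (!upscaler.load_from_file("realesrgan-x4plus.safetensors",  // placeholder path
                                     /*offload_params_to_cpu=*/false,
                                     /*n_threads=*/4)) {
            return {0, 0, 0, nullptr};  // load failed
        }
        return upscaler.upscale(input, /*upscale_factor=*/4);  // factor unused by x4 ESRGAN models
    }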
236 src/util.cpp

@@ -23,9 +23,8 @@
 #include <unistd.h>
 #endif
 
-#include "ggml-backend.h"
+#include "ggml-cpu.h"
 #include "ggml.h"
-#include "ggml_extend_backend.hpp"
 #include "stable-diffusion.h"
 
 bool ends_with(const std::string& str, const std::string& ending) {
@@ -112,7 +111,7 @@ private:
    HANDLE hmapping_;
};
 
-std::unique_ptr<MmapWrapper> MmapWrapper::create(const std::string& filename, bool writable) {
+std::unique_ptr<MmapWrapper> MmapWrapper::create(const std::string& filename) {
    void* mapped_data = nullptr;
    size_t file_size = 0;
 
@@ -120,10 +119,10 @@ std::unique_ptr<MmapWrapper> MmapWrapper::create(const std::string& filename, bo
        filename.c_str(),
        GENERIC_READ,
        FILE_SHARE_READ,
-       nullptr,
+       NULL,
        OPEN_EXISTING,
        FILE_ATTRIBUTE_NORMAL,
-       nullptr);
+       NULL);
 
    if (file_handle == INVALID_HANDLE_VALUE) {
        return nullptr;
@@ -137,20 +136,16 @@ std::unique_ptr<MmapWrapper> MmapWrapper::create(const std::string& filename, bo
 
    file_size = static_cast<size_t>(size.QuadPart);
 
-   DWORD page_prot = writable ? PAGE_WRITECOPY : PAGE_READONLY;
-
-   HANDLE mapping_handle = CreateFileMapping(file_handle, nullptr, page_prot, 0, 0, nullptr);
-
-   if (mapping_handle == nullptr) {
+   HANDLE mapping_handle = CreateFileMapping(file_handle, NULL, PAGE_READONLY, 0, 0, NULL);
+
+   if (mapping_handle == NULL) {
        CloseHandle(file_handle);
        return nullptr;
    }
 
-   DWORD view_access = writable ? FILE_MAP_COPY : FILE_MAP_READ;
-
-   mapped_data = MapViewOfFile(mapping_handle, view_access, 0, 0, file_size);
-
-   if (mapped_data == nullptr) {
+   mapped_data = MapViewOfFile(mapping_handle, FILE_MAP_READ, 0, 0, file_size);
+
+   if (mapped_data == NULL) {
        CloseHandle(mapping_handle);
        CloseHandle(file_handle);
        return nullptr;
@@ -176,85 +171,28 @@ bool is_directory(const std::string& path) {
    return (stat(path.c_str(), &buffer) == 0 && S_ISDIR(buffer.st_mode));
}
 
-struct MmapFlags {
-    bool sequential;
-    bool populate;
-    bool willneed;
-    bool dontneed;
-};
-
-static MmapFlags get_mmap_flags() {
-    MmapFlags result = {};
-    const char* SD_MMAP_FLAGS = std::getenv("SD_MMAP_FLAGS");
-    if (SD_MMAP_FLAGS && *SD_MMAP_FLAGS) {
-        std::stringstream ss(SD_MMAP_FLAGS);
-        std::string token;
-        while (std::getline(ss, token, ',')) {
-            std::string ntoken = trim(token);
-            std::transform(ntoken.begin(), ntoken.end(), ntoken.begin(), ::tolower);
-            if (ntoken == "sequential") {
-                result.sequential = true;
-            } else if (ntoken == "populate") {
-                result.populate = true;
-            } else if (ntoken == "willneed") {
-                result.willneed = true;
-            } else if (ntoken == "dontneed") {
-                result.dontneed = true;
-            }
-        }
-    }
-    return result;
-}
-
class MmapWrapperImpl : public MmapWrapper {
public:
-    MmapWrapperImpl(void* data, size_t size, int fd)
-        : MmapWrapper(data, size), fd_(fd) {}
+    MmapWrapperImpl(void* data, size_t size)
+        : MmapWrapper(data, size) {}
 
    ~MmapWrapperImpl() override {
-#ifdef __linux__
-        auto cfg_flags = get_mmap_flags();
-
-        // Drop the kernel pagecache pages for this file. madvise(DONTNEED)
-        // alone only unmaps from the process address space; pagecache
-        // entries persist (`free` reports them as buff/cache and the OOM
-        // killer doesn't touch them, but they ARE counted against
-        // overcommit and can starve other allocations on tight-RAM
-        // systems). posix_fadvise(POSIX_FADV_DONTNEED) is the documented
-        // way to evict pagecache for a specific fd's pages.
-        if (cfg_flags.dontneed) {
-            madvise(data_, size_, MADV_DONTNEED);
-            posix_fadvise(fd_, 0, 0, POSIX_FADV_DONTNEED);
-        }
-#endif
        munmap(data_, size_);
-        close(fd_);
    }
-
-private:
-    int fd_;
};
 
-std::unique_ptr<MmapWrapper> MmapWrapper::create(const std::string& filename, bool writable) {
+std::unique_ptr<MmapWrapper> MmapWrapper::create(const std::string& filename) {
    int file_descriptor = open(filename.c_str(), O_RDONLY);
    if (file_descriptor == -1) {
        return nullptr;
    }
 
-    auto cfg_flags = get_mmap_flags();
-
    int mmap_flags = MAP_PRIVATE;
 
#ifdef __linux__
-    // Sequential access hint helps the kernel read-ahead efficiently and
-    // also encourages eviction of already-read pages (the kernel keeps
-    // a smaller working set when this is set).
-    if (cfg_flags.sequential) {
-        posix_fadvise(file_descriptor, 0, 0, POSIX_FADV_SEQUENTIAL);
-    }
-    if (cfg_flags.populate) {
-        mmap_flags |= MAP_POPULATE;
-    }
+    // performance flags used by llama.cpp
+    // posix_fadvise(file_descriptor, 0, 0, POSIX_FADV_SEQUENTIAL);
+    // mmap_flags |= MAP_POPULATE;
#endif
 
    struct stat sb;
@@ -265,27 +203,20 @@ std::unique_ptr<MmapWrapper> MmapWrapper::create(const std::string& filename, bo
 
    size_t file_size = sb.st_size;
 
-    if (file_size == 0) {
-        close(file_descriptor);
-        return nullptr;
-    }
-
-    int mmap_prot = PROT_READ | (writable ? PROT_WRITE : 0);
-
-    void* mapped_data = mmap(nullptr, file_size, mmap_prot, mmap_flags, file_descriptor, 0);
-
+    void* mapped_data = mmap(NULL, file_size, PROT_READ, mmap_flags, file_descriptor, 0);
+    close(file_descriptor);
    if (mapped_data == MAP_FAILED) {
-        close(file_descriptor);
        return nullptr;
    }
 
#ifdef __linux__
-    if (cfg_flags.willneed) {
-        posix_madvise(mapped_data, file_size, POSIX_MADV_WILLNEED);
-    }
+    // performance flags used by llama.cpp
+    // posix_madvise(mapped_data, file_size, POSIX_MADV_WILLNEED);
#endif
 
-    return std::make_unique<MmapWrapperImpl>(mapped_data, file_size, file_descriptor);
+    return std::make_unique<MmapWrapperImpl>(mapped_data, file_size);
}
 
#endif
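Aside: the get_mmap_flags machinery removed above makes master's mmap hints opt-in through a single environment variable, where master-584 keeps them as commented-out "performance flags used by llama.cpp". SD_MMAP_FLAGS takes comma-separated, case-insensitive tokens: sequential (POSIX_FADV_SEQUENTIAL before mmap), populate (MAP_POPULATE), willneed (POSIX_MADV_WILLNEED after mmap), and dontneed (MADV_DONTNEED plus POSIX_FADV_DONTNEED at unmap, which evicts the pagecache as the removed comment explains). A sketch of exercising it in-process; the file path is a placeholder:

    #include <cstdlib>
    #include "util.h"

    int main() {
        // Tokens are parsed by get_mmap_flags(); they only take effect on Linux.
        setenv("SD_MMAP_FLAGS", "sequential,dontneed", /*overwrite=*/1);
        auto mapping = MmapWrapper::create("model.safetensors");  // placeholder path
        return mapping ? 0 : 1;
    }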
@@ -564,6 +495,26 @@ sd_progress_cb_t sd_get_progress_callback() {
void* sd_get_progress_callback_data() {
    return sd_progress_cb_data;
}
+const char* sd_get_system_info() {
+    static char buffer[1024];
+    std::stringstream ss;
+    ss << "System Info: \n";
+    ss << " SSE3 = " << ggml_cpu_has_sse3() << " | ";
+    ss << " AVX = " << ggml_cpu_has_avx() << " | ";
+    ss << " AVX2 = " << ggml_cpu_has_avx2() << " | ";
+    ss << " AVX512 = " << ggml_cpu_has_avx512() << " | ";
+    ss << " AVX512_VBMI = " << ggml_cpu_has_avx512_vbmi() << " | ";
+    ss << " AVX512_VNNI = " << ggml_cpu_has_avx512_vnni() << " | ";
+    ss << " FMA = " << ggml_cpu_has_fma() << " | ";
+    ss << " NEON = " << ggml_cpu_has_neon() << " | ";
+    ss << " ARM_FMA = " << ggml_cpu_has_arm_fma() << " | ";
+    ss << " F16C = " << ggml_cpu_has_f16c() << " | ";
+    ss << " FP16_VA = " << ggml_cpu_has_fp16_va() << " | ";
+    ss << " WASM_SIMD = " << ggml_cpu_has_wasm_simd() << " | ";
+    ss << " VSX = " << ggml_cpu_has_vsx() << " | ";
+    snprintf(buffer, sizeof(buffer), "%s", ss.str().c_str());
+    return buffer;
+}
 
sd_image_t tensor_to_sd_image(const sd::Tensor<float>& tensor, int frame_index) {
    const auto& shape = tensor.shape();
@@ -573,7 +524,17 @@ sd_image_t tensor_to_sd_image(const sd::Tensor<float>& tensor, int frame_index)
    int channel = static_cast<int>(shape[shape.size() == 5 ? 3 : 2]);
    uint8_t* data = (uint8_t*)malloc(static_cast<size_t>(width * height * channel));
    GGML_ASSERT(data != nullptr);
-   preprocessing_tensor_frame_to_sd_image(tensor, frame_index, data);
+   for (int iw = 0; iw < width; ++iw) {
+       for (int ih = 0; ih < height; ++ih) {
+           for (int ic = 0; ic < channel; ++ic) {
+               float value = shape.size() == 5 ? tensor.index(iw, ih, frame_index, ic, 0)
+                                               : tensor.index(iw, ih, ic, frame_index);
+               value = std::clamp(value, 0.0f, 1.0f);
+               data[(ih * width + iw) * channel + ic] = static_cast<uint8_t>(std::round(value * 255.0f));
+           }
+       }
+   }
    return {
        static_cast<uint32_t>(width),
        static_cast<uint32_t>(height),
@@ -757,100 +718,3 @@ std::vector<std::pair<std::string, float>> parse_prompt_attention(const std::str
 
    return res;
}
-
-// test if the backend is a specific one, e.g. "CUDA", "ROCm", "Vulkan" etc.
-bool sd_backend_is(ggml_backend_t backend, const std::string& name) {
-    if (!backend) {
-        return false;
-    }
-    ggml_backend_dev_t dev = ggml_backend_get_device(backend);
-    if (!dev)
-        return false;
-    std::string dev_name = ggml_backend_dev_name(dev);
-    return dev_name.find(name) != std::string::npos;
-}
-
-ggml_backend_t sd_get_default_backend() {
-    ggml_backend_load_all_once();
-    static std::once_flag once;
-    std::call_once(once, []() {
-        size_t dev_count = ggml_backend_dev_count();
-        if (dev_count == 0) {
-            LOG_ERROR("No devices found!");
-        } else {
-            LOG_DEBUG("Found %zu backend devices:", dev_count);
-            for (size_t i = 0; i < dev_count; ++i) {
-                auto dev = ggml_backend_dev_get(i);
-                LOG_DEBUG("#%zu: %s", i, ggml_backend_dev_name(dev));
-            }
-        }
-    });
-    ggml_backend_t backend = nullptr;
-    const char* SD_VK_DEVICE = getenv("SD_VK_DEVICE");
-    if (SD_VK_DEVICE != nullptr) {
-        std::string sd_vk_device_str = SD_VK_DEVICE;
-        try {
-            unsigned long long device = std::stoull(sd_vk_device_str);
-            std::string vk_device_name = "Vulkan" + std::to_string(device);
-            if (backend_name_exists(vk_device_name)) {
-                LOG_INFO("Selecting %s as main device by env var SD_VK_DEVICE", vk_device_name.c_str());
-                backend = init_named_backend(vk_device_name);
-                if (!backend) {
-                    LOG_WARN("Device %s requested by SD_VK_DEVICE failed to init. Falling back to the default device.", vk_device_name.c_str());
-                }
-            } else {
-                LOG_WARN("Device %s requested by SD_VK_DEVICE was not found. Falling back to the default device.", vk_device_name.c_str());
-            }
-        } catch (const std::invalid_argument&) {
-            LOG_WARN("SD_VK_DEVICE environment variable is not a valid integer (%s). Falling back to the default device.", SD_VK_DEVICE);
-        } catch (const std::out_of_range&) {
-            LOG_WARN("SD_VK_DEVICE environment variable value is out of range for `unsigned long long` type (%s). Falling back to the default device.", SD_VK_DEVICE);
-        }
-    }
-
-    if (!backend) {
-        std::string dev_name = get_default_backend_name();
-        backend = init_named_backend(dev_name);
-        if (!backend && !dev_name.empty()) {
-            LOG_WARN("device %s failed to init", dev_name.c_str());
-        }
-    }
-
-    if (!backend) {
-        LOG_WARN("loading CPU backend");
-        backend = ggml_backend_cpu_init();
-    }
-
-    if (ggml_backend_is_cpu(backend)) {
-        LOG_DEBUG("Using CPU backend");
-    }
-
-    return backend;
-}
-
-// namespace is needed to avoid conflicts with ggml_backend_extend.hpp
-namespace ggml_cpu {
-#include "ggml-cpu.h"
-}
-
-const char* sd_get_system_info() {
-    using namespace ggml_cpu;
-    static char buffer[1024];
-    std::stringstream ss;
-    ss << "System Info: \n";
-    ss << " SSE3 = " << ggml_cpu_has_sse3() << " | ";
-    ss << " AVX = " << ggml_cpu_has_avx() << " | ";
-    ss << " AVX2 = " << ggml_cpu_has_avx2() << " | ";
-    ss << " AVX512 = " << ggml_cpu_has_avx512() << " | ";
-    ss << " AVX512_VBMI = " << ggml_cpu_has_avx512_vbmi() << " | ";
-    ss << " AVX512_VNNI = " << ggml_cpu_has_avx512_vnni() << " | ";
-    ss << " FMA = " << ggml_cpu_has_fma() << " | ";
-    ss << " NEON = " << ggml_cpu_has_neon() << " | ";
-    ss << " ARM_FMA = " << ggml_cpu_has_arm_fma() << " | ";
-    ss << " F16C = " << ggml_cpu_has_f16c() << " | ";
-    ss << " FP16_VA = " << ggml_cpu_has_fp16_va() << " | ";
-    ss << " WASM_SIMD = " << ggml_cpu_has_wasm_simd() << " | ";
-    ss << " VSX = " << ggml_cpu_has_vsx() << " | ";
-    snprintf(buffer, sizeof(buffer), "%s", ss.str().c_str());
-    return buffer;
-}
@@ -6,7 +6,6 @@
 #include <string>
 #include <vector>
 
-#include "ggml-backend.h"
 #include "stable-diffusion.h"
 #include "tensor.hpp"
 
@@ -42,7 +41,7 @@ sd::Tensor<float> clip_preprocess(const sd::Tensor<float>& image, int target_wid
 
class MmapWrapper {
public:
-    static std::unique_ptr<MmapWrapper> create(const std::string& filename, bool writable = false);
+    static std::unique_ptr<MmapWrapper> create(const std::string& filename);
 
    virtual ~MmapWrapper() = default;
 
@@ -52,7 +51,6 @@ public:
    MmapWrapper& operator=(MmapWrapper&&) = delete;
 
    const uint8_t* data() const { return static_cast<uint8_t*>(data_); }
-    uint8_t* writable_data() { return static_cast<uint8_t*>(data_); }
    size_t size() const { return size_; }
    bool copy_data(void* buf, size_t n, size_t offset) const;
 
@@ -84,10 +82,6 @@ int sd_get_preview_interval();
bool sd_should_preview_denoised();
bool sd_should_preview_noisy();
 
-// test if the backend is a specific one, e.g. "CUDA", "ROCm", "Vulkan" etc.
-bool sd_backend_is(ggml_backend_t backend, const std::string& name);
-ggml_backend_t sd_get_default_backend();
-
#define LOG_DEBUG(format, ...) log_printf(SD_LOG_DEBUG, __FILE__, __LINE__, format, ##__VA_ARGS__)
#define LOG_INFO(format, ...) log_printf(SD_LOG_INFO, __FILE__, __LINE__, format, ##__VA_ARGS__)
#define LOG_WARN(format, ...) log_printf(SD_LOG_WARN, __FILE__, __LINE__, format, ##__VA_ARGS__)
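Aside: together with the util.cpp hunk above, these declarations are the runtime backend layer that replaces master-584's compile-time SD_USE_* macros: sd_get_default_backend() enumerates devices once, honors the SD_VK_DEVICE environment variable to pin a Vulkan device, and falls back first to the default device and then to CPU, while sd_backend_is() substring-matches the live device name. A sketch of the intended call pattern, using only the two declarations removed above:

    #include "util.h"

    ggml_backend_t pick_device() {
        ggml_backend_t backend = sd_get_default_backend();  // env var, default device, CPU fallback
        if (sd_backend_is(backend, "Vulkan")) {
            // Backend-specific workarounds hang off checks like this; see the
            // Qwen and ZImage hunks in this diff, which force f32 precision on Vulkan.
        }
        return backend;
    }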
@@ -71,7 +71,7 @@ public:
            scale_factor = 16;
        } else if (sd_version_uses_flux2_vae(version)) {
            scale_factor = 16;
-       } else if (version == VERSION_CHROMA_RADIANCE || version == VERSION_HIDREAM_O1) {
+       } else if (version == VERSION_CHROMA_RADIANCE) {
            scale_factor = 1;
        }
        return scale_factor;
@@ -142,9 +142,8 @@ public:
                                 "vae encode compute failed while processing a tile");
        } else {
            output = _compute(n_threads, input, false);
-       }
 
-       free_compute_buffer();
+           free_compute_buffer();
+       }
 
        if (output.empty()) {
            LOG_ERROR("vae encode compute failed");
23 src/wan.hpp

@@ -692,7 +692,6 @@ namespace WAN {
            } else {
                x = conv1->forward(ctx, x);
            }
-           // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.encoder.prelude", "x");
 
            // downsamples
            std::vector<int64_t> dims = {dim};
@@ -718,14 +717,12 @@ namespace WAN {
                    x = layer->forward(ctx, x, b, feat_cache, feat_idx, chunk_idx);
                }
            }
-           // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.encoder.down." + std::to_string(i), "x");
        }
 
        // middle
        x = middle_0->forward(ctx, x, b, feat_cache, feat_idx);
        x = middle_1->forward(ctx, x, b);
        x = middle_2->forward(ctx, x, b, feat_cache, feat_idx);
-       // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.encoder.mid", "x");
 
        // head
        x = head_0->forward(ctx, x);
@@ -866,13 +863,11 @@ namespace WAN {
        } else {
            x = conv1->forward(ctx, x);
        }
-       // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.decoder.prelude", "x");
 
        // middle
        x = middle_0->forward(ctx, x, b, feat_cache, feat_idx);
        x = middle_1->forward(ctx, x, b);
        x = middle_2->forward(ctx, x, b, feat_cache, feat_idx);
-       // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.decoder.mid", "x");
 
        // upsamples
        std::vector<int64_t> dims = {dim_mult[dim_mult.size() - 1] * dim};
@@ -898,7 +893,6 @@ namespace WAN {
                    x = layer->forward(ctx, x, b, feat_cache, feat_idx, chunk_idx);
                }
            }
-           // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.decoder.up." + std::to_string(i), "x");
        }
 
        // head
@@ -1037,7 +1031,6 @@ namespace WAN {
            if (wan2_2) {
                x = patchify(ctx->ggml_ctx, x, 2, b);
            }
-           // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.encode.prelude", "x");
 
            auto encoder = std::dynamic_pointer_cast<Encoder3d>(blocks["encoder"]);
            auto conv1 = std::dynamic_pointer_cast<CausalConv3d>(blocks["conv1"]);
@@ -1058,7 +1051,6 @@ namespace WAN {
            }
            out = conv1->forward(ctx, out);
            auto mu = ggml_ext_chunk(ctx->ggml_ctx, out, 2, 3)[0];
-           // sd::ggml_graph_cut::mark_graph_cut(mu, "wan_vae.encode.final", "mu");
            clear_cache();
            return mu;
        }
@@ -1076,7 +1068,6 @@ namespace WAN {
 
            int64_t iter_ = z->ne[2];
            auto x = conv2->forward(ctx, z);
-           // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.decode.prelude", "x");
            ggml_tensor* out;
            for (int i = 0; i < iter_; i++) {
                _conv_idx = 0;
@@ -1092,7 +1083,6 @@ namespace WAN {
            if (wan2_2) {
                out = unpatchify(ctx->ggml_ctx, out, 2, b);
            }
-           // sd::ggml_graph_cut::mark_graph_cut(out, "wan_vae.decode.final", "out");
            clear_cache();
            return out;
        }
@@ -1108,14 +1098,12 @@ namespace WAN {
            auto conv2 = std::dynamic_pointer_cast<CausalConv3d>(blocks["conv2"]);
 
            auto x = conv2->forward(ctx, z);
-           // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.decode_partial.prelude", "x");
            auto in = ggml_ext_slice(ctx->ggml_ctx, x, 2, i, i + 1);  // [b*c, 1, h, w]
            _conv_idx = 0;
            auto out = decoder->forward(ctx, in, b, _feat_map, _conv_idx, i);
            if (wan2_2) {
                out = unpatchify(ctx->ggml_ctx, out, 2, b);
            }
-           // sd::ggml_graph_cut::mark_graph_cut(out, "wan_vae.decode_partial.final", "out");
            return out;
        }
    };
@@ -1996,13 +1984,6 @@ namespace WAN {
                c = ggml_reshape_3d(ctx->ggml_ctx, c, c->ne[0] * c->ne[1] * c->ne[2], c->ne[3] / N, N);  // [N, dim, t_len*h_len*w_len]
                c = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, c, 1, 0, 2, 3));  // [N, t_len*h_len*w_len, dim]
            }
-           sd::ggml_graph_cut::mark_graph_cut(x, "wan.prelude", "x");
-           // sd::ggml_graph_cut::mark_graph_cut(e, "wan.prelude", "e");
-           // sd::ggml_graph_cut::mark_graph_cut(e0, "wan.prelude", "e0");
-           // sd::ggml_graph_cut::mark_graph_cut(context, "wan.prelude", "context");
-           if (c != nullptr) {
-               sd::ggml_graph_cut::mark_graph_cut(c, "wan.prelude", "c");
-           }
 
            auto x_orig = x;
 
@@ -2023,10 +2004,6 @@ namespace WAN {
                c_skip = ggml_ext_scale(ctx->ggml_ctx, c_skip, vace_strength);
                x = ggml_add(ctx->ggml_ctx, x, c_skip);
            }
-           sd::ggml_graph_cut::mark_graph_cut(x, "wan.blocks." + std::to_string(i), "x");
-           if (c != nullptr) {
-               sd::ggml_graph_cut::mark_graph_cut(c, "wan.blocks." + std::to_string(i), "c");
-           }
        }
 
        x = head->forward(ctx, x, e);  // [N, t_len*h_len*w_len, pt*ph*pw*out_dim]
@@ -31,6 +31,10 @@ namespace ZImage {
             : head_dim(head_dim), num_heads(num_heads), num_kv_heads(num_kv_heads), qk_norm(qk_norm) {
             blocks["qkv"] = std::make_shared<Linear>(hidden_size, (num_heads + num_kv_heads * 2) * head_dim, false);
             float scale = 1.f;
+#if GGML_USE_HIP
+            // Prevent NaN issues with certain ROCm setups
+            scale = 1.f / 16.f;
+#endif
             blocks["out"] = std::make_shared<Linear>(num_heads * head_dim, hidden_size, false, false, false, scale);
             if (qk_norm) {
                 blocks["q_norm"] = std::make_shared<RMSNorm>(head_dim);
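About the `scale = 1.f / 16.f` handed to the output `Linear`: f16 matmuls on some ROCm setups evidently overflow to inf/NaN, and damping the magnitude of the projection avoids it. Assuming `Linear`'s `scale` acts as a compensated pre/post factor around the matmul (an assumption; the `Linear` internals are not part of this diff), the trick is:

```cpp
#include "ggml.h"

// Exact in real arithmetic; in f16 it trades a little precision for
// headroom against overflow. s is e.g. 1.f/16.f.
static struct ggml_tensor* scaled_matmul(struct ggml_context* ctx,
                                         struct ggml_tensor* w,
                                         struct ggml_tensor* x,
                                         float s) {
    x = ggml_scale(ctx, x, s);             // keep intermediates small
    struct ggml_tensor* y = ggml_mul_mat(ctx, w, x);
    return ggml_scale(ctx, y, 1.0f / s);   // restore the original magnitude
}
```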
@@ -48,10 +52,6 @@ namespace ZImage {
             auto qkv_proj = std::dynamic_pointer_cast<Linear>(blocks["qkv"]);
             auto out_proj = std::dynamic_pointer_cast<Linear>(blocks["out"]);

-            if (sd_backend_is(ctx->backend, "ROCm")) {
-                out_proj->set_scale(1.f / 16.f);
-            }
-
             auto qkv = qkv_proj->forward(ctx, x);  // [N, n_token, (num_heads + num_kv_heads*2)*head_dim]
             qkv = ggml_reshape_4d(ctx->ggml_ctx, qkv, head_dim, num_heads + num_kv_heads * 2, qkv->ne[1], qkv->ne[2]);  // [N, n_token, num_heads + num_kv_heads*2, head_dim]

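The reshape above unpacks a fused QKV projection for grouped-query attention: `num_heads` query heads share `num_kv_heads` key/value heads, so the fused output carries `(num_heads + 2 * num_kv_heads) * head_dim` features per token, presumably packed as Q then K then V along the head axis. A tiny worked example with made-up sizes:

```cpp
#include <cassert>

int main() {
    const int head_dim     = 128;  // illustrative values only
    const int num_heads    = 16;   // query heads
    const int num_kv_heads = 4;    // shared key/value heads

    // Width of the fused projection per token: 2048 for Q, 512 each for K and V.
    const int qkv_dim = (num_heads + num_kv_heads * 2) * head_dim;
    assert(qkv_dim == 16 * 128 + 2 * 4 * 128);  // 3072

    // After ggml_reshape_4d the head axis has 16 + 2*4 = 24 slots of head_dim.
    return 0;
}
```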
@@ -115,7 +115,9 @@ namespace ZImage {

             bool force_prec_f32 = false;
             float scale = 1.f / 128.f;
+#ifdef SD_USE_VULKAN
+            force_prec_f32 = true;
+#endif
             // The purpose of the scale here is to prevent NaN issues in certain situations.
             // For example, when using CUDA but the weights are k-quants.
             blocks["w2"] = std::make_shared<Linear>(hidden_dim, dim, false, false, force_prec_f32, scale);
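On `force_prec_f32`: ggml can force f32 accumulation for an individual matmul node, which is the usual remedy when f16 accumulation NaNs out (the comment's k-quants-on-CUDA case). Whether `Linear` routes its flag through exactly this call is an assumption, but in stock ggml the mechanism is:

```cpp
#include "ggml.h"

static struct ggml_tensor* mul_mat_f32_prec(struct ggml_context* ctx,
                                            struct ggml_tensor* w,
                                            struct ggml_tensor* x) {
    struct ggml_tensor* y = ggml_mul_mat(ctx, w, x);
    // Request f32 accumulation for this node instead of the f16 default.
    ggml_mul_mat_set_prec(y, GGML_PREC_F32);
    return y;
}
```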
@@ -127,10 +129,6 @@ namespace ZImage {
             auto w2 = std::dynamic_pointer_cast<Linear>(blocks["w2"]);
             auto w3 = std::dynamic_pointer_cast<Linear>(blocks["w3"]);

-            if (sd_backend_is(ctx->backend, "Vulkan")) {
-                w2->set_force_prec_f32(true);
-            }
-
             auto x1 = w1->forward(ctx, x);
             auto x3 = w3->forward(ctx, x);
             x = ggml_swiglu_split(ctx->ggml_ctx, x1, x3);
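`ggml_swiglu_split` fuses the standard two-branch SwiGLU: SiLU on the gate branch `x1 = w1(x)`, multiplied elementwise by the linear branch `x3 = w3(x)`, with `w2` projecting back down afterwards. Spelled out with unfused ops (equivalent, assuming the fused op follows the usual SwiGLU definition):

```cpp
#include "ggml.h"

// silu(x1) * x3, the unfused spelling of the ggml_swiglu_split call above.
static struct ggml_tensor* swiglu_unfused(struct ggml_context* ctx,
                                          struct ggml_tensor* x1,   // gate: w1(x)
                                          struct ggml_tensor* x3) { // value: w3(x)
    return ggml_mul(ctx, ggml_silu(ctx, x1), x3);
}
```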
@@ -371,9 +369,6 @@ namespace ZImage {

             auto txt = cap_embedder_1->forward(ctx, cap_embedder_0->forward(ctx, context));  // [N, n_txt_token, hidden_size]
             auto img = x_embedder->forward(ctx, x);  // [N, n_img_token, hidden_size]
-            sd::ggml_graph_cut::mark_graph_cut(txt, "z_image.prelude", "txt");
-            sd::ggml_graph_cut::mark_graph_cut(img, "z_image.prelude", "img");
-            sd::ggml_graph_cut::mark_graph_cut(t_emb, "z_image.prelude", "t_emb");

             int64_t n_txt_pad_token = Rope::bound_mod(static_cast<int>(n_txt_token), SEQ_MULTI_OF);
             if (n_txt_pad_token > 0) {
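`Rope::bound_mod` evidently returns how many pad tokens are needed to round the text sequence up to a multiple of `SEQ_MULTI_OF`, hence the `> 0` guard before padding. A self-contained sketch of those assumed semantics:

```cpp
#include <cassert>

// Pad count that rounds n up to a multiple of m; 0 when already aligned.
static int pad_to_multiple(int n, int m) {
    return (m - n % m) % m;
}

int main() {
    assert(pad_to_multiple(77, 32) == 19);  // 77 + 19 == 96 == 3 * 32
    assert(pad_to_multiple(64, 32) == 0);   // already a multiple
    return 0;
}
```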
@@ -396,24 +391,20 @@ namespace ZImage {
                 auto block = std::dynamic_pointer_cast<JointTransformerBlock>(blocks["context_refiner." + std::to_string(i)]);

                 txt = block->forward(ctx, txt, txt_pe, nullptr, nullptr);
-                sd::ggml_graph_cut::mark_graph_cut(txt, "z_image.context_refiner." + std::to_string(i), "txt");
             }

             for (int i = 0; i < z_image_params.num_refiner_layers; i++) {
                 auto block = std::dynamic_pointer_cast<JointTransformerBlock>(blocks["noise_refiner." + std::to_string(i)]);

                 img = block->forward(ctx, img, img_pe, nullptr, t_emb);
-                sd::ggml_graph_cut::mark_graph_cut(img, "z_image.noise_refiner." + std::to_string(i), "img");
             }

             auto txt_img = ggml_concat(ctx->ggml_ctx, txt, img, 1);  // [N, n_txt_token + n_txt_pad_token + n_img_token + n_img_pad_token, hidden_size]
-            sd::ggml_graph_cut::mark_graph_cut(txt_img, "z_image.prelude", "txt_img");

             for (int i = 0; i < z_image_params.num_layers; i++) {
                 auto block = std::dynamic_pointer_cast<JointTransformerBlock>(blocks["layers." + std::to_string(i)]);

                 txt_img = block->forward(ctx, txt_img, pe, nullptr, t_emb);
-                sd::ggml_graph_cut::mark_graph_cut(txt_img, "z_image.layers." + std::to_string(i), "txt_img");
             }

             txt_img = final_layer->forward(ctx, txt_img, t_emb);  // [N, n_txt_token + n_txt_pad_token + n_img_token + n_img_pad_token, ph*pw*C]
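Taken together, the hunk shows the model's three-phase layout: text-only refiner blocks without timestep conditioning, image-only refiner blocks conditioned on `t_emb`, then concatenation along the token axis and shared joint blocks over both modalities. A toy restatement of just the structure (the `Tensor` stand-in and the layer counts are illustrative, not the project's API):

```cpp
#include <cassert>

struct Tensor { int n_token; };              // only sequence length is modeled
static Tensor block(Tensor x) { return x; }  // a transformer block keeps shape

int main() {
    Tensor txt{96}, img{1024};
    for (int i = 0; i < 2; i++) txt = block(txt);            // context_refiner (no t_emb)
    for (int i = 0; i < 2; i++) img = block(img);            // noise_refiner (uses t_emb)
    Tensor txt_img{txt.n_token + img.n_token};               // ggml_concat on the token axis
    for (int i = 0; i < 30; i++) txt_img = block(txt_img);   // shared joint layers
    assert(txt_img.n_token == 96 + 1024);                    // final_layer then maps to patches
    return 0;
}
```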