feat: add vace support (#819)

* add wan vace t2v support

* add --vace-strength option

* add vace i2v support

* fix the processing of vace_context

* add vace v2v support

* update docs
leejet 2025-09-14 16:57:33 +08:00 committed by GitHub
parent 2c9b1e2594
commit 52a97b3ac1
16 changed files with 651 additions and 285 deletions


@ -313,6 +313,9 @@ arguments:
-i, --end-img [IMAGE] path to the end image, required by flf2v -i, --end-img [IMAGE] path to the end image, required by flf2v
--control-image [IMAGE] path to image condition, control net --control-image [IMAGE] path to image condition, control net
-r, --ref-image [PATH] reference image for Flux Kontext models (can be used multiple times) -r, --ref-image [PATH] reference image for Flux Kontext models (can be used multiple times)
--control-video [PATH] path to the control video frames; it must be a directory path.
The video frames inside should be stored as images named in lexicographical (character) order.
For example, if the control video path is `frames`, the directory contains images such as 00.png, 01.png, and so on.
--increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1). --increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1).
-o, --output OUTPUT path to write result image to (default: ./output.png) -o, --output OUTPUT path to write result image to (default: ./output.png)
-p, --prompt [PROMPT] the prompt to render -p, --prompt [PROMPT] the prompt to render
@ -379,6 +382,7 @@ arguments:
--moe-boundary BOUNDARY timestep boundary for Wan2.2 MoE model. (default: 0.875) --moe-boundary BOUNDARY timestep boundary for Wan2.2 MoE model. (default: 0.875)
only enabled if `--high-noise-steps` is set to -1 only enabled if `--high-noise-steps` is set to -1
--flow-shift SHIFT shift value for Flow models like SD3.x or WAN (default: auto) --flow-shift SHIFT shift value for Flow models like SD3.x or WAN (default: auto)
--vace-strength STRENGTH strength of the Wan VACE conditioning (default: 1.0)
-v, --verbose print extra info -v, --verbose print extra info
``` ```

6 binary files not shown.


@ -6,23 +6,29 @@
#include "unet.hpp" #include "unet.hpp"
#include "wan.hpp" #include "wan.hpp"
struct DiffusionParams {
struct ggml_tensor* x = NULL;
struct ggml_tensor* timesteps = NULL;
struct ggml_tensor* context = NULL;
struct ggml_tensor* c_concat = NULL;
struct ggml_tensor* y = NULL;
struct ggml_tensor* guidance = NULL;
std::vector<ggml_tensor*> ref_latents = {};
bool increase_ref_index = false;
int num_video_frames = -1;
std::vector<struct ggml_tensor*> controls = {};
float control_strength = 0.f;
struct ggml_tensor* vace_context = NULL;
float vace_strength = 1.f;
std::vector<int> skip_layers = {};
};
struct DiffusionModel { struct DiffusionModel {
virtual std::string get_desc() = 0; virtual std::string get_desc() = 0;
virtual void compute(int n_threads, virtual void compute(int n_threads,
struct ggml_tensor* x, DiffusionParams diffusion_params,
struct ggml_tensor* timesteps, struct ggml_tensor** output = NULL,
struct ggml_tensor* context, struct ggml_context* output_ctx = NULL) = 0;
struct ggml_tensor* c_concat,
struct ggml_tensor* y,
struct ggml_tensor* guidance,
std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false,
int num_video_frames = -1,
std::vector<struct ggml_tensor*> controls = {},
float control_strength = 0.f,
struct ggml_tensor** output = NULL,
struct ggml_context* output_ctx = NULL,
std::vector<int> skip_layers = std::vector<int>()) = 0;
virtual void alloc_params_buffer() = 0; virtual void alloc_params_buffer() = 0;
virtual void free_params_buffer() = 0; virtual void free_params_buffer() = 0;
virtual void free_compute_buffer() = 0; virtual void free_compute_buffer() = 0;
@ -71,22 +77,18 @@ struct UNetModel : public DiffusionModel {
} }
void compute(int n_threads, void compute(int n_threads,
struct ggml_tensor* x, DiffusionParams diffusion_params,
struct ggml_tensor* timesteps, struct ggml_tensor** output = NULL,
struct ggml_tensor* context, struct ggml_context* output_ctx = NULL) {
struct ggml_tensor* c_concat, return unet.compute(n_threads,
struct ggml_tensor* y, diffusion_params.x,
struct ggml_tensor* guidance, diffusion_params.timesteps,
std::vector<ggml_tensor*> ref_latents = {}, diffusion_params.context,
bool increase_ref_index = false, diffusion_params.c_concat,
int num_video_frames = -1, diffusion_params.y,
std::vector<struct ggml_tensor*> controls = {}, diffusion_params.num_video_frames,
float control_strength = 0.f, diffusion_params.controls,
struct ggml_tensor** output = NULL, diffusion_params.control_strength, output, output_ctx);
struct ggml_context* output_ctx = NULL,
std::vector<int> skip_layers = std::vector<int>()) {
(void)skip_layers; // SLG doesn't work with UNet models
return unet.compute(n_threads, x, timesteps, context, c_concat, y, num_video_frames, controls, control_strength, output, output_ctx);
} }
}; };
@ -129,21 +131,17 @@ struct MMDiTModel : public DiffusionModel {
} }
void compute(int n_threads, void compute(int n_threads,
struct ggml_tensor* x, DiffusionParams diffusion_params,
struct ggml_tensor* timesteps, struct ggml_tensor** output = NULL,
struct ggml_tensor* context, struct ggml_context* output_ctx = NULL) {
struct ggml_tensor* c_concat, return mmdit.compute(n_threads,
struct ggml_tensor* y, diffusion_params.x,
struct ggml_tensor* guidance, diffusion_params.timesteps,
std::vector<ggml_tensor*> ref_latents = {}, diffusion_params.context,
bool increase_ref_index = false, diffusion_params.y,
int num_video_frames = -1, output,
std::vector<struct ggml_tensor*> controls = {}, output_ctx,
float control_strength = 0.f, diffusion_params.skip_layers);
struct ggml_tensor** output = NULL,
struct ggml_context* output_ctx = NULL,
std::vector<int> skip_layers = std::vector<int>()) {
return mmdit.compute(n_threads, x, timesteps, context, y, output, output_ctx, skip_layers);
} }
}; };
@ -188,21 +186,21 @@ struct FluxModel : public DiffusionModel {
} }
void compute(int n_threads, void compute(int n_threads,
struct ggml_tensor* x, DiffusionParams diffusion_params,
struct ggml_tensor* timesteps, struct ggml_tensor** output = NULL,
struct ggml_tensor* context, struct ggml_context* output_ctx = NULL) {
struct ggml_tensor* c_concat, return flux.compute(n_threads,
struct ggml_tensor* y, diffusion_params.x,
struct ggml_tensor* guidance, diffusion_params.timesteps,
std::vector<ggml_tensor*> ref_latents = {}, diffusion_params.context,
bool increase_ref_index = false, diffusion_params.c_concat,
int num_video_frames = -1, diffusion_params.y,
std::vector<struct ggml_tensor*> controls = {}, diffusion_params.guidance,
float control_strength = 0.f, diffusion_params.ref_latents,
struct ggml_tensor** output = NULL, diffusion_params.increase_ref_index,
struct ggml_context* output_ctx = NULL, output,
std::vector<int> skip_layers = std::vector<int>()) { output_ctx,
return flux.compute(n_threads, x, timesteps, context, c_concat, y, guidance, ref_latents, increase_ref_index, output, output_ctx, skip_layers); diffusion_params.skip_layers);
} }
}; };
@ -248,21 +246,20 @@ struct WanModel : public DiffusionModel {
} }
void compute(int n_threads, void compute(int n_threads,
struct ggml_tensor* x, DiffusionParams diffusion_params,
struct ggml_tensor* timesteps, struct ggml_tensor** output = NULL,
struct ggml_tensor* context, struct ggml_context* output_ctx = NULL) {
struct ggml_tensor* c_concat, return wan.compute(n_threads,
struct ggml_tensor* y, diffusion_params.x,
struct ggml_tensor* guidance, diffusion_params.timesteps,
std::vector<ggml_tensor*> ref_latents = {}, diffusion_params.context,
bool increase_ref_index = false, diffusion_params.y,
int num_video_frames = -1, diffusion_params.c_concat,
std::vector<struct ggml_tensor*> controls = {}, NULL,
float control_strength = 0.f, diffusion_params.vace_context,
struct ggml_tensor** output = NULL, diffusion_params.vace_strength,
struct ggml_context* output_ctx = NULL, output,
std::vector<int> skip_layers = std::vector<int>()) { output_ctx);
return wan.compute(n_threads, x, timesteps, context, y, c_concat, NULL, output, output_ctx);
} }
}; };
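The hunk above replaces the long positional `compute(...)` argument list with a single `DiffusionParams` struct, so each backend (UNet, MMDiT, Flux, Wan) picks out only the fields it understands, and new conditioning such as `vace_context` no longer forces every signature to grow. A minimal caller-side sketch of the new API, assuming the project headers are in scope; the local variable names are illustrative, not taken from the commit:

```
// Only the fields a given model consumes need to be set; everything else keeps
// the defaults declared in DiffusionParams.
DiffusionParams p;
p.x             = noised_input;   // latent being denoised
p.timesteps     = timesteps;
p.context       = cond.c_crossattn;
p.c_concat      = cond.c_concat;
p.y             = cond.c_vector;
p.vace_context  = vace_context;   // ignored by the UNet/MMDiT/Flux backends
p.vace_strength = 1.0f;

struct ggml_tensor* out = NULL;
diffusion_model->compute(n_threads, p, &out, output_ctx);
```

This is the same pattern the sampler uses further down in `stable-diffusion.cpp`, where one `diffusion_params` instance is reused and only `context`/`c_concat`/`y` are swapped between the cond, uncond, and img-cond passes.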


@ -18,6 +18,12 @@
- Wan2.1 FLF2V 14B 720P - Wan2.1 FLF2V 14B 720P
- safetensors: https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/tree/main/split_files/diffusion_models - safetensors: https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/tree/main/split_files/diffusion_models
- gguf: https://huggingface.co/city96/Wan2.1-FLF2V-14B-720P-gguf/tree/main - gguf: https://huggingface.co/city96/Wan2.1-FLF2V-14B-720P-gguf/tree/main
- Wan2.1 VACE 1.3B
- safetensors: https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/tree/main/split_files/diffusion_models
- gguf: https://huggingface.co/calcuis/wan-1.3b-gguf/tree/main
- Wan2.1 VACE 14B
- safetensors: https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/tree/main/split_files/diffusion_models
- gguf: https://huggingface.co/QuantStack/Wan2.1_14B_VACE-GGUF/tree/main
- Wan2.2 - Wan2.2
- Wan2.2 TI2V 5B - Wan2.2 TI2V 5B
- safetensors: https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/tree/main/split_files/diffusion_models - safetensors: https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/tree/main/split_files/diffusion_models
@ -137,3 +143,62 @@
``` ```
<video src=../assets/wan/Wan2.2_14B_flf2v.mp4 controls="controls" muted="muted" type="video/mp4"></video> <video src=../assets/wan/Wan2.2_14B_flf2v.mp4 controls="controls" muted="muted" type="video/mp4"></video>
### Wan2.1 VACE 1.3B
#### T2V
```
.\bin\Release\sd.exe -M vid_gen --diffusion-model ..\..\ComfyUI\models\diffusion_models\wan2.1-vace-1.3b-q8_0.gguf --vae ..\..\ComfyUI\models\vae\wan_2.1_vae.safetensors --t5xxl ..\..\ComfyUI\models\text_encoders\umt5-xxl-encoder-Q8_0.gguf -p "a lovely cat" --cfg-scale 6.0 --sampling-method euler -v -n "色调艳丽过曝静态细节模糊不清字幕风格作品画作画面静止整体发灰最差质量低质量JPEG压缩残留丑陋的残缺的多余的手指画得不好的手部画得不好的脸部 畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" -W 832 -H 480 --diffusion-fa --video-frames 1 --offload-to-cpu
```
<video src=../assets/wan/Wan2.1_1.3B_vace_t2v.mp4 controls="controls" muted="muted" type="video/mp4"></video>
#### R2V
```
.\bin\Release\sd.exe -M vid_gen --diffusion-model ..\..\ComfyUI\models\diffusion_models\wan2.1-vace-1.3b-q8_0.gguf --vae ..\..\ComfyUI\models\vae\wan_2.1_vae.safetensors --t5xxl ..\..\ComfyUI\models\text_encoders\umt5-xxl-encoder-Q8_0.gguf -p "a lovely cat" --cfg-scale 6.0 --sampling-method euler -v -n "色调艳丽过曝静态细节模糊不清字幕风格作品画作画面静止整体发灰最差质量低质量JPEG压缩残留丑陋的残缺的多余的手指画得不好的手部画得不好的脸部 畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" -W 832 -H 480 --diffusion-fa -i ..\assets\cat_with_sd_cpp_42.png --video-frames 33 --offload-to-cpu
```
<video src=../assets/wan/Wan2.1_1.3B_vace_r2v.mp4 controls="controls" muted="muted" type="video/mp4"></video>
#### V2V
```
mkdir post+depth
ffmpeg -i ..\..\ComfyUI\input\post+depth.mp4 -qscale:v 1 -vf fps=8 post+depth\frame_%04d.jpg
.\bin\Release\sd.exe -M vid_gen --diffusion-model ..\..\ComfyUI\models\diffusion_models\wan2.1-vace-1.3b-q8_0.gguf --vae ..\..\ComfyUI\models\vae\wan_2.1_vae.safetensors --t5xxl ..\..\ComfyUI\models\text_encoders\umt5-xxl-encoder-Q8_0.gguf -p "The girl is dancing in a sea of flowers, slowly moving her hands. There is a close-up shot of her upper body. The character is surrounded by other transparent glass flowers in the style of Nicoletta Ceccoli, creating a beautiful, surreal, and emotionally expressive movie scene with a white, transparent feel and a dreamy atmosphere." --cfg-scale 6.0 --sampling-method euler -v -n "色调艳丽过曝静态细节模糊不清字幕风格作品画作画面静止整体发灰最差质量低质量JPEG压缩残留丑陋的残缺的多余的手指画得不好的手部画得不好的脸部 畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" -W 480 -H 832 --diffusion-fa -i ..\..\ComfyUI\input\dance_girl.jpg --control-video ./post+depth --video-frames 33 --offload-to-cpu
```
<video src=../assets/wan/Wan2.1_1.3B_vace_v2v.mp4 controls="controls" muted="muted" type="video/mp4"></video>
### Wan2.1 VACE 14B
#### T2V
```
.\bin\Release\sd.exe -M vid_gen --diffusion-model ..\..\ComfyUI\models\diffusion_models\Wan2.1_14B_VACE-Q8_0.gguf --vae ..\..\ComfyUI\models\vae\wan_2.1_vae.safetensors --t5xxl ..\..\ComfyUI\models\text_encoders\umt5-xxl-encoder-Q8_0.gguf -p "a lovely cat" --cfg-scale 6.0 --sampling-method euler -v -n "色调艳丽过曝静态细节模糊不清字幕风格作品画作画面静止整体发灰最差质量低质量JPEG压缩残留丑陋的残缺的多余的手指画得不好的手部画得不好的脸部 畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" -W 832 -H 480 --diffusion-fa --video-frames 33 --offload-to-cpu
```
<video src=../assets/wan/Wan2.1_14B_vace_t2v.mp4 controls="controls" muted="muted" type="video/mp4"></video>
#### R2V
```
.\bin\Release\sd.exe -M vid_gen --diffusion-model ..\..\ComfyUI\models\diffusion_models\Wan2.1_14B_VACE-Q8_0.gguf --vae ..\..\ComfyUI\models\vae\wan_2.1_vae.safetensors --t5xxl ..\..\ComfyUI\models\text_encoders\umt5-xxl-encoder-Q8_0.gguf -p "a lovely cat" --cfg-scale 6.0 --sampling-method euler -v -n "色调艳丽过曝静态细节模糊不清字幕风格作品画作画面静止整体发灰最差质量低质量JPEG压缩残留丑陋的残缺的多余的手指画得不好的手部画得不好的脸部 畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" -W 832 -H 480 --diffusion-fa -i ..\assets\cat_with_sd_cpp_42.png --video-frames 33 --offload-to-cpu
```
<video src=../assets/wan/Wan2.1_14B_vace_r2v.mp4 controls="controls" muted="muted" type="video/mp4"></video>
#### V2V
```
.\bin\Release\sd.exe -M vid_gen --diffusion-model ..\..\ComfyUI\models\diffusion_models\Wan2.1_14B_VACE-Q8_0.gguf --vae ..\..\ComfyUI\models\vae\wan_2.1_vae.safetensors --t5xxl ..\..\ComfyUI\models\text_encoders\umt5-xxl-encoder-Q8_0.gguf -p "The girl is dancing in a sea of flowers, slowly moving her hands. There is a close-up shot of her upper body. The character is surrounded by other transparent glass flowers in the style of Nicoletta Ceccoli, creating a beautiful, surreal, and emotionally expressive movie scene with a white, transparent feel and a dreamy atmosphere." --cfg-scale 6.0 --sampling-method euler -v -n "色调艳丽过曝静态细节模糊不清字幕风格作品画作画面静止整体发灰最差质量低质量JPEG压缩残留丑陋的残缺的多余的手指画得不好的手部画得不好的脸部 畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" -W 480 -H 832 --diffusion-fa -i ..\..\ComfyUI\input\dance_girl.jpg --control-video ./post+depth --video-frames 33 --offload-to-cpu
```
<video src=../assets/wan/Wan2.1_14B_vace_v2v.mp4 controls="controls" muted="muted" type="video/mp4"></video>


@ -35,6 +35,8 @@
#define SAFE_STR(s) ((s) ? (s) : "") #define SAFE_STR(s) ((s) ? (s) : "")
#define BOOL_STR(b) ((b) ? "true" : "false") #define BOOL_STR(b) ((b) ? "true" : "false")
namespace fs = std::filesystem;
const char* modes_str[] = { const char* modes_str[] = {
"img_gen", "img_gen",
"vid_gen", "vid_gen",
@ -75,6 +77,7 @@ struct SDParams {
std::string mask_image_path; std::string mask_image_path;
std::string control_image_path; std::string control_image_path;
std::vector<std::string> ref_image_paths; std::vector<std::string> ref_image_paths;
std::string control_video_path;
bool increase_ref_index = false; bool increase_ref_index = false;
std::string prompt; std::string prompt;
@ -91,10 +94,10 @@ struct SDParams {
std::vector<int> high_noise_skip_layers = {7, 8, 9}; std::vector<int> high_noise_skip_layers = {7, 8, 9};
sd_sample_params_t high_noise_sample_params; sd_sample_params_t high_noise_sample_params;
float moe_boundary = 0.875f; float moe_boundary = 0.875f;
int video_frames = 1; int video_frames = 1;
int fps = 16; int fps = 16;
float vace_strength = 1.f;
float strength = 0.75f; float strength = 0.75f;
float control_strength = 0.9f; float control_strength = 0.9f;
@ -159,6 +162,7 @@ void print_params(SDParams params) {
for (auto& path : params.ref_image_paths) { for (auto& path : params.ref_image_paths) {
printf(" %s\n", path.c_str()); printf(" %s\n", path.c_str());
}; };
printf(" control_video_path: %s\n", params.control_video_path.c_str());
printf(" increase_ref_index: %s\n", params.increase_ref_index ? "true" : "false"); printf(" increase_ref_index: %s\n", params.increase_ref_index ? "true" : "false");
printf(" offload_params_to_cpu: %s\n", params.offload_params_to_cpu ? "true" : "false"); printf(" offload_params_to_cpu: %s\n", params.offload_params_to_cpu ? "true" : "false");
printf(" clip_on_cpu: %s\n", params.clip_on_cpu ? "true" : "false"); printf(" clip_on_cpu: %s\n", params.clip_on_cpu ? "true" : "false");
@ -179,7 +183,7 @@ void print_params(SDParams params) {
printf(" flow_shift: %.2f\n", params.flow_shift); printf(" flow_shift: %.2f\n", params.flow_shift);
printf(" strength(img2img): %.2f\n", params.strength); printf(" strength(img2img): %.2f\n", params.strength);
printf(" rng: %s\n", sd_rng_type_name(params.rng_type)); printf(" rng: %s\n", sd_rng_type_name(params.rng_type));
printf(" seed: %ld\n", params.seed); printf(" seed: %zd\n", params.seed);
printf(" batch_count: %d\n", params.batch_count); printf(" batch_count: %d\n", params.batch_count);
printf(" vae_tiling: %s\n", params.vae_tiling_params.enabled ? "true" : "false"); printf(" vae_tiling: %s\n", params.vae_tiling_params.enabled ? "true" : "false");
printf(" upscale_repeats: %d\n", params.upscale_repeats); printf(" upscale_repeats: %d\n", params.upscale_repeats);
@ -187,6 +191,7 @@ void print_params(SDParams params) {
printf(" chroma_use_t5_mask: %s\n", params.chroma_use_t5_mask ? "true" : "false"); printf(" chroma_use_t5_mask: %s\n", params.chroma_use_t5_mask ? "true" : "false");
printf(" chroma_t5_mask_pad: %d\n", params.chroma_t5_mask_pad); printf(" chroma_t5_mask_pad: %d\n", params.chroma_t5_mask_pad);
printf(" video_frames: %d\n", params.video_frames); printf(" video_frames: %d\n", params.video_frames);
printf(" vace_strength: %.2f\n", params.vace_strength);
printf(" fps: %d\n", params.fps); printf(" fps: %d\n", params.fps);
free(sample_params_str); free(sample_params_str);
free(high_noise_sample_params_str); free(high_noise_sample_params_str);
@ -226,6 +231,9 @@ void print_usage(int argc, const char* argv[]) {
printf(" -i, --end-img [IMAGE] path to the end image, required by flf2v\n"); printf(" -i, --end-img [IMAGE] path to the end image, required by flf2v\n");
printf(" --control-image [IMAGE] path to image condition, control net\n"); printf(" --control-image [IMAGE] path to image condition, control net\n");
printf(" -r, --ref-image [PATH] reference image for Flux Kontext models (can be used multiple times) \n"); printf(" -r, --ref-image [PATH] reference image for Flux Kontext models (can be used multiple times) \n");
printf(" --control-video [PATH] path to control video frames, It must be a directory path.\n");
printf(" The video frames inside should be stored as images in lexicographical (character) order\n");
printf(" For example, if the control video path is `frames`, the directory contain images such as 00.png, 01.png, … etc.\n");
printf(" --increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1).\n"); printf(" --increase-ref-index automatically increase the indices of references images based on the order they are listed (starting with 1).\n");
printf(" -o, --output OUTPUT path to write result image to (default: ./output.png)\n"); printf(" -o, --output OUTPUT path to write result image to (default: ./output.png)\n");
printf(" -p, --prompt [PROMPT] the prompt to render\n"); printf(" -p, --prompt [PROMPT] the prompt to render\n");
@ -292,6 +300,7 @@ void print_usage(int argc, const char* argv[]) {
printf(" --moe-boundary BOUNDARY timestep boundary for Wan2.2 MoE model. (default: 0.875)\n"); printf(" --moe-boundary BOUNDARY timestep boundary for Wan2.2 MoE model. (default: 0.875)\n");
printf(" only enabled if `--high-noise-steps` is set to -1\n"); printf(" only enabled if `--high-noise-steps` is set to -1\n");
printf(" --flow-shift SHIFT shift value for Flow models like SD3.x or WAN (default: auto)\n"); printf(" --flow-shift SHIFT shift value for Flow models like SD3.x or WAN (default: auto)\n");
printf(" --vace-strength wan vace strength\n");
printf(" -v, --verbose print extra info\n"); printf(" -v, --verbose print extra info\n");
} }
@ -486,6 +495,7 @@ void parse_args(int argc, const char** argv, SDParams& params) {
{"", "--input-id-images-dir", "", &params.input_id_images_path}, {"", "--input-id-images-dir", "", &params.input_id_images_path},
{"", "--mask", "", &params.mask_image_path}, {"", "--mask", "", &params.mask_image_path},
{"", "--control-image", "", &params.control_image_path}, {"", "--control-image", "", &params.control_image_path},
{"", "--control-video", "", &params.control_video_path},
{"-o", "--output", "", &params.output_path}, {"-o", "--output", "", &params.output_path},
{"-p", "--prompt", "", &params.prompt}, {"-p", "--prompt", "", &params.prompt},
{"-n", "--negative-prompt", "", &params.negative_prompt}, {"-n", "--negative-prompt", "", &params.negative_prompt},
@ -526,6 +536,7 @@ void parse_args(int argc, const char** argv, SDParams& params) {
{"", "--control-strength", "", &params.control_strength}, {"", "--control-strength", "", &params.control_strength},
{"", "--moe-boundary", "", &params.moe_boundary}, {"", "--moe-boundary", "", &params.moe_boundary},
{"", "--flow-shift", "", &params.flow_shift}, {"", "--flow-shift", "", &params.flow_shift},
{"", "--vace-strength", "", &params.vace_strength},
{"", "--vae-tile-overlap", "", &params.vae_tiling_params.target_overlap}, {"", "--vae-tile-overlap", "", &params.vae_tiling_params.target_overlap},
}; };
@ -1111,6 +1122,7 @@ int main(int argc, const char* argv[]) {
sd_image_t control_image = {(uint32_t)params.width, (uint32_t)params.height, 3, NULL}; sd_image_t control_image = {(uint32_t)params.width, (uint32_t)params.height, 3, NULL};
sd_image_t mask_image = {(uint32_t)params.width, (uint32_t)params.height, 1, NULL}; sd_image_t mask_image = {(uint32_t)params.width, (uint32_t)params.height, 1, NULL};
std::vector<sd_image_t> ref_images; std::vector<sd_image_t> ref_images;
std::vector<sd_image_t> control_frames;
auto release_all_resources = [&]() { auto release_all_resources = [&]() {
free(init_image.data); free(init_image.data);
@ -1122,6 +1134,11 @@ int main(int argc, const char* argv[]) {
ref_image.data = NULL; ref_image.data = NULL;
} }
ref_images.clear(); ref_images.clear();
for (auto frame : control_frames) {
free(frame.data);
frame.data = NULL;
}
control_frames.clear();
}; };
if (params.init_image_path.size() > 0) { if (params.init_image_path.size() > 0) {
@ -1180,14 +1197,12 @@ int main(int argc, const char* argv[]) {
return 1; return 1;
} }
if (params.canny_preprocess) { // apply preprocessor if (params.canny_preprocess) { // apply preprocessor
control_image.data = preprocess_canny(control_image.data, preprocess_canny(control_image,
control_image.width, 0.08f,
control_image.height, 0.08f,
0.08f, 0.8f,
0.08f, 1.0f,
0.8f, false);
1.0f,
false);
} }
} }
@ -1209,6 +1224,48 @@ int main(int argc, const char* argv[]) {
} }
} }
if (!params.control_video_path.empty()) {
std::string dir = params.control_video_path;
if (!fs::exists(dir) || !fs::is_directory(dir)) {
fprintf(stderr, "'%s' is not a valid directory\n", dir.c_str());
release_all_resources();
return 1;
}
for (const auto& entry : fs::directory_iterator(dir)) {
if (!entry.is_regular_file())
continue;
std::string path = entry.path().string();
std::string ext = entry.path().extension().string();
std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
if (ext == ".jpg" || ext == ".jpeg" || ext == ".png" || ext == ".bmp") {
if (params.verbose) {
printf("load control frame %zu from '%s'\n", control_frames.size(), path.c_str());
}
int width = 0;
int height = 0;
uint8_t* image_buffer = load_image(path.c_str(), width, height, params.width, params.height);
if (image_buffer == NULL) {
fprintf(stderr, "load image from '%s' failed\n", path.c_str());
release_all_resources();
return 1;
}
control_frames.push_back({(uint32_t)params.width,
(uint32_t)params.height,
3,
image_buffer});
if (control_frames.size() >= params.video_frames) {
break;
}
}
}
}
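One caveat about the loop above: `std::filesystem::directory_iterator` does not guarantee any particular ordering, while the help text says the frames are named in lexicographical (character) order, implying they should also be loaded in that order. A sketch of a sort-then-load variant under that assumption; it reuses the same variable names but is illustrative rather than part of the commit:

```
// Collect matching frame paths first, sort them by name, then load in order.
std::vector<fs::path> frame_paths;
for (const auto& entry : fs::directory_iterator(dir)) {
    if (!entry.is_regular_file())
        continue;
    std::string ext = entry.path().extension().string();
    std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
    if (ext == ".jpg" || ext == ".jpeg" || ext == ".png" || ext == ".bmp") {
        frame_paths.push_back(entry.path());
    }
}
std::sort(frame_paths.begin(), frame_paths.end());  // lexicographical by path
for (const auto& path : frame_paths) {
    if ((int)control_frames.size() >= params.video_frames) {
        break;
    }
    // load_image(path.string().c_str(), ...) and push_back, as in the loop above
}
```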
if (params.mode == VID_GEN) { if (params.mode == VID_GEN) {
vae_decode_only = false; vae_decode_only = false;
} }
@ -1292,6 +1349,8 @@ int main(int argc, const char* argv[]) {
params.clip_skip, params.clip_skip,
init_image, init_image,
end_image, end_image,
control_frames.data(),
(int)control_frames.size(),
params.width, params.width,
params.height, params.height,
params.sample_params, params.sample_params,
@ -1300,6 +1359,7 @@ int main(int argc, const char* argv[]) {
params.strength, params.strength,
params.seed, params.seed,
params.video_frames, params.video_frames,
params.vace_strength,
}; };
results = generate_video(sd_ctx, &vid_gen_params, &num_results); results = generate_video(sd_ctx, &vid_gen_params, &num_results);
@ -1342,7 +1402,6 @@ int main(int argc, const char* argv[]) {
// create directory if not exists // create directory if not exists
{ {
namespace fs = std::filesystem;
const fs::path out_path = params.output_path; const fs::path out_path = params.output_path;
if (const fs::path out_dir = out_path.parent_path(); !out_dir.empty()) { if (const fs::path out_dir = out_path.parent_path(); !out_dir.empty()) {
std::error_code ec; std::error_code ec;


@ -185,6 +185,14 @@ __STATIC_INLINE__ ggml_fp16_t ggml_tensor_get_f16(const ggml_tensor* tensor, int
return *(ggml_fp16_t*)((char*)(tensor->data) + i * tensor->nb[3] + j * tensor->nb[2] + k * tensor->nb[1] + l * tensor->nb[0]); return *(ggml_fp16_t*)((char*)(tensor->data) + i * tensor->nb[3] + j * tensor->nb[2] + k * tensor->nb[1] + l * tensor->nb[0]);
} }
__STATIC_INLINE__ float sd_image_get_f32(sd_image_t image, int iw, int ih, int ic, bool scale = true) {
float value = *(image.data + ih * image.width * image.channel + iw * image.channel + ic);
if (scale) {
value /= 255.f;
}
return value;
}
static struct ggml_tensor* get_tensor_from_graph(struct ggml_cgraph* gf, const char* name) { static struct ggml_tensor* get_tensor_from_graph(struct ggml_cgraph* gf, const char* name) {
struct ggml_tensor* res = NULL; struct ggml_tensor* res = NULL;
for (int i = 0; i < ggml_graph_n_nodes(gf); i++) { for (int i = 0; i < ggml_graph_n_nodes(gf); i++) {
@ -235,6 +243,52 @@ __STATIC_INLINE__ void print_ggml_tensor(struct ggml_tensor* tensor, bool shape_
} }
} }
__STATIC_INLINE__ void ggml_tensor_iter(
ggml_tensor* tensor,
const std::function<void(ggml_tensor*, int64_t, int64_t, int64_t, int64_t)>& fn) {
int64_t n0 = tensor->ne[0];
int64_t n1 = tensor->ne[1];
int64_t n2 = tensor->ne[2];
int64_t n3 = tensor->ne[3];
for (int64_t i3 = 0; i3 < n3; i3++) {
for (int64_t i2 = 0; i2 < n2; i2++) {
for (int64_t i1 = 0; i1 < n1; i1++) {
for (int64_t i0 = 0; i0 < n0; i0++) {
fn(tensor, i0, i1, i2, i3);
}
}
}
}
}
__STATIC_INLINE__ void ggml_tensor_iter(
ggml_tensor* tensor,
const std::function<void(ggml_tensor*, int64_t)>& fn) {
for (int64_t i = 0; i < ggml_nelements(tensor); i++) {
fn(tensor, i);
}
}
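The two `ggml_tensor_iter` overloads above are what later hunks use to replace hand-written four-level loops. A short usage sketch, assuming `t` is an already-allocated, contiguous f32 tensor:

```
// 4-index form: visit every element together with its (i0, i1, i2, i3) coordinates.
ggml_tensor_iter(t, [&](ggml_tensor* t, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
    float v = ggml_tensor_get_f32(t, i0, i1, i2, i3);
    ggml_tensor_set_f32(t, v * 2.0f, i0, i1, i2, i3);  // e.g. scale in place
});

// Flat form: visit elements by linear index when coordinates are not needed.
int64_t nonzero = 0;
ggml_tensor_iter(t, [&](ggml_tensor* t, int64_t i) {
    if (((float*)t->data)[i] != 0.0f) {
        nonzero++;
    }
});
```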
__STATIC_INLINE__ void ggml_tensor_diff(
ggml_tensor* a,
ggml_tensor* b,
float gap = 0.1f) {
GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
ggml_tensor_iter(a, [&](ggml_tensor* a, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float a_value = ggml_tensor_get_f32(a, i0, i1, i2, i3);
float b_value = ggml_tensor_get_f32(b, i0, i1, i2, i3);
if (fabsf(a_value - b_value) > gap) {
LOG_WARN("[%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 "] %f %f", i3, i2, i1, i0, a_value, b_value);
}
});
}
__STATIC_INLINE__ ggml_tensor* load_tensor_from_file(ggml_context* ctx, const std::string& file_path) { __STATIC_INLINE__ ggml_tensor* load_tensor_from_file(ggml_context* ctx, const std::string& file_path) {
std::ifstream file(file_path, std::ios::binary); std::ifstream file(file_path, std::ios::binary);
if (!file.is_open()) { if (!file.is_open()) {
@ -366,42 +420,18 @@ __STATIC_INLINE__ uint8_t* sd_tensor_to_image(struct ggml_tensor* input, int idx
return image_data; return image_data;
} }
__STATIC_INLINE__ void sd_image_to_tensor(const uint8_t* image_data, __STATIC_INLINE__ void sd_image_to_tensor(sd_image_t image,
struct ggml_tensor* output, ggml_tensor* tensor,
bool scale = true) { bool scale = true) {
int64_t width = output->ne[0]; GGML_ASSERT(image.width == tensor->ne[0]);
int64_t height = output->ne[1]; GGML_ASSERT(image.height == tensor->ne[1]);
int64_t channels = output->ne[2]; GGML_ASSERT(image.channel == tensor->ne[2]);
GGML_ASSERT(channels == 3 && output->type == GGML_TYPE_F32); GGML_ASSERT(1 == tensor->ne[3]);
for (int iy = 0; iy < height; iy++) { GGML_ASSERT(tensor->type == GGML_TYPE_F32);
for (int ix = 0; ix < width; ix++) { ggml_tensor_iter(tensor, [&](ggml_tensor* tensor, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
for (int k = 0; k < channels; k++) { float value = sd_image_get_f32(image, i0, i1, i2, scale);
float value = *(image_data + iy * width * channels + ix * channels + k); ggml_tensor_set_f32(tensor, value, i0, i1, i2, i3);
if (scale) { });
value /= 255.f;
}
ggml_tensor_set_f32(output, value, ix, iy, k);
}
}
}
}
__STATIC_INLINE__ void sd_mask_to_tensor(const uint8_t* image_data,
struct ggml_tensor* output,
bool scale = true) {
int64_t width = output->ne[0];
int64_t height = output->ne[1];
int64_t channels = output->ne[2];
GGML_ASSERT(channels == 1 && output->type == GGML_TYPE_F32);
for (int iy = 0; iy < height; iy++) {
for (int ix = 0; ix < width; ix++) {
float value = *(image_data + iy * width * channels + ix);
if (scale) {
value /= 255.f;
}
ggml_tensor_set_f32(output, value, ix, iy);
}
}
} }
__STATIC_INLINE__ void sd_apply_mask(struct ggml_tensor* image_data, __STATIC_INLINE__ void sd_apply_mask(struct ggml_tensor* image_data,
@ -1636,6 +1666,7 @@ protected:
ggml_backend_tensor_copy(t, offload_t); ggml_backend_tensor_copy(t, offload_t);
std::swap(t->buffer, offload_t->buffer); std::swap(t->buffer, offload_t->buffer);
std::swap(t->data, offload_t->data); std::swap(t->data, offload_t->data);
std::swap(t->extra, offload_t->extra);
t = ggml_get_next_tensor(params_ctx, t); t = ggml_get_next_tensor(params_ctx, t);
offload_t = ggml_get_next_tensor(offload_ctx, offload_t); offload_t = ggml_get_next_tensor(offload_ctx, offload_t);
@ -1666,8 +1697,10 @@ protected:
while (t != NULL && offload_t != NULL) { while (t != NULL && offload_t != NULL) {
t->buffer = offload_t->buffer; t->buffer = offload_t->buffer;
t->data = offload_t->data; t->data = offload_t->data;
t->extra = offload_t->extra;
offload_t->buffer = NULL; offload_t->buffer = NULL;
offload_t->data = NULL; offload_t->data = NULL;
offload_t->extra = NULL;
t = ggml_get_next_tensor(params_ctx, t); t = ggml_get_next_tensor(params_ctx, t);
offload_t = ggml_get_next_tensor(offload_ctx, offload_t); offload_t = ggml_get_next_tensor(offload_ctx, offload_t);


@ -162,7 +162,7 @@ void threshold_hystersis(struct ggml_tensor* img, float high_threshold, float lo
} }
} }
uint8_t* preprocess_canny(uint8_t* img, int width, int height, float high_threshold, float low_threshold, float weak, float strong, bool inverse) { bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold, float weak, float strong, bool inverse) {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10MB params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10MB
params.mem_buffer = NULL; params.mem_buffer = NULL;
@ -171,7 +171,7 @@ uint8_t* preprocess_canny(uint8_t* img, int width, int height, float high_thresh
if (!work_ctx) { if (!work_ctx) {
LOG_ERROR("ggml_init() failed"); LOG_ERROR("ggml_init() failed");
return NULL; return false;
} }
float kX[9] = { float kX[9] = {
@ -192,8 +192,8 @@ uint8_t* preprocess_canny(uint8_t* img, int width, int height, float high_thresh
struct ggml_tensor* sf_ky = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1); struct ggml_tensor* sf_ky = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
memcpy(sf_ky->data, kY, ggml_nbytes(sf_ky)); memcpy(sf_ky->data, kY, ggml_nbytes(sf_ky));
gaussian_kernel(gkernel); gaussian_kernel(gkernel);
struct ggml_tensor* image = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 3, 1); struct ggml_tensor* image = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 3, 1);
struct ggml_tensor* image_gray = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 1, 1); struct ggml_tensor* image_gray = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 1, 1);
struct ggml_tensor* iX = ggml_dup_tensor(work_ctx, image_gray); struct ggml_tensor* iX = ggml_dup_tensor(work_ctx, image_gray);
struct ggml_tensor* iY = ggml_dup_tensor(work_ctx, image_gray); struct ggml_tensor* iY = ggml_dup_tensor(work_ctx, image_gray);
struct ggml_tensor* G = ggml_dup_tensor(work_ctx, image_gray); struct ggml_tensor* G = ggml_dup_tensor(work_ctx, image_gray);
@ -209,8 +209,8 @@ uint8_t* preprocess_canny(uint8_t* img, int width, int height, float high_thresh
non_max_supression(image_gray, G, tetha); non_max_supression(image_gray, G, tetha);
threshold_hystersis(image_gray, high_threshold, low_threshold, weak, strong); threshold_hystersis(image_gray, high_threshold, low_threshold, weak, strong);
// to RGB channels // to RGB channels
for (int iy = 0; iy < height; iy++) { for (int iy = 0; iy < img.height; iy++) {
for (int ix = 0; ix < width; ix++) { for (int ix = 0; ix < img.width; ix++) {
float gray = ggml_tensor_get_f32(image_gray, ix, iy); float gray = ggml_tensor_get_f32(image_gray, ix, iy);
gray = inverse ? 1.0f - gray : gray; gray = inverse ? 1.0f - gray : gray;
ggml_tensor_set_f32(image, gray, ix, iy); ggml_tensor_set_f32(image, gray, ix, iy);
@ -218,10 +218,11 @@ uint8_t* preprocess_canny(uint8_t* img, int width, int height, float high_thresh
ggml_tensor_set_f32(image, gray, ix, iy, 2); ggml_tensor_set_f32(image, gray, ix, iy, 2);
} }
} }
free(img);
uint8_t* output = sd_tensor_to_image(image); uint8_t* output = sd_tensor_to_image(image);
free(img.data);
img.data = output;
ggml_free(work_ctx); ggml_free(work_ctx);
return output; return true;
} }
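With this change the preprocessor works in place: it builds the edge map, frees the original pixel buffer, installs the new one, and returns `false` only when the ggml work context cannot be allocated. One thing to watch: as rendered here, `img` is received by value, so the final `img.data = output;` only updates the local copy; for the call site in `main.cpp` (which keeps using `control_image` afterwards) to see the new buffer, the parameter would presumably need to be a pointer (the rendering may simply have dropped a `*`). The call-site shape after this commit looks like the following; the error handling is an illustrative addition, not quoted from the diff:

```
if (params.canny_preprocess) {  // apply preprocessor
    // Old API: control_image.data = preprocess_canny(control_image.data,
    //                                                control_image.width, control_image.height,
    //                                                0.08f, 0.08f, 0.8f, 1.0f, false);
    if (!preprocess_canny(control_image, 0.08f, 0.08f, 0.8f, 1.0f, false)) {
        fprintf(stderr, "canny preprocessing failed\n");
        release_all_resources();
        return 1;
    }
}
```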
#endif // __PREPROCESSING_HPP__ #endif // __PREPROCESSING_HPP__


@ -776,7 +776,12 @@ public:
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
struct ggml_tensor* out = ggml_dup_tensor(work_ctx, x_t); struct ggml_tensor* out = ggml_dup_tensor(work_ctx, x_t);
diffusion_model->compute(n_threads, x_t, timesteps, c, concat, NULL, NULL, {}, false, -1, {}, 0.f, &out); DiffusionParams diffusion_params;
diffusion_params.x = x_t;
diffusion_params.timesteps = timesteps;
diffusion_params.context = c;
diffusion_params.c_concat = concat;
diffusion_model->compute(n_threads, diffusion_params, &out);
diffusion_model->free_compute_buffer(); diffusion_model->free_compute_buffer();
double result = 0.f; double result = 0.f;
@ -954,7 +959,7 @@ public:
free(resized_image.data); free(resized_image.data);
resized_image.data = NULL; resized_image.data = NULL;
} else { } else {
sd_image_to_tensor(init_image.data, init_img); sd_image_to_tensor(init_image, init_img);
} }
if (augmentation_level > 0.f) { if (augmentation_level > 0.f) {
struct ggml_tensor* noise = ggml_dup_tensor(work_ctx, init_img); struct ggml_tensor* noise = ggml_dup_tensor(work_ctx, init_img);
@ -1034,7 +1039,9 @@ public:
SDCondition id_cond, SDCondition id_cond,
std::vector<ggml_tensor*> ref_latents = {}, std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false, bool increase_ref_index = false,
ggml_tensor* denoise_mask = nullptr) { ggml_tensor* denoise_mask = NULL,
ggml_tensor* vace_context = NULL,
float vace_strength = 1.f) {
std::vector<int> skip_layers(guidance.slg.layers, guidance.slg.layers + guidance.slg.layer_count); std::vector<int> skip_layers(guidance.slg.layers, guidance.slg.layers + guidance.slg.layer_count);
float cfg_scale = guidance.txt_cfg; float cfg_scale = guidance.txt_cfg;
@ -1118,34 +1125,31 @@ public:
// GGML_ASSERT(0); // GGML_ASSERT(0);
} }
DiffusionParams diffusion_params;
diffusion_params.x = noised_input;
diffusion_params.timesteps = timesteps;
diffusion_params.guidance = guidance_tensor;
diffusion_params.ref_latents = ref_latents;
diffusion_params.increase_ref_index = increase_ref_index;
diffusion_params.controls = controls;
diffusion_params.control_strength = control_strength;
diffusion_params.vace_context = vace_context;
diffusion_params.vace_strength = vace_strength;
if (start_merge_step == -1 || step <= start_merge_step) { if (start_merge_step == -1 || step <= start_merge_step) {
// cond // cond
diffusion_params.context = cond.c_crossattn;
diffusion_params.c_concat = cond.c_concat;
diffusion_params.y = cond.c_vector;
work_diffusion_model->compute(n_threads, work_diffusion_model->compute(n_threads,
noised_input, diffusion_params,
timesteps,
cond.c_crossattn,
cond.c_concat,
cond.c_vector,
guidance_tensor,
ref_latents,
increase_ref_index,
-1,
controls,
control_strength,
&out_cond); &out_cond);
} else { } else {
diffusion_params.context = id_cond.c_crossattn;
diffusion_params.c_concat = cond.c_concat;
diffusion_params.y = id_cond.c_vector;
work_diffusion_model->compute(n_threads, work_diffusion_model->compute(n_threads,
noised_input, diffusion_params,
timesteps,
id_cond.c_crossattn,
cond.c_concat,
id_cond.c_vector,
guidance_tensor,
ref_latents,
increase_ref_index,
-1,
controls,
control_strength,
&out_cond); &out_cond);
} }
@ -1156,36 +1160,23 @@ public:
control_net->compute(n_threads, noised_input, control_hint, timesteps, uncond.c_crossattn, uncond.c_vector); control_net->compute(n_threads, noised_input, control_hint, timesteps, uncond.c_crossattn, uncond.c_vector);
controls = control_net->controls; controls = control_net->controls;
} }
diffusion_params.controls = controls;
diffusion_params.context = uncond.c_crossattn;
diffusion_params.c_concat = uncond.c_concat;
diffusion_params.y = uncond.c_vector;
work_diffusion_model->compute(n_threads, work_diffusion_model->compute(n_threads,
noised_input, diffusion_params,
timesteps,
uncond.c_crossattn,
uncond.c_concat,
uncond.c_vector,
guidance_tensor,
ref_latents,
increase_ref_index,
-1,
controls,
control_strength,
&out_uncond); &out_uncond);
negative_data = (float*)out_uncond->data; negative_data = (float*)out_uncond->data;
} }
float* img_cond_data = NULL; float* img_cond_data = NULL;
if (has_img_cond) { if (has_img_cond) {
diffusion_params.context = img_cond.c_crossattn;
diffusion_params.c_concat = img_cond.c_concat;
diffusion_params.y = img_cond.c_vector;
work_diffusion_model->compute(n_threads, work_diffusion_model->compute(n_threads,
noised_input, diffusion_params,
timesteps,
img_cond.c_crossattn,
img_cond.c_concat,
img_cond.c_vector,
guidance_tensor,
ref_latents,
increase_ref_index,
-1,
controls,
control_strength,
&out_img_cond); &out_img_cond);
img_cond_data = (float*)out_img_cond->data; img_cond_data = (float*)out_img_cond->data;
} }
@ -1196,21 +1187,13 @@ public:
if (is_skiplayer_step) { if (is_skiplayer_step) {
LOG_DEBUG("Skipping layers at step %d\n", step); LOG_DEBUG("Skipping layers at step %d\n", step);
// skip layer (same as conditionned) // skip layer (same as conditionned)
diffusion_params.context = cond.c_crossattn;
diffusion_params.c_concat = cond.c_concat;
diffusion_params.y = cond.c_vector;
diffusion_params.skip_layers = skip_layers;
work_diffusion_model->compute(n_threads, work_diffusion_model->compute(n_threads,
noised_input, diffusion_params,
timesteps, &out_skip);
cond.c_crossattn,
cond.c_concat,
cond.c_vector,
guidance_tensor,
ref_latents,
increase_ref_index,
-1,
controls,
control_strength,
&out_skip,
NULL,
skip_layers);
skip_layer_data = (float*)out_skip->data; skip_layer_data = (float*)out_skip->data;
} }
float* vec_denoised = (float*)denoised->data; float* vec_denoised = (float*)denoised->data;
@ -1826,6 +1809,7 @@ void sd_vid_gen_params_init(sd_vid_gen_params_t* sd_vid_gen_params) {
sd_vid_gen_params->seed = -1; sd_vid_gen_params->seed = -1;
sd_vid_gen_params->video_frames = 6; sd_vid_gen_params->video_frames = 6;
sd_vid_gen_params->moe_boundary = 0.875f; sd_vid_gen_params->moe_boundary = 0.875f;
sd_vid_gen_params->vace_strength = 1.f;
} }
struct sd_ctx_t { struct sd_ctx_t {
@ -2056,7 +2040,7 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
struct ggml_tensor* image_hint = NULL; struct ggml_tensor* image_hint = NULL;
if (control_image.data != NULL) { if (control_image.data != NULL) {
image_hint = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 3, 1); image_hint = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 3, 1);
sd_image_to_tensor(control_image.data, image_hint); sd_image_to_tensor(control_image, image_hint);
} }
// Sample // Sample
@ -2306,8 +2290,8 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* sd_img_g
ggml_tensor* init_img = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 3, 1); ggml_tensor* init_img = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 3, 1);
ggml_tensor* mask_img = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 1, 1); ggml_tensor* mask_img = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 1, 1);
sd_mask_to_tensor(sd_img_gen_params->mask_image.data, mask_img); sd_image_to_tensor(sd_img_gen_params->mask_image, mask_img);
sd_image_to_tensor(sd_img_gen_params->init_image.data, init_img); sd_image_to_tensor(sd_img_gen_params->init_image, init_img);
if (sd_version_is_inpaint(sd_ctx->sd->version)) { if (sd_version_is_inpaint(sd_ctx->sd->version)) {
int64_t mask_channels = 1; int64_t mask_channels = 1;
@ -2398,7 +2382,7 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* sd_img_g
sd_img_gen_params->ref_images[i].height, sd_img_gen_params->ref_images[i].height,
3, 3,
1); 1);
sd_image_to_tensor(sd_img_gen_params->ref_images[i].data, img); sd_image_to_tensor(sd_img_gen_params->ref_images[i], img);
ggml_tensor* latent = NULL; ggml_tensor* latent = NULL;
if (sd_ctx->sd->use_tiny_autoencoder) { if (sd_ctx->sd->use_tiny_autoencoder) {
@ -2504,7 +2488,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
} }
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
params.mem_buffer = NULL; params.mem_buffer = NULL;
params.no_alloc = false; params.no_alloc = false;
// LOG_DEBUG("mem_size %u ", params.mem_size); // LOG_DEBUG("mem_size %u ", params.mem_size);
@ -2531,6 +2515,8 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
ggml_tensor* clip_vision_output = NULL; ggml_tensor* clip_vision_output = NULL;
ggml_tensor* concat_latent = NULL; ggml_tensor* concat_latent = NULL;
ggml_tensor* denoise_mask = NULL; ggml_tensor* denoise_mask = NULL;
ggml_tensor* vace_context = NULL;
int64_t ref_image_num = 0; // for vace
if (sd_ctx->sd->diffusion_model->get_desc() == "Wan2.1-I2V-14B" || if (sd_ctx->sd->diffusion_model->get_desc() == "Wan2.1-I2V-14B" ||
sd_ctx->sd->diffusion_model->get_desc() == "Wan2.2-I2V-14B" || sd_ctx->sd->diffusion_model->get_desc() == "Wan2.2-I2V-14B" ||
sd_ctx->sd->diffusion_model->get_desc() == "Wan2.1-FLF2V-14B") { sd_ctx->sd->diffusion_model->get_desc() == "Wan2.1-FLF2V-14B") {
@ -2560,23 +2546,17 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
ggml_tensor* image = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, frames, 3); ggml_tensor* image = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, frames, 3);
for (int i3 = 0; i3 < image->ne[3]; i3++) { // channels ggml_tensor_iter(image, [&](ggml_tensor* image, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
for (int i2 = 0; i2 < image->ne[2]; i2++) { float value = 0.5f;
for (int i1 = 0; i1 < image->ne[1]; i1++) { // height if (i2 == 0 && sd_vid_gen_params->init_image.data) { // start image
for (int i0 = 0; i0 < image->ne[0]; i0++) { // width value = *(sd_vid_gen_params->init_image.data + i1 * width * 3 + i0 * 3 + i3);
float value = 0.5f; value /= 255.f;
if (i2 == 0 && sd_vid_gen_params->init_image.data) { // start image } else if (i2 == frames - 1 && sd_vid_gen_params->end_image.data) {
value = *(sd_vid_gen_params->init_image.data + i1 * width * 3 + i0 * 3 + i3); value = *(sd_vid_gen_params->end_image.data + i1 * width * 3 + i0 * 3 + i3);
value /= 255.f; value /= 255.f;
} else if (i2 == frames - 1 && sd_vid_gen_params->end_image.data) {
value = *(sd_vid_gen_params->end_image.data + i1 * width * 3 + i0 * 3 + i3);
value /= 255.f;
}
ggml_tensor_set_f32(image, value, i0, i1, i2, i3);
}
}
} }
} ggml_tensor_set_f32(image, value, i0, i1, i2, i3);
});
concat_latent = sd_ctx->sd->encode_first_stage(work_ctx, image); // [b*c, t, h/8, w/8] concat_latent = sd_ctx->sd->encode_first_stage(work_ctx, image); // [b*c, t, h/8, w/8]
@ -2591,21 +2571,15 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
concat_latent->ne[1], concat_latent->ne[1],
concat_latent->ne[2], concat_latent->ne[2],
4); // [b*4, t, w/8, h/8] 4); // [b*4, t, w/8, h/8]
for (int i3 = 0; i3 < concat_mask->ne[3]; i3++) { ggml_tensor_iter(concat_mask, [&](ggml_tensor* concat_mask, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
for (int i2 = 0; i2 < concat_mask->ne[2]; i2++) { float value = 0.0f;
for (int i1 = 0; i1 < concat_mask->ne[1]; i1++) { if (i2 == 0 && sd_vid_gen_params->init_image.data) { // start image
for (int i0 = 0; i0 < concat_mask->ne[0]; i0++) { value = 1.0f;
float value = 0.0f; } else if (i2 == frames - 1 && sd_vid_gen_params->end_image.data && i3 == 3) {
if (i2 == 0 && sd_vid_gen_params->init_image.data) { // start image value = 1.0f;
value = 1.0f;
} else if (i2 == frames - 1 && sd_vid_gen_params->end_image.data && i3 == 3) {
value = 1.0f;
}
ggml_tensor_set_f32(concat_mask, value, i0, i1, i2, i3);
}
}
} }
} ggml_tensor_set_f32(concat_mask, value, i0, i1, i2, i3);
});
concat_latent = ggml_tensor_concat(work_ctx, concat_mask, concat_latent, 3); // [b*(c+4), t, h/8, w/8] concat_latent = ggml_tensor_concat(work_ctx, concat_mask, concat_latent, 3); // [b*(c+4), t, h/8, w/8]
} else if (sd_ctx->sd->diffusion_model->get_desc() == "Wan2.2-TI2V-5B" && sd_vid_gen_params->init_image.data) { } else if (sd_ctx->sd->diffusion_model->get_desc() == "Wan2.2-TI2V-5B" && sd_vid_gen_params->init_image.data) {
@ -2613,7 +2587,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
ggml_tensor* init_img = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 3, 1); ggml_tensor* init_img = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 3, 1);
sd_image_to_tensor(sd_vid_gen_params->init_image.data, init_img); sd_image_to_tensor(sd_vid_gen_params->init_image, init_img);
init_img = ggml_reshape_4d(work_ctx, init_img, width, height, 1, 3); init_img = ggml_reshape_4d(work_ctx, init_img, width, height, 1, 3);
auto init_image_latent = sd_ctx->sd->encode_first_stage(work_ctx, init_img); // [b*c, 1, h/16, w/16] auto init_image_latent = sd_ctx->sd->encode_first_stage(work_ctx, init_img); // [b*c, 1, h/16, w/16]
@ -2624,22 +2598,95 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
sd_ctx->sd->process_latent_out(init_latent); sd_ctx->sd->process_latent_out(init_latent);
for (int i3 = 0; i3 < init_image_latent->ne[3]; i3++) { ggml_tensor_iter(init_image_latent, [&](ggml_tensor* t, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
for (int i2 = 0; i2 < init_image_latent->ne[2]; i2++) { float value = ggml_tensor_get_f32(t, i0, i1, i2, i3);
for (int i1 = 0; i1 < init_image_latent->ne[1]; i1++) { ggml_tensor_set_f32(init_latent, value, i0, i1, i2, i3);
for (int i0 = 0; i0 < init_image_latent->ne[0]; i0++) { if (i3 == 0) {
float value = ggml_tensor_get_f32(init_image_latent, i0, i1, i2, i3); ggml_tensor_set_f32(denoise_mask, 0.f, i0, i1, i2, i3);
ggml_tensor_set_f32(init_latent, value, i0, i1, i2, i3);
if (i3 == 0) {
ggml_tensor_set_f32(denoise_mask, 0.f, i0, i1, i2, i3);
}
}
}
} }
} });
sd_ctx->sd->process_latent_in(init_latent); sd_ctx->sd->process_latent_in(init_latent);
int64_t t2 = ggml_time_ms();
LOG_INFO("encode_first_stage completed, taking %" PRId64 " ms", t2 - t1);
} else if (sd_ctx->sd->diffusion_model->get_desc() == "Wan2.1-VACE-1.3B" ||
sd_ctx->sd->diffusion_model->get_desc() == "Wan2.x-VACE-14B") {
LOG_INFO("VACE");
int64_t t1 = ggml_time_ms();
ggml_tensor* ref_image_latent = NULL;
if (sd_vid_gen_params->init_image.data) {
ggml_tensor* ref_img = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 3, 1);
sd_image_to_tensor(sd_vid_gen_params->init_image, ref_img);
ref_img = ggml_reshape_4d(work_ctx, ref_img, width, height, 1, 3);
ref_image_latent = sd_ctx->sd->encode_first_stage(work_ctx, ref_img); // [b*c, 1, h/16, w/16]
sd_ctx->sd->process_latent_in(ref_image_latent);
auto zero_latent = ggml_dup_tensor(work_ctx, ref_image_latent);
ggml_set_f32(zero_latent, 0.f);
ref_image_latent = ggml_tensor_concat(work_ctx, ref_image_latent, zero_latent, 3); // [b*2*c, 1, h/16, w/16]
}
ggml_tensor* control_video = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, frames, 3);
ggml_tensor_iter(control_video, [&](ggml_tensor* control_video, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float value = 0.5f;
if (i2 < sd_vid_gen_params->control_frames_size) {
value = sd_image_get_f32(sd_vid_gen_params->control_frames[i2], i0, i1, i3);
}
ggml_tensor_set_f32(control_video, value, i0, i1, i2, i3);
});
ggml_tensor* mask = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, frames, 1);
ggml_set_f32(mask, 1.0f);
ggml_tensor* inactive = ggml_dup_tensor(work_ctx, control_video);
ggml_tensor* reactive = ggml_dup_tensor(work_ctx, control_video);
ggml_tensor_iter(control_video, [&](ggml_tensor* t, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float control_video_value = ggml_tensor_get_f32(t, i0, i1, i2, i3) - 0.5f;
float mask_value = ggml_tensor_get_f32(mask, i0, i1, i2, 0);
float inactive_value = (control_video_value * (1.f - mask_value)) + 0.5f;
float reactive_value = (control_video_value * mask_value) + 0.5f;
ggml_tensor_set_f32(inactive, inactive_value, i0, i1, i2, i3);
ggml_tensor_set_f32(reactive, reactive_value, i0, i1, i2, i3);
});
inactive = sd_ctx->sd->encode_first_stage(work_ctx, inactive); // [b*c, t, h/8, w/8]
reactive = sd_ctx->sd->encode_first_stage(work_ctx, reactive); // [b*c, t, h/8, w/8]
sd_ctx->sd->process_latent_in(inactive);
sd_ctx->sd->process_latent_in(reactive);
int64_t length = inactive->ne[2];
if (ref_image_latent) {
length += 1;
frames = (length - 1) * 4 + 1;
ref_image_num = 1;
}
vace_context = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, inactive->ne[0], inactive->ne[1], length, 96); // [b*96, t, h/8, w/8]
ggml_tensor_iter(vace_context, [&](ggml_tensor* vace_context, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float value;
if (i3 < 32) {
if (ref_image_latent && i2 == 0) {
value = ggml_tensor_get_f32(ref_image_latent, i0, i1, 0, i3);
} else {
if (i3 < 16) {
value = ggml_tensor_get_f32(inactive, i0, i1, i2 - ref_image_num, i3);
} else {
value = ggml_tensor_get_f32(reactive, i0, i1, i2 - ref_image_num, i3 - 16);
}
}
} else { // mask
if (ref_image_latent && i2 == 0) {
value = 0.f;
} else {
int64_t vae_stride = 8;
int64_t mask_height_index = i1 * vae_stride + (i3 - 32) / vae_stride;
int64_t mask_width_index = i0 * vae_stride + (i3 - 32) % vae_stride;
value = ggml_tensor_get_f32(mask, mask_width_index, mask_height_index, i2 - ref_image_num, 0);
}
}
ggml_tensor_set_f32(vace_context, value, i0, i1, i2, i3);
});
int64_t t2 = ggml_time_ms(); int64_t t2 = ggml_time_ms();
LOG_INFO("encode_first_stage completed, taking %" PRId64 " ms", t2 - t1); LOG_INFO("encode_first_stage completed, taking %" PRId64 " ms", t2 - t1);
} }
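For orientation, the `vace_context` assembled above packs 96 channels per latent position, matching `vace_in_dim = 96` in `wan.hpp`: channels 0–15 hold the "inactive" latent (control video where the mask is 0), channels 16–31 the "reactive" latent (control video where the mask is 1), and channels 32–95 the mask itself, re-packed from pixel space at the VAE spatial stride of 8 (each 8×8 pixel patch becomes 64 channels). When a reference image is given it occupies latent frame 0, shifting the control frames by `ref_image_num` and extending the pixel-frame count via `frames = (length - 1) * 4 + 1`, the Wan VAE's temporal stride of 4; for example, the `--video-frames 33` used in the docs corresponds to 9 latent frames. A small sketch of the same index arithmetic with the constants named, for illustration only:

```
// Channel layout of vace_context, per latent frame and spatial position:
//   [ 0..15]  inactive latent  (control video where mask == 0)
//   [16..31]  reactive latent  (control video where mask == 1)
//   [32..95]  mask, one channel per pixel of the 8x8 VAE patch
constexpr int64_t kLatentChannels = 16;
constexpr int64_t kVaeStride      = 8;                                    // spatial stride
constexpr int64_t kMaskChannels   = kVaeStride * kVaeStride;              // 64
constexpr int64_t kVaceInDim      = 2 * kLatentChannels + kMaskChannels;  // 96

// Map a mask channel index i3 (32..95) at latent position (i0, i1) back to the
// pixel it came from, mirroring the arithmetic in the loop above.
inline void mask_channel_to_pixel(int64_t i3, int64_t i0, int64_t i1,
                                  int64_t* px, int64_t* py) {
    int64_t m = i3 - 2 * kLatentChannels;    // 0..63 within the patch
    *py = i1 * kVaeStride + m / kVaeStride;  // pixel row
    *px = i0 * kVaeStride + m % kVaeStride;  // pixel column
}
```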
@ -2721,7 +2768,10 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
-1, -1,
{}, {},
{}, {},
denoise_mask); false,
denoise_mask,
vace_context,
sd_vid_gen_params->vace_strength);
int64_t sampling_end = ggml_time_ms(); int64_t sampling_end = ggml_time_ms();
LOG_INFO("sampling(high noise) completed, taking %.2fs", (sampling_end - sampling_start) * 1.0f / 1000); LOG_INFO("sampling(high noise) completed, taking %.2fs", (sampling_end - sampling_start) * 1.0f / 1000);
@ -2753,7 +2803,10 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
-1, -1,
{}, {},
{}, {},
denoise_mask); false,
denoise_mask,
vace_context,
sd_vid_gen_params->vace_strength);
int64_t sampling_end = ggml_time_ms(); int64_t sampling_end = ggml_time_ms();
LOG_INFO("sampling completed, taking %.2fs", (sampling_end - sampling_start) * 1.0f / 1000); LOG_INFO("sampling completed, taking %.2fs", (sampling_end - sampling_start) * 1.0f / 1000);
@ -2762,6 +2815,20 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
} }
} }
if (ref_image_num > 0) {
ggml_tensor* trim_latent = ggml_new_tensor_4d(work_ctx,
GGML_TYPE_F32,
final_latent->ne[0],
final_latent->ne[1],
final_latent->ne[2] - ref_image_num,
final_latent->ne[3]);
ggml_tensor_iter(trim_latent, [&](ggml_tensor* trim_latent, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float value = ggml_tensor_get_f32(final_latent, i0, i1, i2 + ref_image_num, i3);
ggml_tensor_set_f32(trim_latent, value, i0, i1, i2, i3);
});
final_latent = trim_latent;
}
int64_t t4 = ggml_time_ms(); int64_t t4 = ggml_time_ms();
LOG_INFO("generating latent video completed, taking %.2fs", (t4 - t2) * 1.0f / 1000); LOG_INFO("generating latent video completed, taking %.2fs", (t4 - t2) * 1.0f / 1000);
struct ggml_tensor* vid = sd_ctx->sd->decode_first_stage(work_ctx, final_latent, true); struct ggml_tensor* vid = sd_ctx->sd->decode_first_stage(work_ctx, final_latent, true);


@ -214,6 +214,8 @@ typedef struct {
int clip_skip; int clip_skip;
sd_image_t init_image; sd_image_t init_image;
sd_image_t end_image; sd_image_t end_image;
sd_image_t* control_frames;
int control_frames_size;
int width; int width;
int height; int height;
sd_sample_params_t sample_params; sd_sample_params_t sample_params;
@ -222,6 +224,7 @@ typedef struct {
float strength; float strength;
int64_t seed; int64_t seed;
int video_frames; int video_frames;
float vace_strength;
} sd_vid_gen_params_t; } sd_vid_gen_params_t;
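On the public C API side, the two new fields slot into `sd_vid_gen_params_t` next to the existing ones. A minimal sketch of a VACE video-generation call; `sd_ctx`, `ref_image`, `frames`, and `num_frames` are placeholders supplied by the caller, and fields not shown in this diff (prompt, sampler settings, and so on) are filled in the usual way:

```
sd_vid_gen_params_t p;
sd_vid_gen_params_init(&p);           // defaults, including vace_strength = 1.0f
p.init_image          = ref_image;    // optional reference image for R2V
p.control_frames      = frames;       // sd_image_t array decoded by the caller
p.control_frames_size = num_frames;
p.width               = 832;
p.height              = 480;
p.video_frames        = 33;
p.vace_strength       = 1.0f;

int num_results = 0;
sd_image_t* results = generate_video(sd_ctx, &p, &num_results);
```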
typedef struct sd_ctx_t sd_ctx_t; typedef struct sd_ctx_t sd_ctx_t;
@ -278,14 +281,12 @@ SD_API bool convert(const char* input_path,
enum sd_type_t output_type, enum sd_type_t output_type,
const char* tensor_type_rules); const char* tensor_type_rules);
SD_API uint8_t* preprocess_canny(uint8_t* img, SD_API bool preprocess_canny(sd_image_t image,
int width, float high_threshold,
int height, float low_threshold,
float high_threshold, float weak,
float low_threshold, float strong,
float weak, bool inverse);
float strong,
bool inverse);
#ifdef __cplusplus #ifdef __cplusplus
} }


@ -81,7 +81,7 @@ struct UpscalerGGML {
} }
// LOG_DEBUG("upscale work buffer size: %.2f MB", params.mem_size / 1024.f / 1024.f); // LOG_DEBUG("upscale work buffer size: %.2f MB", params.mem_size / 1024.f / 1024.f);
ggml_tensor* input_image_tensor = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, input_image.width, input_image.height, 3, 1); ggml_tensor* input_image_tensor = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, input_image.width, input_image.height, 3, 1);
sd_image_to_tensor(input_image.data, input_image_tensor); sd_image_to_tensor(input_image, input_image_tensor);
ggml_tensor* upscaled = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, output_width, output_height, 3, 1); ggml_tensor* upscaled = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, output_width, output_height, 3, 1);
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) { auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {

wan.hpp

@@ -1532,13 +1532,13 @@ namespace WAN {
        blocks["ffn.2"] = std::shared_ptr<GGMLBlock>(new Linear(ffn_dim, dim));
    }

-   struct ggml_tensor* forward(struct ggml_context* ctx,
+   virtual struct ggml_tensor* forward(struct ggml_context* ctx,
                                ggml_backend_t backend,
                                struct ggml_tensor* x,
                                struct ggml_tensor* e,
                                struct ggml_tensor* pe,
                                struct ggml_tensor* context,
                                int64_t context_img_len = 257) {
        // x: [N, n_token, dim]
        // e: [N, 6, dim] or [N, T, 6, dim]
        // context: [N, context_img_len + context_txt_len, dim]

@@ -1584,6 +1584,59 @@ namespace WAN {
        }
    };
class VaceWanAttentionBlock : public WanAttentionBlock {
protected:
int block_id;
void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") {
enum ggml_type wtype = get_type(prefix + "weight", tensor_types, GGML_TYPE_F32);
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1);
}
public:
VaceWanAttentionBlock(bool t2v_cross_attn,
int64_t dim,
int64_t ffn_dim,
int64_t num_heads,
bool qk_norm = true,
bool cross_attn_norm = false,
float eps = 1e-6,
int block_id = 0,
bool flash_attn = false)
: WanAttentionBlock(t2v_cross_attn, dim, ffn_dim, num_heads, qk_norm, cross_attn_norm, eps, flash_attn), block_id(block_id) {
if (block_id == 0) {
blocks["before_proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim));
}
blocks["after_proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim));
}
std::pair<ggml_tensor*, ggml_tensor*> forward(struct ggml_context* ctx,
ggml_backend_t backend,
struct ggml_tensor* c,
struct ggml_tensor* x,
struct ggml_tensor* e,
struct ggml_tensor* pe,
struct ggml_tensor* context,
int64_t context_img_len = 257) {
// x: [N, n_token, dim]
// e: [N, 6, dim] or [N, T, 6, dim]
// context: [N, context_img_len + context_txt_len, dim]
// return [N, n_token, dim]
if (block_id == 0) {
auto before_proj = std::dynamic_pointer_cast<Linear>(blocks["before_proj"]);
c = before_proj->forward(ctx, c);
c = ggml_add(ctx, c, x);
}
auto after_proj = std::dynamic_pointer_cast<Linear>(blocks["after_proj"]);
c = WanAttentionBlock::forward(ctx, backend, c, e, pe, context, context_img_len);
auto c_skip = after_proj->forward(ctx, c);
return {c_skip, c};
}
};
class Head : public GGMLBlock {
protected:
    int dim;
@@ -1680,22 +1733,25 @@ namespace WAN {
    };

    struct WanParams {
        std::string model_type = "t2v";
        std::tuple<int, int, int> patch_size = {1, 2, 2};
        int64_t text_len = 512;
        int64_t in_dim = 16;
        int64_t dim = 2048;
        int64_t ffn_dim = 8192;
        int64_t freq_dim = 256;
        int64_t text_dim = 4096;
        int64_t out_dim = 16;
        int64_t num_heads = 16;
        int64_t num_layers = 32;
        int64_t vace_layers = 0;
        int64_t vace_in_dim = 96;
        std::map<int, int> vace_layers_mapping = {};
        bool qk_norm = true;
        bool cross_attn_norm = true;
        float eps = 1e-6;
        int64_t flf_pos_embed_token_number = 0;
        int theta = 10000;
        // wan2.1 1.3B: 1536/12, wan2.1/2.2 14B: 5120/40, wan2.2 5B: 3074/24
        std::vector<int> axes_dim = {44, 42, 42};
        int64_t axes_dim_sum = 128;
@@ -1746,6 +1802,31 @@ namespace WAN {
if (params.model_type == "i2v") {
blocks["img_emb"] = std::shared_ptr<GGMLBlock>(new MLPProj(1280, params.dim, params.flf_pos_embed_token_number));
}
// vace
if (params.vace_layers > 0) {
for (int i = 0; i < params.vace_layers; i++) {
auto block = std::shared_ptr<GGMLBlock>(new VaceWanAttentionBlock(params.model_type == "t2v",
params.dim,
params.ffn_dim,
params.num_heads,
params.qk_norm,
params.cross_attn_norm,
params.eps,
i,
params.flash_attn));
blocks["vace_blocks." + std::to_string(i)] = block;
}
int step = params.num_layers / params.vace_layers;
int n = 0;
for (int i = 0; i < params.num_layers; i += step) {
this->params.vace_layers_mapping[i] = n;
n++;
}
blocks["vace_patch_embedding"] = std::shared_ptr<GGMLBlock>(new Conv3d(params.vace_in_dim, params.dim, params.patch_size, params.patch_size));
}
}

struct ggml_tensor* pad_to_patch_size(struct ggml_context* ctx,

@@ -1795,9 +1876,12 @@
                                 struct ggml_tensor* timestep,
                                 struct ggml_tensor* context,
                                 struct ggml_tensor* pe,
                                 struct ggml_tensor* clip_fea = NULL,
                                 struct ggml_tensor* vace_context = NULL,
                                 float vace_strength = 1.f,
                                 int64_t N = 1) {
    // x: [N*C, T, H, W], C => in_dim
    // vace_context: [N*vace_in_dim, T, H, W]
    // timestep: [N,] or [T]
    // context: [N, L, text_dim]
    // return: [N, t_len*h_len*w_len, out_dim*pt*ph*pw]
@@ -1845,10 +1929,35 @@
context_img_len = clip_fea->ne[1]; // 257
}
// vace_patch_embedding
ggml_tensor* c = NULL;
if (params.vace_layers > 0) {
auto vace_patch_embedding = std::dynamic_pointer_cast<Conv3d>(blocks["vace_patch_embedding"]);
c = vace_patch_embedding->forward(ctx, vace_context); // [N*dim, t_len, h_len, w_len]
c = ggml_reshape_3d(ctx, c, c->ne[0] * c->ne[1] * c->ne[2], c->ne[3] / N, N); // [N, dim, t_len*h_len*w_len]
c = ggml_nn_cont(ctx, ggml_torch_permute(ctx, c, 1, 0, 2, 3)); // [N, t_len*h_len*w_len, dim]
}
auto x_orig = x;
for (int i = 0; i < params.num_layers; i++) {
auto block = std::dynamic_pointer_cast<WanAttentionBlock>(blocks["blocks." + std::to_string(i)]);
x = block->forward(ctx, backend, x, e0, pe, context, context_img_len);
auto iter = params.vace_layers_mapping.find(i);
if (iter != params.vace_layers_mapping.end()) {
int n = iter->second;
auto vace_block = std::dynamic_pointer_cast<VaceWanAttentionBlock>(blocks["vace_blocks." + std::to_string(n)]);
auto result = vace_block->forward(ctx, backend, c, x_orig, e0, pe, context, context_img_len);
auto c_skip = result.first;
c = result.second;
c_skip = ggml_scale(ctx, c_skip, vace_strength);
x = ggml_add(ctx, x, c_skip);
}
}
x = head->forward(ctx, x, e); // [N, t_len*h_len*w_len, pt*ph*pw*out_dim]
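Summarizing the loop above: the VACE branch runs alongside the main transformer; at each mapped layer the VACE block consumes the running VACE activation c together with x_orig, and its after_proj output is scaled by vace_strength and added into x. A toy, runnable scalar model of that injection pattern (all numbers and stand-in ops are made up purely to show the control flow):

    #include <cstdio>
    #include <map>

    int main() {
        std::map<int, int> vace_layers_mapping = {{0, 0}, {2, 1}};  // hypothetical mapping
        float x = 1.0f, x_orig = x, c = 0.5f, vace_strength = 0.8f;
        for (int i = 0; i < 4; i++) {
            x *= 1.1f;                                    // stand-in for blocks[i]->forward(x, ...)
            auto it = vace_layers_mapping.find(i);
            if (it != vace_layers_mapping.end()) {
                float c_in   = (it->second == 0) ? c + x_orig : c;  // vace_blocks.0 adds x via before_proj
                float c_out  = c_in * 0.9f;               // stand-in for the shared attention block body
                float c_skip = c_out * 0.7f;              // stand-in for after_proj
                c = c_out;                                // carried into the next VACE block
                x += vace_strength * c_skip;              // scaled hint injected into the main stream
            }
        }
        printf("x = %f\n", x);
        return 0;
    }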
@@ -1864,6 +1973,8 @@
                            struct ggml_tensor* pe,
                            struct ggml_tensor* clip_fea = NULL,
                            struct ggml_tensor* time_dim_concat = NULL,
                            struct ggml_tensor* vace_context = NULL,
                            float vace_strength = 1.f,
                            int64_t N = 1) {
    // Forward pass of DiT.
    // x: [N*C, T, H, W]

@@ -1892,7 +2003,7 @@
    t_len = ((x->ne[2] + (std::get<0>(params.patch_size) / 2)) / std::get<0>(params.patch_size));
}
-auto out = forward_orig(ctx, backend, x, timestep, context, pe, clip_fea, N); // [N, t_len*h_len*w_len, pt*ph*pw*C]
+auto out = forward_orig(ctx, backend, x, timestep, context, pe, clip_fea, vace_context, vace_strength, N); // [N, t_len*h_len*w_len, pt*ph*pw*C]
out = unpatchify(ctx, out, t_len, h_len, w_len); // [N*C, (T+pad_t) + (T2+pad_t2), H + pad_h, W + pad_w]
@@ -1927,7 +2038,19 @@
std::string tensor_name = pair.first;
if (tensor_name.find(prefix) == std::string::npos)
continue;
-size_t pos = tensor_name.find("blocks.");
+size_t pos = tensor_name.find("vace_blocks.");
if (pos != std::string::npos) {
tensor_name = tensor_name.substr(pos); // remove prefix
auto items = split_string(tensor_name, '.');
if (items.size() > 1) {
int block_index = atoi(items[1].c_str());
if (block_index + 1 > wan_params.vace_layers) {
wan_params.vace_layers = block_index + 1;
}
}
continue;
}
pos = tensor_name.find("blocks.");
if (pos != std::string::npos) {
tensor_name = tensor_name.substr(pos); // remove prefix
auto items = split_string(tensor_name, '.');
@@ -1937,6 +2060,7 @@
wan_params.num_layers = block_index + 1;
}
}
continue;
}
if (tensor_name.find("img_emb") != std::string::npos) {
wan_params.model_type = "i2v";
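The architecture sniffing above infers vace_layers (and num_layers) from the highest block index that appears in the checkpoint's tensor names. A standalone sketch of the same idea (the tensor names are illustrative, not from a real checkpoint dump, and a simple prefix check stands in for the project's split_string helper):

    #include <cstdio>
    #include <cstdlib>
    #include <string>
    #include <vector>

    int main() {
        // Illustrative tensor names only.
        std::vector<std::string> names = {"vace_blocks.7.after_proj.weight", "blocks.29.ffn.2.weight"};
        int vace_layers = 0, num_layers = 0;
        for (const auto& name : names) {
            const char* key   = nullptr;
            int*        count = nullptr;
            if (name.rfind("vace_blocks.", 0) == 0) { key = "vace_blocks."; count = &vace_layers; }
            else if (name.rfind("blocks.", 0) == 0) { key = "blocks.";      count = &num_layers;  }
            if (key) {
                int idx = atoi(name.c_str() + std::string(key).size());  // parse the block index
                if (idx + 1 > *count)
                    *count = idx + 1;
            }
        }
        printf("vace_layers=%d num_layers=%d\n", vace_layers, num_layers);  // 8 and 30
    }

In the real loader the match is a substring search rather than a prefix check, which is why the "vace_blocks." branch runs first and then continues — otherwise a VACE tensor would also be counted by the plain "blocks." branch.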
@@ -1958,7 +2082,11 @@
wan_params.out_dim = 48;
wan_params.text_len = 512;
} else {
if (wan_params.vace_layers > 0) {
    desc = "Wan2.1-VACE-1.3B";
} else {
    desc = "Wan2.1-T2V-1.3B";
}
wan_params.dim = 1536;
wan_params.eps = 1e-06;
wan_params.ffn_dim = 8960;
@@ -1974,7 +2102,11 @@
desc = "Wan2.2-I2V-14B";
wan_params.in_dim = 36;
} else {
if (wan_params.vace_layers > 0) {
    desc = "Wan2.x-VACE-14B";
} else {
    desc = "Wan2.x-T2V-14B";
}
wan_params.in_dim = 16;
}
} else {
@@ -2015,7 +2147,9 @@
                             struct ggml_tensor* context,
                             struct ggml_tensor* clip_fea = NULL,
                             struct ggml_tensor* c_concat = NULL,
-                            struct ggml_tensor* time_dim_concat = NULL) {
+                            struct ggml_tensor* time_dim_concat = NULL,
+                            struct ggml_tensor* vace_context = NULL,
+                            float vace_strength = 1.f) {
    struct ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, WAN_GRAPH_SIZE, false);

    x = to_backend(x);
@@ -2024,6 +2158,7 @@
    clip_fea = to_backend(clip_fea);
    c_concat = to_backend(c_concat);
    time_dim_concat = to_backend(time_dim_concat);
    vace_context = to_backend(vace_context);

    pe_vec = Rope::gen_wan_pe(x->ne[2],
                              x->ne[1],
@@ -2053,7 +2188,9 @@
                             context,
                             pe,
                             clip_fea,
-                            time_dim_concat);
+                            time_dim_concat,
+                            vace_context,
+                            vace_strength);

    ggml_build_forward_expand(gf, out);
@@ -2067,10 +2204,12 @@
                 struct ggml_tensor* clip_fea = NULL,
                 struct ggml_tensor* c_concat = NULL,
                 struct ggml_tensor* time_dim_concat = NULL,
                 struct ggml_tensor* vace_context = NULL,
                 float vace_strength = 1.f,
                 struct ggml_tensor** output = NULL,
                 struct ggml_context* output_ctx = NULL) {
    auto get_graph = [&]() -> struct ggml_cgraph* {
-       return build_graph(x, timesteps, context, clip_fea, c_concat, time_dim_concat);
+       return build_graph(x, timesteps, context, clip_fea, c_concat, time_dim_concat, vace_context, vace_strength);
    };

    GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
@@ -2108,7 +2247,7 @@
    struct ggml_tensor* out = NULL;

    int t0 = ggml_time_ms();
-   compute(8, x, timesteps, context, NULL, NULL, NULL, &out, work_ctx);
+   compute(8, x, timesteps, context, NULL, NULL, NULL, NULL, 1.f, &out, work_ctx);
    int t1 = ggml_time_ms();
    print_ggml_tensor(out);