Compare commits

..

No commits in common. "f16a110f8776398ef23a2a6b7b57522c2471637a" and "545fac4f3fb0117a4e962b1a04cf933a7e635933" have entirely different histories.

41 changed files with 7159 additions and 7793 deletions

View File

@ -11,7 +11,7 @@ Caching methods accelerate diffusion inference by reusing intermediate computati
| `dbcache` | DiT models | Block-level L1 residual threshold | | `dbcache` | DiT models | Block-level L1 residual threshold |
| `taylorseer` | DiT models | Taylor series approximation | | `taylorseer` | DiT models | Taylor series approximation |
| `cache-dit` | DiT models | Combined DBCache + TaylorSeer | | `cache-dit` | DiT models | Combined DBCache + TaylorSeer |
| `spectrum` | UNET and DiT models | Chebyshev + Taylor output forecasting | | `spectrum` | UNET models | Chebyshev + Taylor output forecasting |
### UCache (UNET Models) ### UCache (UNET Models)
@ -111,9 +111,9 @@ Mask values: `1` = compute, `0` = can cache.
--scm-policy dynamic --scm-policy dynamic
``` ```
### Spectrum (UNET and DiT Models) ### Spectrum (UNET Models)
Spectrum uses Chebyshev polynomial fitting blended with Taylor extrapolation to predict denoised outputs, skipping entire forward passes. Based on the paper [Spectrum: Adaptive Spectral Feature Forecasting for Efficient Diffusion Sampling](https://github.com/tingyu215/Spectrum). Spectrum uses Chebyshev polynomial fitting blended with Taylor extrapolation to predict denoised outputs, skipping entire UNet forward passes. Based on the paper [Spectrum: Adaptive Spectral Feature Forecasting for Efficient Diffusion Sampling](https://github.com/tingyu215/Spectrum).
```bash ```bash
sd-cli -m model.safetensors -p "a cat" --cache-mode spectrum sd-cli -m model.safetensors -p "a cat" --cache-mode spectrum

View File

@ -601,7 +601,7 @@ int main(int argc, const char* argv[]) {
if (gen_params.end_image_path.size() > 0) { if (gen_params.end_image_path.size() > 0) {
vae_decode_only = false; vae_decode_only = false;
if (!load_image_and_update_size(gen_params.end_image_path, end_image)) { if (!load_image_and_update_size(gen_params.init_image_path, end_image)) {
return 1; return 1;
} }
} }

View File

@ -602,19 +602,20 @@ namespace Anima {
return Rope::embed_nd(ids, bs, axis_thetas, axes_dim); return Rope::embed_nd(ids, bs, axis_thetas, axes_dim);
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor, ggml_cgraph* build_graph(ggml_tensor* x,
const sd::Tensor<float>& timesteps_tensor, ggml_tensor* timesteps,
const sd::Tensor<float>& context_tensor = {}, ggml_tensor* context,
const sd::Tensor<int32_t>& t5_ids_tensor = {}, ggml_tensor* t5_ids = nullptr,
const sd::Tensor<float>& t5_weights_tensor = {}) { ggml_tensor* t5_weights = nullptr) {
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* t5_ids = make_optional_input(t5_ids_tensor);
ggml_tensor* t5_weights = make_optional_input(t5_weights_tensor);
GGML_ASSERT(x->ne[3] == 1); GGML_ASSERT(x->ne[3] == 1);
ggml_cgraph* gf = new_graph_custom(ANIMA_GRAPH_SIZE); ggml_cgraph* gf = new_graph_custom(ANIMA_GRAPH_SIZE);
x = to_backend(x);
timesteps = to_backend(timesteps);
context = to_backend(context);
t5_ids = to_backend(t5_ids);
t5_weights = to_backend(t5_weights);
int64_t pad_h = (net.patch_size - x->ne[1] % net.patch_size) % net.patch_size; int64_t pad_h = (net.patch_size - x->ne[1] % net.patch_size) % net.patch_size;
int64_t pad_w = (net.patch_size - x->ne[0] % net.patch_size) % net.patch_size; int64_t pad_w = (net.patch_size - x->ne[0] % net.patch_size) % net.patch_size;
int64_t h_pad = x->ne[1] + pad_h; int64_t h_pad = x->ne[1] + pad_h;
@ -666,16 +667,18 @@ namespace Anima {
return gf; return gf;
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const sd::Tensor<float>& x, ggml_tensor* x,
const sd::Tensor<float>& timesteps, ggml_tensor* timesteps,
const sd::Tensor<float>& context = {}, ggml_tensor* context,
const sd::Tensor<int32_t>& t5_ids = {}, ggml_tensor* t5_ids = nullptr,
const sd::Tensor<float>& t5_weights = {}) { ggml_tensor* t5_weights = nullptr,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, t5_ids, t5_weights); return build_graph(x, timesteps, context, t5_ids, t5_weights);
}; };
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim()); return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
} }
}; };
} // namespace Anima } // namespace Anima

View File

@ -1,4 +1,4 @@
#ifndef __AUTO_ENCODER_KL_HPP__ #ifndef __AUTO_ENCODER_KL_HPP__
#define __AUTO_ENCODER_KL_HPP__ #define __AUTO_ENCODER_KL_HPP__
#include "vae.hpp" #include "vae.hpp"
@ -685,9 +685,10 @@ struct AutoEncoderKL : public VAE {
ae.get_param_tensors(tensors, prefix); ae.get_param_tensors(tensors, prefix);
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) { ggml_cgraph* build_graph(ggml_tensor* z, bool decode_graph) {
ggml_cgraph* gf = ggml_new_graph(compute_ctx); ggml_cgraph* gf = ggml_new_graph(compute_ctx);
ggml_tensor* z = make_input(z_tensor);
z = to_backend(z);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
@ -698,100 +699,184 @@ struct AutoEncoderKL : public VAE {
return gf; return gf;
} }
sd::Tensor<float> _compute(const int n_threads, bool _compute(const int n_threads,
const sd::Tensor<float>& z, ggml_tensor* z,
bool decode_graph) override { bool decode_graph,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) override {
GGML_ASSERT(!decode_only || decode_graph); GGML_ASSERT(!decode_only || decode_graph);
auto get_graph = [&]() -> ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(z, decode_graph); return build_graph(z, decode_graph);
}; };
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), z.dim()); // ggml_set_f32(z, 0.5f);
// print_ggml_tensor(z);
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
} }
sd::Tensor<float> gaussian_latent_sample(const sd::Tensor<float>& moments, std::shared_ptr<RNG> rng) { ggml_tensor* gaussian_latent_sample(ggml_context* work_ctx, ggml_tensor* moments, std::shared_ptr<RNG> rng) {
// ldm.modules.distributions.distributions.DiagonalGaussianDistribution.sample // ldm.modules.distributions.distributions.DiagonalGaussianDistribution.sample
auto chunks = sd::ops::chunk(moments, 2, 2); ggml_tensor* latents = ggml_new_tensor_4d(work_ctx, moments->type, moments->ne[0], moments->ne[1], moments->ne[2] / 2, moments->ne[3]);
const auto& mean = chunks[0]; ggml_tensor* noise = ggml_dup_tensor(work_ctx, latents);
const auto& logvar = chunks[1]; ggml_ext_im_set_randn_f32(noise, rng);
sd::Tensor<float> stddev = sd::ops::exp(0.5f * sd::ops::clamp(logvar, -30.0f, 20.0f)); {
sd::Tensor<float> noise = sd::Tensor<float>::randn_like(mean, rng); float mean = 0;
sd::Tensor<float> latents = mean + stddev * noise; float logvar = 0;
float value = 0;
float std_ = 0;
for (int i = 0; i < latents->ne[3]; i++) {
for (int j = 0; j < latents->ne[2]; j++) {
for (int k = 0; k < latents->ne[1]; k++) {
for (int l = 0; l < latents->ne[0]; l++) {
mean = ggml_ext_tensor_get_f32(moments, l, k, j, i);
logvar = ggml_ext_tensor_get_f32(moments, l, k, j + (int)latents->ne[2], i);
logvar = std::max(-30.0f, std::min(logvar, 20.0f));
std_ = std::exp(0.5f * logvar);
value = mean + std_ * ggml_ext_tensor_get_f32(noise, l, k, j, i);
// printf("%d %d %d %d -> %f\n", i, j, k, l, value);
ggml_ext_tensor_set_f32(latents, value, l, k, j, i);
}
}
}
}
}
return latents; return latents;
} }
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override { ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
if (sd_version_is_flux2(version)) { if (sd_version_is_flux2(version)) {
return vae_output; return vae_output;
} else if (version == VERSION_SD1_PIX2PIX) { } else if (version == VERSION_SD1_PIX2PIX) {
return sd::ops::chunk(vae_output, 2, 2)[0]; return ggml_view_3d(work_ctx,
vae_output,
vae_output->ne[0],
vae_output->ne[1],
vae_output->ne[2] / 2,
vae_output->nb[1],
vae_output->nb[2],
0);
} else { } else {
return gaussian_latent_sample(vae_output, rng); return gaussian_latent_sample(work_ctx, vae_output, rng);
} }
} }
std::pair<sd::Tensor<float>, sd::Tensor<float>> get_latents_mean_std(const sd::Tensor<float>& latents, int channel_dim) { void get_latents_mean_std_vec(ggml_tensor* latents, int channel_dim, std::vector<float>& latents_mean_vec, std::vector<float>& latents_std_vec) {
GGML_ASSERT(channel_dim >= 0 && static_cast<size_t>(channel_dim) < static_cast<size_t>(latents.dim())); // flux2
if (sd_version_is_flux2(version)) { if (sd_version_is_flux2(version)) {
GGML_ASSERT(latents.shape()[channel_dim] == 128); GGML_ASSERT(latents->ne[channel_dim] == 128);
std::vector<int64_t> stats_shape(static_cast<size_t>(latents.dim()), 1); latents_mean_vec = {-0.0676f, -0.0715f, -0.0753f, -0.0745f, 0.0223f, 0.0180f, 0.0142f, 0.0184f,
stats_shape[static_cast<size_t>(channel_dim)] = latents.shape()[channel_dim]; -0.0001f, -0.0063f, -0.0002f, -0.0031f, -0.0272f, -0.0281f, -0.0276f, -0.0290f,
-0.0769f, -0.0672f, -0.0902f, -0.0892f, 0.0168f, 0.0152f, 0.0079f, 0.0086f,
auto mean_tensor = sd::Tensor<float>::from_vector({-0.0676f, -0.0715f, -0.0753f, -0.0745f, 0.0223f, 0.0180f, 0.0142f, 0.0184f, 0.0083f, 0.0015f, 0.0003f, -0.0043f, -0.0439f, -0.0419f, -0.0438f, -0.0431f,
-0.0001f, -0.0063f, -0.0002f, -0.0031f, -0.0272f, -0.0281f, -0.0276f, -0.0290f, -0.0102f, -0.0132f, -0.0066f, -0.0048f, -0.0311f, -0.0306f, -0.0279f, -0.0180f,
-0.0769f, -0.0672f, -0.0902f, -0.0892f, 0.0168f, 0.0152f, 0.0079f, 0.0086f, 0.0030f, 0.0015f, 0.0126f, 0.0145f, 0.0347f, 0.0338f, 0.0337f, 0.0283f,
0.0083f, 0.0015f, 0.0003f, -0.0043f, -0.0439f, -0.0419f, -0.0438f, -0.0431f, 0.0020f, 0.0047f, 0.0047f, 0.0050f, 0.0123f, 0.0081f, 0.0081f, 0.0146f,
-0.0102f, -0.0132f, -0.0066f, -0.0048f, -0.0311f, -0.0306f, -0.0279f, -0.0180f, 0.0681f, 0.0679f, 0.0767f, 0.0732f, -0.0462f, -0.0474f, -0.0392f, -0.0511f,
0.0030f, 0.0015f, 0.0126f, 0.0145f, 0.0347f, 0.0338f, 0.0337f, 0.0283f, -0.0528f, -0.0477f, -0.0470f, -0.0517f, -0.0317f, -0.0316f, -0.0345f, -0.0283f,
0.0020f, 0.0047f, 0.0047f, 0.0050f, 0.0123f, 0.0081f, 0.0081f, 0.0146f, 0.0510f, 0.0445f, 0.0578f, 0.0458f, -0.0412f, -0.0458f, -0.0487f, -0.0467f,
0.0681f, 0.0679f, 0.0767f, 0.0732f, -0.0462f, -0.0474f, -0.0392f, -0.0511f, -0.0088f, -0.0106f, -0.0088f, -0.0046f, -0.0376f, -0.0432f, -0.0436f, -0.0499f,
-0.0528f, -0.0477f, -0.0470f, -0.0517f, -0.0317f, -0.0316f, -0.0345f, -0.0283f, 0.0118f, 0.0166f, 0.0203f, 0.0279f, 0.0113f, 0.0129f, 0.0016f, 0.0072f,
0.0510f, 0.0445f, 0.0578f, 0.0458f, -0.0412f, -0.0458f, -0.0487f, -0.0467f, -0.0118f, -0.0018f, -0.0141f, -0.0054f, -0.0091f, -0.0138f, -0.0145f, -0.0187f,
-0.0088f, -0.0106f, -0.0088f, -0.0046f, -0.0376f, -0.0432f, -0.0436f, -0.0499f, 0.0323f, 0.0305f, 0.0259f, 0.0300f, 0.0540f, 0.0614f, 0.0495f, 0.0590f,
0.0118f, 0.0166f, 0.0203f, 0.0279f, 0.0113f, 0.0129f, 0.0016f, 0.0072f, -0.0511f, -0.0603f, -0.0478f, -0.0524f, -0.0227f, -0.0274f, -0.0154f, -0.0255f,
-0.0118f, -0.0018f, -0.0141f, -0.0054f, -0.0091f, -0.0138f, -0.0145f, -0.0187f, -0.0572f, -0.0565f, -0.0518f, -0.0496f, 0.0116f, 0.0054f, 0.0163f, 0.0104f};
0.0323f, 0.0305f, 0.0259f, 0.0300f, 0.0540f, 0.0614f, 0.0495f, 0.0590f, latents_std_vec = {
-0.0511f, -0.0603f, -0.0478f, -0.0524f, -0.0227f, -0.0274f, -0.0154f, -0.0255f, 1.8029f, 1.7786f, 1.7868f, 1.7837f, 1.7717f, 1.7590f, 1.7610f, 1.7479f,
-0.0572f, -0.0565f, -0.0518f, -0.0496f, 0.0116f, 0.0054f, 0.0163f, 0.0104f}); 1.7336f, 1.7373f, 1.7340f, 1.7343f, 1.8626f, 1.8527f, 1.8629f, 1.8589f,
mean_tensor.reshape_(stats_shape); 1.7593f, 1.7526f, 1.7556f, 1.7583f, 1.7363f, 1.7400f, 1.7355f, 1.7394f,
auto std_tensor = sd::Tensor<float>::from_vector({1.8029f, 1.7786f, 1.7868f, 1.7837f, 1.7717f, 1.7590f, 1.7610f, 1.7479f, 1.7342f, 1.7246f, 1.7392f, 1.7304f, 1.7551f, 1.7513f, 1.7559f, 1.7488f,
1.7336f, 1.7373f, 1.7340f, 1.7343f, 1.8626f, 1.8527f, 1.8629f, 1.8589f, 1.8449f, 1.8454f, 1.8550f, 1.8535f, 1.8240f, 1.7813f, 1.7854f, 1.7945f,
1.7593f, 1.7526f, 1.7556f, 1.7583f, 1.7363f, 1.7400f, 1.7355f, 1.7394f, 1.8047f, 1.7876f, 1.7695f, 1.7676f, 1.7782f, 1.7667f, 1.7925f, 1.7848f,
1.7342f, 1.7246f, 1.7392f, 1.7304f, 1.7551f, 1.7513f, 1.7559f, 1.7488f, 1.7579f, 1.7407f, 1.7483f, 1.7368f, 1.7961f, 1.7998f, 1.7920f, 1.7925f,
1.8449f, 1.8454f, 1.8550f, 1.8535f, 1.8240f, 1.7813f, 1.7854f, 1.7945f, 1.7780f, 1.7747f, 1.7727f, 1.7749f, 1.7526f, 1.7447f, 1.7657f, 1.7495f,
1.8047f, 1.7876f, 1.7695f, 1.7676f, 1.7782f, 1.7667f, 1.7925f, 1.7848f, 1.7775f, 1.7720f, 1.7813f, 1.7813f, 1.8162f, 1.8013f, 1.8023f, 1.8033f,
1.7579f, 1.7407f, 1.7483f, 1.7368f, 1.7961f, 1.7998f, 1.7920f, 1.7925f, 1.7527f, 1.7331f, 1.7563f, 1.7482f, 1.7610f, 1.7507f, 1.7681f, 1.7613f,
1.7780f, 1.7747f, 1.7727f, 1.7749f, 1.7526f, 1.7447f, 1.7657f, 1.7495f, 1.7665f, 1.7545f, 1.7828f, 1.7726f, 1.7896f, 1.7999f, 1.7864f, 1.7760f,
1.7775f, 1.7720f, 1.7813f, 1.7813f, 1.8162f, 1.8013f, 1.8023f, 1.8033f, 1.7613f, 1.7625f, 1.7560f, 1.7577f, 1.7783f, 1.7671f, 1.7810f, 1.7799f,
1.7527f, 1.7331f, 1.7563f, 1.7482f, 1.7610f, 1.7507f, 1.7681f, 1.7613f, 1.7201f, 1.7068f, 1.7265f, 1.7091f, 1.7793f, 1.7578f, 1.7502f, 1.7455f,
1.7665f, 1.7545f, 1.7828f, 1.7726f, 1.7896f, 1.7999f, 1.7864f, 1.7760f, 1.7587f, 1.7500f, 1.7525f, 1.7362f, 1.7616f, 1.7572f, 1.7444f, 1.7430f,
1.7613f, 1.7625f, 1.7560f, 1.7577f, 1.7783f, 1.7671f, 1.7810f, 1.7799f, 1.7509f, 1.7610f, 1.7634f, 1.7612f, 1.7254f, 1.7135f, 1.7321f, 1.7226f,
1.7201f, 1.7068f, 1.7265f, 1.7091f, 1.7793f, 1.7578f, 1.7502f, 1.7455f, 1.7664f, 1.7624f, 1.7718f, 1.7664f, 1.7457f, 1.7441f, 1.7569f, 1.7530f};
1.7587f, 1.7500f, 1.7525f, 1.7362f, 1.7616f, 1.7572f, 1.7444f, 1.7430f,
1.7509f, 1.7610f, 1.7634f, 1.7612f, 1.7254f, 1.7135f, 1.7321f, 1.7226f,
1.7664f, 1.7624f, 1.7718f, 1.7664f, 1.7457f, 1.7441f, 1.7569f, 1.7530f});
std_tensor.reshape_(stats_shape);
return {std::move(mean_tensor), std::move(std_tensor)};
} else { } else {
GGML_ABORT("unknown version %d", version); GGML_ABORT("unknown version %d", version);
} }
} }
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override { ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
ggml_tensor* vae_latents = ggml_dup(work_ctx, latents);
if (sd_version_is_flux2(version)) { if (sd_version_is_flux2(version)) {
int channel_dim = 2; int channel_dim = 2;
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents, channel_dim); std::vector<float> latents_mean_vec;
return (latents * std_tensor) / scale_factor + mean_tensor; std::vector<float> latents_std_vec;
get_latents_mean_std_vec(latents, channel_dim, latents_mean_vec, latents_std_vec);
float mean;
float std_;
for (int i = 0; i < latents->ne[3]; i++) {
if (channel_dim == 3) {
mean = latents_mean_vec[i];
std_ = latents_std_vec[i];
}
for (int j = 0; j < latents->ne[2]; j++) {
if (channel_dim == 2) {
mean = latents_mean_vec[j];
std_ = latents_std_vec[j];
}
for (int k = 0; k < latents->ne[1]; k++) {
for (int l = 0; l < latents->ne[0]; l++) {
float value = ggml_ext_tensor_get_f32(latents, l, k, j, i);
value = value * std_ / scale_factor + mean;
ggml_ext_tensor_set_f32(vae_latents, value, l, k, j, i);
}
}
}
}
} else {
ggml_ext_tensor_iter(latents, [&](ggml_tensor* latents, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float value = ggml_ext_tensor_get_f32(latents, i0, i1, i2, i3);
value = (value / scale_factor) + shift_factor;
ggml_ext_tensor_set_f32(vae_latents, value, i0, i1, i2, i3);
});
} }
return (latents / scale_factor) + shift_factor; return vae_latents;
} }
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override { ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
ggml_tensor* diffusion_latents = ggml_dup(work_ctx, latents);
if (sd_version_is_flux2(version)) { if (sd_version_is_flux2(version)) {
int channel_dim = 2; int channel_dim = 2;
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents, channel_dim); std::vector<float> latents_mean_vec;
return ((latents - mean_tensor) * scale_factor) / std_tensor; std::vector<float> latents_std_vec;
get_latents_mean_std_vec(latents, channel_dim, latents_mean_vec, latents_std_vec);
float mean;
float std_;
for (int i = 0; i < latents->ne[3]; i++) {
if (channel_dim == 3) {
mean = latents_mean_vec[i];
std_ = latents_std_vec[i];
}
for (int j = 0; j < latents->ne[2]; j++) {
if (channel_dim == 2) {
mean = latents_mean_vec[j];
std_ = latents_std_vec[j];
}
for (int k = 0; k < latents->ne[1]; k++) {
for (int l = 0; l < latents->ne[0]; l++) {
float value = ggml_ext_tensor_get_f32(latents, l, k, j, i);
value = (value - mean) * scale_factor / std_;
ggml_ext_tensor_set_f32(diffusion_latents, value, l, k, j, i);
}
}
}
}
} else {
ggml_ext_tensor_iter(latents, [&](ggml_tensor* latents, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float value = ggml_ext_tensor_get_f32(latents, i0, i1, i2, i3);
value = (value - shift_factor) * scale_factor;
ggml_ext_tensor_set_f32(diffusion_latents, value, i0, i1, i2, i3);
});
} }
return (latents - shift_factor) * scale_factor; return diffusion_latents;
} }
int get_encoder_output_channels(int input_channels) { int get_encoder_output_channels(int input_channels) {
@ -804,26 +889,24 @@ struct AutoEncoderKL : public VAE {
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
ggml_context* ctx = ggml_init(params); ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr); GGML_ASSERT(work_ctx != nullptr);
{ {
// CPU, x{1, 3, 64, 64}: Pass // CPU, x{1, 3, 64, 64}: Pass
// CUDA, x{1, 3, 64, 64}: Pass, but still get wrong result for some image, may be due to internal nan // CUDA, x{1, 3, 64, 64}: Pass, but still get wrong result for some image, may be due to internal nan
// CPU, x{2, 3, 64, 64}: Wrong result // CPU, x{2, 3, 64, 64}: Wrong result
// CUDA, x{2, 3, 64, 64}: Wrong result, and different from CPU result // CUDA, x{2, 3, 64, 64}: Wrong result, and different from CPU result
sd::Tensor<float> x({64, 64, 3, 2}); auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 64, 64, 3, 2);
x.fill_(0.5f); ggml_set_f32(x, 0.5f);
print_sd_tensor(x); print_ggml_tensor(x);
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = _compute(8, x, false); _compute(8, x, false, &out, work_ctx);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("encode test done in %lldms", t1 - t0); LOG_DEBUG("encode test done in %lldms", t1 - t0);
} }
@ -832,18 +915,16 @@ struct AutoEncoderKL : public VAE {
// CUDA, z{1, 4, 8, 8}: Pass // CUDA, z{1, 4, 8, 8}: Pass
// CPU, z{3, 4, 8, 8}: Wrong result // CPU, z{3, 4, 8, 8}: Wrong result
// CUDA, z{3, 4, 8, 8}: Wrong result, and different from CPU result // CUDA, z{3, 4, 8, 8}: Wrong result, and different from CPU result
sd::Tensor<float> z({8, 8, 4, 1}); auto z = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 4, 1);
z.fill_(0.5f); ggml_set_f32(z, 0.5f);
print_sd_tensor(z); print_ggml_tensor(z);
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = _compute(8, z, true); _compute(8, z, true, &out, work_ctx);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("decode test done in %lldms", t1 - t0); LOG_DEBUG("decode test done in %lldms", t1 - t0);
} }
}; };

View File

@ -8,9 +8,7 @@
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include "condition_cache_utils.hpp"
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
#include "tensor.hpp"
struct DBCacheConfig { struct DBCacheConfig {
bool enabled = false; bool enabled = false;
@ -773,37 +771,35 @@ struct CacheDitConditionState {
return it != cache_diffs.end() && !it->second.diff.empty(); return it != cache_diffs.end() && !it->second.diff.empty();
} }
void update_cache(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) { void update_cache(const void* cond, const float* input, const float* output, size_t size) {
CacheEntry& entry = cache_diffs[cond]; CacheEntry& entry = cache_diffs[cond];
if (!sd::store_condition_cache_diff(&entry.diff, input, output)) { entry.diff.resize(size);
entry.prev_input.clear(); for (size_t i = 0; i < size; i++) {
entry.prev_output.clear(); entry.diff[i] = output[i] - input[i];
entry.has_prev = false;
return;
} }
size_t size = static_cast<size_t>(output.numel());
const float* input_data = input.data();
const float* output_data = output.data();
entry.prev_input.resize(size); entry.prev_input.resize(size);
entry.prev_output.resize(size); entry.prev_output.resize(size);
for (size_t i = 0; i < size; i++) { for (size_t i = 0; i < size; i++) {
entry.prev_input[i] = input_data[i]; entry.prev_input[i] = input[i];
entry.prev_output[i] = output_data[i]; entry.prev_output[i] = output[i];
} }
entry.has_prev = true; entry.has_prev = true;
} }
void apply_cache(const void* cond, void apply_cache(const void* cond, const float* input, float* output, size_t size) {
const sd::Tensor<float>& input,
sd::Tensor<float>* output) {
auto it = cache_diffs.find(cond); auto it = cache_diffs.find(cond);
if (it == cache_diffs.end() || it->second.diff.empty()) if (it == cache_diffs.end() || it->second.diff.empty())
return; return;
sd::apply_condition_cache_diff(it->second.diff, input, output); if (it->second.diff.size() != size)
return;
for (size_t i = 0; i < size; i++) {
output[i] = input[i] + it->second.diff[i];
}
} }
bool before_condition(const void* cond, const sd::Tensor<float>& input, sd::Tensor<float>* output, float sigma, int step_index) { bool before_condition(const void* cond, ggml_tensor* input, ggml_tensor* output, float sigma, int step_index) {
if (!enabled() || step_index < 0) if (!enabled() || step_index < 0)
return false; return false;
@ -823,7 +819,8 @@ struct CacheDitConditionState {
if (skip_current_step) { if (skip_current_step) {
if (has_cache(cond)) { if (has_cache(cond)) {
apply_cache(cond, input, output); apply_cache(cond, (float*)input->data, (float*)output->data,
static_cast<size_t>(ggml_nelements(output)));
return true; return true;
} }
return false; return false;
@ -836,13 +833,13 @@ struct CacheDitConditionState {
if (it == cache_diffs.end() || !it->second.has_prev) if (it == cache_diffs.end() || !it->second.has_prev)
return false; return false;
size_t ne = static_cast<size_t>(input.numel()); size_t ne = static_cast<size_t>(ggml_nelements(input));
if (it->second.prev_input.size() != ne) if (it->second.prev_input.size() != ne)
return false; return false;
const float* input_data = input.data(); float* input_data = (float*)input->data;
float diff = CacheDitState::calculate_residual_diff( float diff = CacheDitState::calculate_residual_diff(
it->second.prev_input.data(), input_data, ne); it->second.prev_input.data(), input_data, ne);
float effective_threshold = config.residual_diff_threshold; float effective_threshold = config.residual_diff_threshold;
if (config.Fn_compute_blocks > 0) { if (config.Fn_compute_blocks > 0) {
@ -862,7 +859,7 @@ struct CacheDitConditionState {
cached_steps.push_back(current_step_index); cached_steps.push_back(current_step_index);
continuous_cached_steps++; continuous_cached_steps++;
accumulated_residual_diff += diff; accumulated_residual_diff += diff;
apply_cache(cond, input, output); apply_cache(cond, input_data, (float*)output->data, ne);
return true; return true;
} }
@ -870,14 +867,15 @@ struct CacheDitConditionState {
return false; return false;
} }
void after_condition(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) { void after_condition(const void* cond, ggml_tensor* input, ggml_tensor* output) {
if (!step_is_active()) if (!step_is_active())
return; return;
update_cache(cond, input, output); size_t ne = static_cast<size_t>(ggml_nelements(output));
update_cache(cond, (float*)input->data, (float*)output->data, ne);
if (cond == anchor_condition && taylor_config.enabled) { if (cond == anchor_condition && taylor_config.enabled) {
taylor_state.update_derivatives(output.data(), static_cast<size_t>(output.numel()), current_step_index); taylor_state.update_derivatives((float*)output->data, ne, current_step_index);
} }
} }

View File

@ -957,14 +957,15 @@ struct CLIPTextModelRunner : public GGMLRunner {
return model.forward(ctx, input_ids, embeddings, mask, max_token_idx, return_pooled, clip_skip); return model.forward(ctx, input_ids, embeddings, mask, max_token_idx, return_pooled, clip_skip);
} }
ggml_cgraph* build_graph(const sd::Tensor<int32_t>& input_ids_tensor, ggml_cgraph* build_graph(ggml_tensor* input_ids,
int num_custom_embeddings = 0, int num_custom_embeddings = 0,
void* custom_embeddings_data = nullptr, void* custom_embeddings_data = nullptr,
size_t max_token_idx = 0, size_t max_token_idx = 0,
bool return_pooled = false, bool return_pooled = false,
int clip_skip = -1) { int clip_skip = -1) {
ggml_cgraph* gf = new_graph_custom(2048); ggml_cgraph* gf = new_graph_custom(2048);
ggml_tensor* input_ids = make_input(input_ids_tensor);
input_ids = to_backend(input_ids);
ggml_tensor* embeddings = nullptr; ggml_tensor* embeddings = nullptr;
@ -1003,21 +1004,19 @@ struct CLIPTextModelRunner : public GGMLRunner {
return gf; return gf;
} }
sd::Tensor<float> compute(const int n_threads, bool compute(const int n_threads,
const sd::Tensor<int32_t>& input_ids, ggml_tensor* input_ids,
int num_custom_embeddings, int num_custom_embeddings,
void* custom_embeddings_data, void* custom_embeddings_data,
size_t max_token_idx, size_t max_token_idx,
bool return_pooled, bool return_pooled,
int clip_skip) { int clip_skip,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(input_ids, num_custom_embeddings, custom_embeddings_data, max_token_idx, return_pooled, clip_skip); return build_graph(input_ids, num_custom_embeddings, custom_embeddings_data, max_token_idx, return_pooled, clip_skip);
}; };
auto result = GGMLRunner::compute<float>(get_graph, n_threads, true); return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
if (return_pooled) {
return take_or_empty(std::move(result));
}
return restore_trailing_singleton_dims(std::move(result), 3);
} }
}; };

View File

@ -4,11 +4,11 @@
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
namespace DiT { namespace DiT {
inline ggml_tensor* patchify(ggml_context* ctx, ggml_tensor* patchify(ggml_context* ctx,
ggml_tensor* x, ggml_tensor* x,
int pw, int pw,
int ph, int ph,
bool patch_last = true) { bool patch_last = true) {
// x: [N, C, H, W] // x: [N, C, H, W]
// return: [N, h*w, C*ph*pw] if patch_last else [N, h*w, ph*pw*C] // return: [N, h*w, C*ph*pw] if patch_last else [N, h*w, ph*pw*C]
int64_t N = x->ne[3]; int64_t N = x->ne[3];
@ -33,13 +33,13 @@ namespace DiT {
return x; return x;
} }
inline ggml_tensor* unpatchify(ggml_context* ctx, ggml_tensor* unpatchify(ggml_context* ctx,
ggml_tensor* x, ggml_tensor* x,
int64_t h, int64_t h,
int64_t w, int64_t w,
int ph, int ph,
int pw, int pw,
bool patch_last = true) { bool patch_last = true) {
// x: [N, h*w, C*ph*pw] if patch_last else [N, h*w, ph*pw*C] // x: [N, h*w, C*ph*pw] if patch_last else [N, h*w, ph*pw*C]
// return: [N, C, H, W] // return: [N, C, H, W]
int64_t N = x->ne[2]; int64_t N = x->ne[2];
@ -64,10 +64,10 @@ namespace DiT {
return x; return x;
} }
inline ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx, ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx,
ggml_tensor* x, ggml_tensor* x,
int ph, int ph,
int pw) { int pw) {
int64_t W = x->ne[0]; int64_t W = x->ne[0];
int64_t H = x->ne[1]; int64_t H = x->ne[1];
@ -77,23 +77,23 @@ namespace DiT {
return x; return x;
} }
inline ggml_tensor* pad_and_patchify(GGMLRunnerContext* ctx, ggml_tensor* pad_and_patchify(GGMLRunnerContext* ctx,
ggml_tensor* x, ggml_tensor* x,
int ph, int ph,
int pw, int pw,
bool patch_last = true) { bool patch_last = true) {
x = pad_to_patch_size(ctx, x, ph, pw); x = pad_to_patch_size(ctx, x, ph, pw);
x = patchify(ctx->ggml_ctx, x, ph, pw, patch_last); x = patchify(ctx->ggml_ctx, x, ph, pw, patch_last);
return x; return x;
} }
inline ggml_tensor* unpatchify_and_crop(ggml_context* ctx, ggml_tensor* unpatchify_and_crop(ggml_context* ctx,
ggml_tensor* x, ggml_tensor* x,
int64_t H, int64_t H,
int64_t W, int64_t W,
int ph, int ph,
int pw, int pw,
bool patch_last = true) { bool patch_last = true) {
int pad_h = (ph - H % ph) % ph; int pad_h = (ph - H % ph) % ph;
int pad_w = (pw - W % pw) % pw; int pad_w = (pw - W % pw) % pw;
int64_t h = ((H + pad_h) / ph); int64_t h = ((H + pad_h) / ph);
@ -105,4 +105,4 @@ namespace DiT {
} }
} // namespace DiT } // namespace DiT
#endif // __COMMON_DIT_HPP__ #endif // __COMMON_DIT_HPP__

View File

@ -1,64 +0,0 @@
#ifndef __CONDITION_CACHE_UTILS_HPP__
#define __CONDITION_CACHE_UTILS_HPP__
#include <vector>
#include "tensor.hpp"
namespace sd {
/// Computes the element-wise residual (output - input) and stores it in *diff.
/// Returns true on success; returns false — clearing *diff wherever a stale
/// residual could otherwise be mistaken for a fresh one — when either tensor
/// is missing/empty, the element counts disagree, or a data pointer is null.
inline bool store_condition_cache_diff(std::vector<float>* diff,
                                       const sd::Tensor<float>& input,
                                       const sd::Tensor<float>& output) {
    if (diff == nullptr || input.empty() || output.empty()) {
        return false;
    }
    const size_t n_in  = static_cast<size_t>(input.numel());
    const size_t n_out = static_cast<size_t>(output.numel());
    if (n_in == 0 || n_in != n_out) {
        // Mismatched shapes: a residual would be meaningless, so drop any old one.
        diff->clear();
        return false;
    }
    const float* in_ptr  = input.data();
    const float* out_ptr = output.data();
    if (in_ptr == nullptr || out_ptr == nullptr) {
        diff->clear();
        return false;
    }
    diff->resize(n_out);
    float* dst = diff->data();
    for (size_t i = 0; i < n_out; ++i) {
        dst[i] = out_ptr[i] - in_ptr[i];
    }
    return true;
}
/// Reconstructs an output tensor from an input tensor plus a stored residual:
/// *output = input, then output[i] += diff[i] for every element.
/// Returns false without touching *output's contents meaningfully when the
/// output pointer is null, the input or residual is empty, or the residual
/// length does not match the input's element count.
inline bool apply_condition_cache_diff(const std::vector<float>& diff,
                                       const sd::Tensor<float>& input,
                                       sd::Tensor<float>* output) {
    if (output == nullptr || input.empty() || diff.empty()) {
        return false;
    }
    const size_t n = static_cast<size_t>(input.numel());
    if (n == 0 || diff.size() != n) {
        return false;
    }
    // Copy the input first, then add the residual in place.
    *output = input;
    float* out_ptr = output->data();
    if (out_ptr == nullptr) {
        return false;
    }
    for (size_t i = 0; i < n; ++i) {
        out_ptr[i] += diff[i];
    }
    return true;
}
} // namespace sd
#endif // __CONDITION_CACHE_UTILS_HPP__

File diff suppressed because it is too large Load Diff

View File

@ -310,13 +310,11 @@ struct ControlNet : public GGMLRunner {
SDVersion version = VERSION_SD1; SDVersion version = VERSION_SD1;
ControlNetBlock control_net; ControlNetBlock control_net;
ggml_backend_buffer_t control_buffer = nullptr; ggml_backend_buffer_t control_buffer = nullptr; // keep control output tensors in backend memory
ggml_context* control_ctx = nullptr; ggml_context* control_ctx = nullptr;
std::vector<ggml_tensor*> control_outputs_ggml; std::vector<ggml_tensor*> controls; // (12 input block outputs, 1 middle block output) SD 1.5
ggml_tensor* guided_hint_output_ggml = nullptr; ggml_tensor* guided_hint = nullptr; // guided_hint cache, for faster inference
std::vector<sd::Tensor<float>> controls; bool guided_hint_cached = false;
sd::Tensor<float> guided_hint;
bool guided_hint_cached = false;
ControlNet(ggml_backend_t backend, ControlNet(ggml_backend_t backend,
bool offload_params_to_cpu, bool offload_params_to_cpu,
@ -337,16 +335,16 @@ struct ControlNet : public GGMLRunner {
params.no_alloc = true; params.no_alloc = true;
control_ctx = ggml_init(params); control_ctx = ggml_init(params);
control_outputs_ggml.resize(outs.size() - 1); controls.resize(outs.size() - 1);
size_t control_buffer_size = 0; size_t control_buffer_size = 0;
guided_hint_output_ggml = ggml_dup_tensor(control_ctx, outs[0]); guided_hint = ggml_dup_tensor(control_ctx, outs[0]);
control_buffer_size += ggml_nbytes(guided_hint_output_ggml); control_buffer_size += ggml_nbytes(guided_hint);
for (int i = 0; i < outs.size() - 1; i++) { for (int i = 0; i < outs.size() - 1; i++) {
control_outputs_ggml[i] = ggml_dup_tensor(control_ctx, outs[i + 1]); controls[i] = ggml_dup_tensor(control_ctx, outs[i + 1]);
control_buffer_size += ggml_nbytes(control_outputs_ggml[i]); control_buffer_size += ggml_nbytes(controls[i]);
} }
control_buffer = ggml_backend_alloc_ctx_tensors(control_ctx, runtime_backend); control_buffer = ggml_backend_alloc_ctx_tensors(control_ctx, runtime_backend);
@ -363,10 +361,8 @@ struct ControlNet : public GGMLRunner {
ggml_free(control_ctx); ggml_free(control_ctx);
control_ctx = nullptr; control_ctx = nullptr;
} }
guided_hint_output_ggml = nullptr; guided_hint = nullptr;
guided_hint_cached = false; guided_hint_cached = false;
guided_hint = {};
control_outputs_ggml.clear();
controls.clear(); controls.clear();
} }
@ -378,33 +374,29 @@ struct ControlNet : public GGMLRunner {
control_net.get_param_tensors(tensors, prefix); control_net.get_param_tensors(tensors, prefix);
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor, ggml_cgraph* build_graph(ggml_tensor* x,
const sd::Tensor<float>& hint_tensor, ggml_tensor* hint,
const sd::Tensor<float>& timesteps_tensor, ggml_tensor* timesteps,
const sd::Tensor<float>& context_tensor = {}, ggml_tensor* context,
const sd::Tensor<float>& y_tensor = {}) { ggml_tensor* y = nullptr) {
ggml_cgraph* gf = new_graph_custom(CONTROL_NET_GRAPH_SIZE); ggml_cgraph* gf = new_graph_custom(CONTROL_NET_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor); x = to_backend(x);
ggml_tensor* hint = nullptr; if (guided_hint_cached) {
ggml_tensor* timesteps = make_input(timesteps_tensor); hint = nullptr;
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* y = make_optional_input(y_tensor);
ggml_tensor* guided_hint_input = nullptr;
if (guided_hint_cached && !guided_hint.empty()) {
guided_hint_input = make_input(guided_hint);
hint = nullptr;
} else { } else {
hint = make_input(hint_tensor); hint = to_backend(hint);
} }
context = to_backend(context);
y = to_backend(y);
timesteps = to_backend(timesteps);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
auto outs = control_net.forward(&runner_ctx, auto outs = control_net.forward(&runner_ctx,
x, x,
hint, hint,
guided_hint_input, guided_hint_cached ? guided_hint : nullptr,
timesteps, timesteps,
context, context,
y); y);
@ -413,20 +405,22 @@ struct ControlNet : public GGMLRunner {
alloc_control_ctx(outs); alloc_control_ctx(outs);
} }
ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[0], guided_hint_output_ggml)); ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[0], guided_hint));
for (int i = 0; i < outs.size() - 1; i++) { for (int i = 0; i < outs.size() - 1; i++) {
ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[i + 1], control_outputs_ggml[i])); ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[i + 1], controls[i]));
} }
return gf; return gf;
} }
std::optional<std::vector<sd::Tensor<float>>> compute(int n_threads, bool compute(int n_threads,
const sd::Tensor<float>& x, ggml_tensor* x,
const sd::Tensor<float>& hint, ggml_tensor* hint,
const sd::Tensor<float>& timesteps, ggml_tensor* timesteps,
const sd::Tensor<float>& context = {}, ggml_tensor* context,
const sd::Tensor<float>& y = {}) { ggml_tensor* y,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size] // context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
@ -435,24 +429,12 @@ struct ControlNet : public GGMLRunner {
return build_graph(x, hint, timesteps, context, y); return build_graph(x, hint, timesteps, context, y);
}; };
auto compute_result = GGMLRunner::compute<float>(get_graph, n_threads, false); bool res = GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
if (!compute_result.has_value()) { if (res) {
return std::nullopt; // cache guided_hint
guided_hint_cached = true;
} }
return res;
if (guided_hint_output_ggml != nullptr) {
guided_hint = restore_trailing_singleton_dims(sd::make_sd_tensor_from_ggml<float>(guided_hint_output_ggml),
4);
}
controls.clear();
controls.reserve(control_outputs_ggml.size());
for (ggml_tensor* control : control_outputs_ggml) {
auto control_host = restore_trailing_singleton_dims(sd::make_sd_tensor_from_ggml<float>(control), 4);
GGML_ASSERT(!control_host.empty());
controls.push_back(std::move(control_host));
}
guided_hint_cached = true;
return controls;
} }
bool load_from_file(const std::string& file_path, int n_threads) { bool load_from_file(const std::string& file_path, int n_threads) {
@ -480,4 +462,4 @@ struct ControlNet : public GGMLRunner {
} }
}; };
#endif // __CONTROL_HPP__ #endif // __CONTROL_HPP__

File diff suppressed because it is too large Load Diff

View File

@ -1,45 +1,37 @@
#ifndef __DIFFUSION_MODEL_H__ #ifndef __DIFFUSION_MODEL_H__
#define __DIFFUSION_MODEL_H__ #define __DIFFUSION_MODEL_H__
#include <optional>
#include "anima.hpp" #include "anima.hpp"
#include "flux.hpp" #include "flux.hpp"
#include "mmdit.hpp" #include "mmdit.hpp"
#include "qwen_image.hpp" #include "qwen_image.hpp"
#include "tensor_ggml.hpp"
#include "unet.hpp" #include "unet.hpp"
#include "wan.hpp" #include "wan.hpp"
#include "z_image.hpp" #include "z_image.hpp"
struct DiffusionParams { struct DiffusionParams {
const sd::Tensor<float>* x = nullptr; ggml_tensor* x = nullptr;
const sd::Tensor<float>* timesteps = nullptr; ggml_tensor* timesteps = nullptr;
const sd::Tensor<float>* context = nullptr; ggml_tensor* context = nullptr;
const sd::Tensor<float>* c_concat = nullptr; ggml_tensor* c_concat = nullptr;
const sd::Tensor<float>* y = nullptr; ggml_tensor* y = nullptr;
const sd::Tensor<int32_t>* t5_ids = nullptr; ggml_tensor* guidance = nullptr;
const sd::Tensor<float>* t5_weights = nullptr; std::vector<ggml_tensor*> ref_latents = {};
const sd::Tensor<float>* guidance = nullptr; bool increase_ref_index = false;
const std::vector<sd::Tensor<float>>* ref_latents = nullptr; int num_video_frames = -1;
bool increase_ref_index = false; std::vector<ggml_tensor*> controls = {};
int num_video_frames = -1; float control_strength = 0.f;
const std::vector<sd::Tensor<float>>* controls = nullptr; ggml_tensor* vace_context = nullptr;
float control_strength = 0.f; float vace_strength = 1.f;
const sd::Tensor<float>* vace_context = nullptr; std::vector<int> skip_layers = {};
float vace_strength = 1.f;
const std::vector<int>* skip_layers = nullptr;
}; };
template <typename T>
static inline const sd::Tensor<T>& tensor_or_empty(const sd::Tensor<T>* tensor) {
static const sd::Tensor<T> kEmpty;
return tensor != nullptr ? *tensor : kEmpty;
}
struct DiffusionModel { struct DiffusionModel {
virtual std::string get_desc() = 0; virtual std::string get_desc() = 0;
virtual sd::Tensor<float> compute(int n_threads, virtual bool compute(int n_threads,
const DiffusionParams& diffusion_params) = 0; DiffusionParams diffusion_params,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) = 0;
virtual void alloc_params_buffer() = 0; virtual void alloc_params_buffer() = 0;
virtual void free_params_buffer() = 0; virtual void free_params_buffer() = 0;
virtual void free_compute_buffer() = 0; virtual void free_compute_buffer() = 0;
@ -101,20 +93,19 @@ struct UNetModel : public DiffusionModel {
unet.set_circular_axes(circular_x, circular_y); unet.set_circular_axes(circular_x, circular_y);
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const DiffusionParams& diffusion_params) override { DiffusionParams diffusion_params,
GGML_ASSERT(diffusion_params.x != nullptr); ggml_tensor** output = nullptr,
GGML_ASSERT(diffusion_params.timesteps != nullptr); ggml_context* output_ctx = nullptr) override {
static const std::vector<sd::Tensor<float>> empty_controls;
return unet.compute(n_threads, return unet.compute(n_threads,
*diffusion_params.x, diffusion_params.x,
*diffusion_params.timesteps, diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context), diffusion_params.context,
tensor_or_empty(diffusion_params.c_concat), diffusion_params.c_concat,
tensor_or_empty(diffusion_params.y), diffusion_params.y,
diffusion_params.num_video_frames, diffusion_params.num_video_frames,
diffusion_params.controls ? *diffusion_params.controls : empty_controls, diffusion_params.controls,
diffusion_params.control_strength); diffusion_params.control_strength, output, output_ctx);
} }
}; };
@ -167,17 +158,18 @@ struct MMDiTModel : public DiffusionModel {
mmdit.set_circular_axes(circular_x, circular_y); mmdit.set_circular_axes(circular_x, circular_y);
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const DiffusionParams& diffusion_params) override { DiffusionParams diffusion_params,
GGML_ASSERT(diffusion_params.x != nullptr); ggml_tensor** output = nullptr,
GGML_ASSERT(diffusion_params.timesteps != nullptr); ggml_context* output_ctx = nullptr) override {
static const std::vector<int> empty_skip_layers;
return mmdit.compute(n_threads, return mmdit.compute(n_threads,
*diffusion_params.x, diffusion_params.x,
*diffusion_params.timesteps, diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context), diffusion_params.context,
tensor_or_empty(diffusion_params.y), diffusion_params.y,
diffusion_params.skip_layers ? *diffusion_params.skip_layers : empty_skip_layers); output,
output_ctx,
diffusion_params.skip_layers);
} }
}; };
@ -232,22 +224,22 @@ struct FluxModel : public DiffusionModel {
flux.set_circular_axes(circular_x, circular_y); flux.set_circular_axes(circular_x, circular_y);
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const DiffusionParams& diffusion_params) override { DiffusionParams diffusion_params,
GGML_ASSERT(diffusion_params.x != nullptr); ggml_tensor** output = nullptr,
GGML_ASSERT(diffusion_params.timesteps != nullptr); ggml_context* output_ctx = nullptr) override {
static const std::vector<sd::Tensor<float>> empty_ref_latents;
static const std::vector<int> empty_skip_layers;
return flux.compute(n_threads, return flux.compute(n_threads,
*diffusion_params.x, diffusion_params.x,
*diffusion_params.timesteps, diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context), diffusion_params.context,
tensor_or_empty(diffusion_params.c_concat), diffusion_params.c_concat,
tensor_or_empty(diffusion_params.y), diffusion_params.y,
tensor_or_empty(diffusion_params.guidance), diffusion_params.guidance,
diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_ref_latents, diffusion_params.ref_latents,
diffusion_params.increase_ref_index, diffusion_params.increase_ref_index,
diffusion_params.skip_layers ? *diffusion_params.skip_layers : empty_skip_layers); output,
output_ctx,
diffusion_params.skip_layers);
} }
}; };
@ -302,16 +294,18 @@ struct AnimaModel : public DiffusionModel {
anima.set_circular_axes(circular_x, circular_y); anima.set_circular_axes(circular_x, circular_y);
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const DiffusionParams& diffusion_params) override { DiffusionParams diffusion_params,
GGML_ASSERT(diffusion_params.x != nullptr); ggml_tensor** output = nullptr,
GGML_ASSERT(diffusion_params.timesteps != nullptr); ggml_context* output_ctx = nullptr) override {
return anima.compute(n_threads, return anima.compute(n_threads,
*diffusion_params.x, diffusion_params.x,
*diffusion_params.timesteps, diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context), diffusion_params.context,
tensor_or_empty(diffusion_params.t5_ids), diffusion_params.c_concat,
tensor_or_empty(diffusion_params.t5_weights)); diffusion_params.y,
output,
output_ctx);
} }
}; };
@ -367,19 +361,21 @@ struct WanModel : public DiffusionModel {
wan.set_circular_axes(circular_x, circular_y); wan.set_circular_axes(circular_x, circular_y);
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const DiffusionParams& diffusion_params) override { DiffusionParams diffusion_params,
GGML_ASSERT(diffusion_params.x != nullptr); ggml_tensor** output = nullptr,
GGML_ASSERT(diffusion_params.timesteps != nullptr); ggml_context* output_ctx = nullptr) override {
return wan.compute(n_threads, return wan.compute(n_threads,
*diffusion_params.x, diffusion_params.x,
*diffusion_params.timesteps, diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context), diffusion_params.context,
tensor_or_empty(diffusion_params.y), diffusion_params.y,
tensor_or_empty(diffusion_params.c_concat), diffusion_params.c_concat,
sd::Tensor<float>(), nullptr,
tensor_or_empty(diffusion_params.vace_context), diffusion_params.vace_context,
diffusion_params.vace_strength); diffusion_params.vace_strength,
output,
output_ctx);
} }
}; };
@ -436,17 +432,18 @@ struct QwenImageModel : public DiffusionModel {
qwen_image.set_circular_axes(circular_x, circular_y); qwen_image.set_circular_axes(circular_x, circular_y);
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const DiffusionParams& diffusion_params) override { DiffusionParams diffusion_params,
GGML_ASSERT(diffusion_params.x != nullptr); ggml_tensor** output = nullptr,
GGML_ASSERT(diffusion_params.timesteps != nullptr); ggml_context* output_ctx = nullptr) override {
static const std::vector<sd::Tensor<float>> empty_ref_latents;
return qwen_image.compute(n_threads, return qwen_image.compute(n_threads,
*diffusion_params.x, diffusion_params.x,
*diffusion_params.timesteps, diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context), diffusion_params.context,
diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_ref_latents, diffusion_params.ref_latents,
true); true, // increase_ref_index
output,
output_ctx);
} }
}; };
@ -502,17 +499,18 @@ struct ZImageModel : public DiffusionModel {
z_image.set_circular_axes(circular_x, circular_y); z_image.set_circular_axes(circular_x, circular_y);
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const DiffusionParams& diffusion_params) override { DiffusionParams diffusion_params,
GGML_ASSERT(diffusion_params.x != nullptr); ggml_tensor** output = nullptr,
GGML_ASSERT(diffusion_params.timesteps != nullptr); ggml_context* output_ctx = nullptr) override {
static const std::vector<sd::Tensor<float>> empty_ref_latents;
return z_image.compute(n_threads, return z_image.compute(n_threads,
*diffusion_params.x, diffusion_params.x,
*diffusion_params.timesteps, diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context), diffusion_params.context,
diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_ref_latents, diffusion_params.ref_latents,
true); true, // increase_ref_index
output,
output_ctx);
} }
}; };

View File

@ -1,15 +1,10 @@
#ifndef __EASYCACHE_HPP__
#define __EASYCACHE_HPP__
#include <cmath> #include <cmath>
#include <limits> #include <limits>
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include "condition_cache_utils.hpp"
#include "denoiser.hpp" #include "denoiser.hpp"
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
#include "tensor.hpp"
struct EasyCacheConfig { struct EasyCacheConfig {
bool enabled = false; bool enabled = false;
@ -24,15 +19,15 @@ struct EasyCacheCacheEntry {
struct EasyCacheState { struct EasyCacheState {
EasyCacheConfig config; EasyCacheConfig config;
Denoiser* denoiser = nullptr; Denoiser* denoiser = nullptr;
float start_sigma = std::numeric_limits<float>::max(); float start_sigma = std::numeric_limits<float>::max();
float end_sigma = 0.0f; float end_sigma = 0.0f;
bool initialized = false; bool initialized = false;
bool initial_step = true; bool initial_step = true;
bool skip_current_step = false; bool skip_current_step = false;
bool step_active = false; bool step_active = false;
const void* anchor_condition = nullptr; const SDCondition* anchor_condition = nullptr;
std::unordered_map<const void*, EasyCacheCacheEntry> cache_diffs; std::unordered_map<const SDCondition*, EasyCacheCacheEntry> cache_diffs;
std::vector<float> prev_input; std::vector<float> prev_input;
std::vector<float> prev_output; std::vector<float> prev_output;
float output_prev_norm = 0.0f; float output_prev_norm = 0.0f;
@ -125,30 +120,41 @@ struct EasyCacheState {
return enabled() && step_active && skip_current_step; return enabled() && step_active && skip_current_step;
} }
bool has_cache(const void* cond) const { bool has_cache(const SDCondition* cond) const {
auto it = cache_diffs.find(cond); auto it = cache_diffs.find(cond);
return it != cache_diffs.end() && !it->second.diff.empty(); return it != cache_diffs.end() && !it->second.diff.empty();
} }
void update_cache(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) { void update_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
EasyCacheCacheEntry& entry = cache_diffs[cond]; EasyCacheCacheEntry& entry = cache_diffs[cond];
sd::store_condition_cache_diff(&entry.diff, input, output); size_t ne = static_cast<size_t>(ggml_nelements(output));
entry.diff.resize(ne);
float* out_data = (float*)output->data;
float* in_data = (float*)input->data;
for (size_t i = 0; i < ne; ++i) {
entry.diff[i] = out_data[i] - in_data[i];
}
} }
void apply_cache(const void* cond, const sd::Tensor<float>& input, sd::Tensor<float>* output) { void apply_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
auto it = cache_diffs.find(cond); auto it = cache_diffs.find(cond);
if (it == cache_diffs.end() || it->second.diff.empty()) { if (it == cache_diffs.end() || it->second.diff.empty()) {
return; return;
} }
sd::apply_condition_cache_diff(it->second.diff, input, output); copy_ggml_tensor(output, input);
float* out_data = (float*)output->data;
const std::vector<float>& diff = it->second.diff;
for (size_t i = 0; i < diff.size(); ++i) {
out_data[i] += diff[i];
}
} }
bool before_condition(const void* cond, bool before_condition(const SDCondition* cond,
const sd::Tensor<float>& input, ggml_tensor* input,
sd::Tensor<float>* output, ggml_tensor* output,
float sigma, float sigma,
int step_index) { int step_index) {
if (!enabled() || step_index < 0 || output == nullptr) { if (!enabled() || step_index < 0) {
return false; return false;
} }
if (step_index != current_step_index) { if (step_index != current_step_index) {
@ -175,12 +181,12 @@ struct EasyCacheState {
if (!has_prev_input || !has_prev_output || !has_cache(cond)) { if (!has_prev_input || !has_prev_output || !has_cache(cond)) {
return false; return false;
} }
size_t ne = static_cast<size_t>(input.numel()); size_t ne = static_cast<size_t>(ggml_nelements(input));
if (prev_input.size() != ne) { if (prev_input.size() != ne) {
return false; return false;
} }
const float* input_data = input.data(); float* input_data = (float*)input->data;
last_input_change = 0.0f; last_input_change = 0.0f;
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
last_input_change += std::fabs(input_data[i] - prev_input[i]); last_input_change += std::fabs(input_data[i] - prev_input[i]);
} }
@ -205,7 +211,7 @@ struct EasyCacheState {
return false; return false;
} }
void after_condition(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) { void after_condition(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
if (!step_is_active()) { if (!step_is_active()) {
return; return;
} }
@ -214,16 +220,16 @@ struct EasyCacheState {
return; return;
} }
size_t ne = static_cast<size_t>(input.numel()); size_t ne = static_cast<size_t>(ggml_nelements(input));
const float* in_data = input.data(); float* in_data = (float*)input->data;
prev_input.resize(ne); prev_input.resize(ne);
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
prev_input[i] = in_data[i]; prev_input[i] = in_data[i];
} }
has_prev_input = true; has_prev_input = true;
const float* out_data = output.data(); float* out_data = (float*)output->data;
float output_change = 0.0f; float output_change = 0.0f;
if (has_prev_output && prev_output.size() == ne) { if (has_prev_output && prev_output.size() == ne) {
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
output_change += std::fabs(out_data[i] - prev_output[i]); output_change += std::fabs(out_data[i] - prev_output[i]);
@ -256,6 +262,4 @@ struct EasyCacheState {
cumulative_change_rate = 0.0f; cumulative_change_rate = 0.0f;
has_last_input_change = false; has_last_input_change = false;
} }
}; };
#endif

View File

@ -341,12 +341,12 @@ struct ESRGAN : public GGMLRunner {
return success; return success;
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor) { ggml_cgraph* build_graph(ggml_tensor* x) {
if (!rrdb_net) if (!rrdb_net)
return nullptr; return nullptr;
constexpr int kGraphNodes = 1 << 16; // 65k constexpr int kGraphNodes = 1 << 16; // 65k
ggml_cgraph* gf = new_graph_custom(kGraphNodes); ggml_cgraph* gf = new_graph_custom(kGraphNodes);
ggml_tensor* x = make_input(x_tensor); x = to_backend(x);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
ggml_tensor* out = rrdb_net->forward(&runner_ctx, x); ggml_tensor* out = rrdb_net->forward(&runner_ctx, x);
@ -354,12 +354,15 @@ struct ESRGAN : public GGMLRunner {
return gf; return gf;
} }
sd::Tensor<float> compute(const int n_threads, bool compute(const int n_threads,
const sd::Tensor<float>& x) { ggml_tensor* x,
auto get_graph = [&]() -> ggml_cgraph* { return build_graph(x); }; ggml_tensor** output,
auto result = restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim()); ggml_context* output_ctx = nullptr) {
return result; auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
} }
}; };
#endif // __ESRGAN_HPP__ #endif // __ESRGAN_HPP__

View File

@ -1178,7 +1178,6 @@ namespace Flux {
std::vector<float> pe_vec; std::vector<float> pe_vec;
std::vector<float> mod_index_arange_vec; std::vector<float> mod_index_arange_vec;
std::vector<float> dct_vec; std::vector<float> dct_vec;
sd::Tensor<float> guidance_tensor;
SDVersion version; SDVersion version;
bool use_mask = false; bool use_mask = false;
@ -1354,42 +1353,29 @@ namespace Flux {
return dct; return dct;
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor, ggml_cgraph* build_graph(ggml_tensor* x,
const sd::Tensor<float>& timesteps_tensor, ggml_tensor* timesteps,
const sd::Tensor<float>& context_tensor = {}, ggml_tensor* context,
const sd::Tensor<float>& c_concat_tensor = {}, ggml_tensor* c_concat,
const sd::Tensor<float>& y_tensor = {}, ggml_tensor* y,
const sd::Tensor<float>& guidance_tensor = {}, ggml_tensor* guidance,
const std::vector<sd::Tensor<float>>& ref_latents_tensor = {}, std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false, bool increase_ref_index = false,
std::vector<int> skip_layers = {}) { std::vector<int> skip_layers = {}) {
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* c_concat = make_optional_input(c_concat_tensor);
ggml_tensor* y = make_optional_input(y_tensor);
if (flux_params.guidance_embed || flux_params.is_chroma) {
if (!guidance_tensor.empty()) {
this->guidance_tensor = guidance_tensor;
if (flux_params.is_chroma) {
this->guidance_tensor.fill_(0.f);
}
}
}
ggml_tensor* guidance = make_optional_input(this->guidance_tensor);
std::vector<ggml_tensor*> ref_latents;
ref_latents.reserve(ref_latents_tensor.size());
for (const auto& ref_latent_tensor : ref_latents_tensor) {
ref_latents.push_back(make_input(ref_latent_tensor));
}
GGML_ASSERT(x->ne[3] == 1); GGML_ASSERT(x->ne[3] == 1);
ggml_cgraph* gf = new_graph_custom(FLUX_GRAPH_SIZE); ggml_cgraph* gf = new_graph_custom(FLUX_GRAPH_SIZE);
ggml_tensor* mod_index_arange = nullptr; ggml_tensor* mod_index_arange = nullptr;
ggml_tensor* dct = nullptr; // for chroma radiance ggml_tensor* dct = nullptr; // for chroma radiance
x = to_backend(x);
context = to_backend(context);
if (c_concat != nullptr) {
c_concat = to_backend(c_concat);
}
if (flux_params.is_chroma) { if (flux_params.is_chroma) {
guidance = ggml_set_f32(guidance, 0);
if (!use_mask) { if (!use_mask) {
y = nullptr; y = nullptr;
} }
@ -1399,6 +1385,16 @@ namespace Flux {
mod_index_arange = ggml_new_tensor_1d(compute_ctx, GGML_TYPE_F32, mod_index_arange_vec.size()); mod_index_arange = ggml_new_tensor_1d(compute_ctx, GGML_TYPE_F32, mod_index_arange_vec.size());
set_backend_tensor_data(mod_index_arange, mod_index_arange_vec.data()); set_backend_tensor_data(mod_index_arange, mod_index_arange_vec.data());
} }
y = to_backend(y);
timesteps = to_backend(timesteps);
if (flux_params.guidance_embed || flux_params.is_chroma) {
guidance = to_backend(guidance);
}
for (int i = 0; i < ref_latents.size(); i++) {
ref_latents[i] = to_backend(ref_latents[i]);
}
std::set<int> txt_arange_dims; std::set<int> txt_arange_dims;
if (sd_version_is_flux2(version)) { if (sd_version_is_flux2(version)) {
txt_arange_dims = {3}; txt_arange_dims = {3};
@ -1459,16 +1455,18 @@ namespace Flux {
return gf; return gf;
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const sd::Tensor<float>& x, ggml_tensor* x,
const sd::Tensor<float>& timesteps, ggml_tensor* timesteps,
const sd::Tensor<float>& context = {}, ggml_tensor* context,
const sd::Tensor<float>& c_concat = {}, ggml_tensor* c_concat,
const sd::Tensor<float>& y = {}, ggml_tensor* y,
const sd::Tensor<float>& guidance = {}, ggml_tensor* guidance,
const std::vector<sd::Tensor<float>>& ref_latents = {}, std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false, bool increase_ref_index = false,
std::vector<int> skip_layers = std::vector<int>()) { ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr,
std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size] // context: [N, max_position, hidden_size]
@ -1478,8 +1476,7 @@ namespace Flux {
return build_graph(x, timesteps, context, c_concat, y, guidance, ref_latents, increase_ref_index, skip_layers); return build_graph(x, timesteps, context, c_concat, y, guidance, ref_latents, increase_ref_index, skip_layers);
}; };
auto result = restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim()); return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return result;
} }
void test() { void test() {
@ -1488,51 +1485,41 @@ namespace Flux {
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
ggml_context* ctx = ggml_init(params); ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr); GGML_ASSERT(work_ctx != nullptr);
{ {
// cpu f16: // cpu f16:
// cuda f16: nan // cuda f16: nan
// cuda q8_0: pass // cuda q8_0: pass
sd::Tensor<float> x({16, 16, 128, 1}); auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 128, 1);
// ggml_set_f32(x, 0.01f); // ggml_set_f32(x, 0.01f);
// auto x = load_tensor_from_file(ctx, "chroma_x.bin"); // auto x = load_tensor_from_file(work_ctx, "chroma_x.bin");
// print_ggml_tensor(x); // print_ggml_tensor(x);
std::vector<float> timesteps_vec(1, 1.f); std::vector<float> timesteps_vec(1, 1.f);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec); auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
std::vector<float> guidance_vec(1, 0.f); std::vector<float> guidance_vec(1, 0.f);
auto guidance = sd::Tensor<float>::from_vector(guidance_vec); auto guidance = vector_to_ggml_tensor(work_ctx, guidance_vec);
sd::Tensor<float> context({15360, 256, 1}); auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 15360, 256, 1);
// ggml_set_f32(context, 0.01f); // ggml_set_f32(context, 0.01f);
// auto context = load_tensor_from_file(ctx, "chroma_context.bin"); // auto context = load_tensor_from_file(work_ctx, "chroma_context.bin");
// print_ggml_tensor(context); // print_ggml_tensor(context);
// auto y = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 768, 1); // auto y = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 768, 1);
// ggml_set_f32(y, 0.01f); // ggml_set_f32(y, 0.01f);
auto y = nullptr; auto y = nullptr;
// print_ggml_tensor(y); // print_ggml_tensor(y);
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = compute(8, compute(8, x, timesteps, context, nullptr, y, guidance, {}, false, &out, work_ctx);
x, int64_t t1 = ggml_time_ms();
timesteps,
context,
{},
{},
guidance,
{},
false);
int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("flux test done in %lldms", t1 - t0); LOG_DEBUG("flux test done in %lldms", t1 - t0);
} }
} }

View File

@ -13,7 +13,6 @@
#include <iterator> #include <iterator>
#include <map> #include <map>
#include <memory> #include <memory>
#include <optional>
#include <random> #include <random>
#include <regex> #include <regex>
#include <set> #include <set>
@ -28,7 +27,6 @@
#include "ggml.h" #include "ggml.h"
#include "model.h" #include "model.h"
#include "tensor.hpp"
#ifdef SD_USE_CUDA #ifdef SD_USE_CUDA
#include "ggml-cuda.h" #include "ggml-cuda.h"
@ -51,7 +49,6 @@
#endif #endif
#include "rng.hpp" #include "rng.hpp"
#include "tensor_ggml.hpp"
#include "util.h" #include "util.h"
#define EPS 1e-05f #define EPS 1e-05f
@ -208,6 +205,14 @@ __STATIC_INLINE__ float sd_image_get_f32(sd_image_t image, int64_t iw, int64_t i
return value; return value;
} }
__STATIC_INLINE__ float sd_image_get_f32(sd_image_f32_t image, int64_t iw, int64_t ih, int64_t ic, bool scale = true) {
float value = *(image.data + ih * image.width * image.channel + iw * image.channel + ic);
if (scale) {
value /= 255.f;
}
return value;
}
__STATIC_INLINE__ void print_ggml_tensor(ggml_tensor* tensor, bool shape_only = false, const char* mark = "") { __STATIC_INLINE__ void print_ggml_tensor(ggml_tensor* tensor, bool shape_only = false, const char* mark = "") {
printf("%s (%s): shape(%zu, %zu, %zu, %zu)\n", mark, ggml_type_name(tensor->type), tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]); printf("%s (%s): shape(%zu, %zu, %zu, %zu)\n", mark, ggml_type_name(tensor->type), tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
fflush(stdout); fflush(stdout);
@ -245,56 +250,6 @@ __STATIC_INLINE__ void print_ggml_tensor(ggml_tensor* tensor, bool shape_only =
} }
} }
template <typename T>
__STATIC_INLINE__ void print_sd_tensor(const sd::Tensor<T>& tensor, bool shape_only = false, const char* mark = "") {
printf("%s: shape(", mark);
for (size_t i = 0; i < static_cast<size_t>(tensor.dim()); ++i) {
printf("%s%lld", i == 0 ? "" : ", ", static_cast<long long>(tensor.shape()[i]));
}
printf(")\n");
fflush(stdout);
if (shape_only) {
return;
}
int range = 3;
std::vector<int64_t> shape = tensor.shape();
while (shape.size() < 4) {
shape.push_back(1);
}
for (int64_t i3 = 0; i3 < shape[3]; i3++) {
if (i3 >= range && i3 + range < shape[3]) {
continue;
}
for (int64_t i2 = 0; i2 < shape[2]; i2++) {
if (i2 >= range && i2 + range < shape[2]) {
continue;
}
for (int64_t i1 = 0; i1 < shape[1]; i1++) {
if (i1 >= range && i1 + range < shape[1]) {
continue;
}
for (int64_t i0 = 0; i0 < shape[0]; i0++) {
if (i0 >= range && i0 + range < shape[0]) {
continue;
}
size_t offset = static_cast<size_t>(i0 + shape[0] * (i1 + shape[1] * (i2 + shape[2] * i3)));
printf(" [%lld, %lld, %lld, %lld] = ", static_cast<long long>(i3), static_cast<long long>(i2), static_cast<long long>(i1), static_cast<long long>(i0));
if constexpr (std::is_same_v<T, float>) {
printf("%f\n", tensor[static_cast<int64_t>(offset)]);
} else if constexpr (std::is_same_v<T, ggml_fp16_t>) {
printf("%f\n", ggml_fp16_to_fp32(tensor[static_cast<int64_t>(offset)]));
} else if constexpr (std::is_same_v<T, int32_t>) {
printf("%d\n", tensor[static_cast<int64_t>(offset)]);
} else if constexpr (std::is_same_v<T, int64_t>) {
printf("%lld\n", static_cast<long long>(tensor[static_cast<int64_t>(offset)]));
}
fflush(stdout);
}
}
}
}
}
__STATIC_INLINE__ void ggml_ext_tensor_iter( __STATIC_INLINE__ void ggml_ext_tensor_iter(
ggml_tensor* tensor, ggml_tensor* tensor,
const std::function<void(ggml_tensor*, int64_t, int64_t, int64_t, int64_t)>& fn) { const std::function<void(ggml_tensor*, int64_t, int64_t, int64_t, int64_t)>& fn) {
@ -520,6 +475,99 @@ __STATIC_INLINE__ void ggml_ext_tensor_apply_mask(ggml_tensor* image_data,
} }
} }
__STATIC_INLINE__ void sd_image_f32_to_ggml_tensor(sd_image_f32_t image,
ggml_tensor* tensor,
bool scale = true) {
GGML_ASSERT(image.width == tensor->ne[0]);
GGML_ASSERT(image.height == tensor->ne[1]);
GGML_ASSERT(image.channel == tensor->ne[2]);
GGML_ASSERT(1 == tensor->ne[3]);
GGML_ASSERT(tensor->type == GGML_TYPE_F32);
ggml_ext_tensor_iter(tensor, [&](ggml_tensor* tensor, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float value = sd_image_get_f32(image, i0, i1, i2, scale);
ggml_ext_tensor_set_f32(tensor, value, i0, i1, i2, i3);
});
}
__STATIC_INLINE__ void ggml_ext_tensor_split_2d(ggml_tensor* input,
ggml_tensor* output,
int x,
int y) {
int64_t width = output->ne[0];
int64_t height = output->ne[1];
int64_t channels = output->ne[2];
int64_t ne3 = output->ne[3];
int64_t input_width = input->ne[0];
int64_t input_height = input->ne[1];
GGML_ASSERT(input->type == GGML_TYPE_F32 && output->type == GGML_TYPE_F32);
for (int iy = 0; iy < height; iy++) {
for (int ix = 0; ix < width; ix++) {
for (int k = 0; k < channels; k++) {
for (int l = 0; l < ne3; l++) {
float value = ggml_ext_tensor_get_f32(input, (ix + x) % input_width, (iy + y) % input_height, k, l);
ggml_ext_tensor_set_f32(output, value, ix, iy, k, l);
}
}
}
}
}
// unclamped -> expects x in the range [0-1]
__STATIC_INLINE__ float smootherstep_f32(const float x) {
GGML_ASSERT(x >= 0.f && x <= 1.f);
return x * x * x * (x * (6.0f * x - 15.0f) + 10.0f);
}
__STATIC_INLINE__ void ggml_ext_tensor_merge_2d(ggml_tensor* input,
ggml_tensor* output,
int x,
int y,
int overlap_x,
int overlap_y,
bool circular_x,
bool circular_y,
int x_skip = 0,
int y_skip = 0) {
int64_t width = input->ne[0];
int64_t height = input->ne[1];
int64_t channels = input->ne[2];
int64_t ne3 = input->ne[3];
int64_t img_width = output->ne[0];
int64_t img_height = output->ne[1];
GGML_ASSERT(input->type == GGML_TYPE_F32 && output->type == GGML_TYPE_F32);
for (int iy = y_skip; iy < height; iy++) {
for (int ix = x_skip; ix < width; ix++) {
for (int k = 0; k < channels; k++) {
for (int l = 0; l < ne3; l++) {
float new_value = ggml_ext_tensor_get_f32(input, ix, iy, k, l);
if (overlap_x > 0 || overlap_y > 0) { // blend colors in overlapped area
float old_value = ggml_ext_tensor_get_f32(output, (x + ix) % img_width, (y + iy) % img_height, k, l);
const float x_f_0 = (circular_x || (overlap_x > 0 && x > 0)) ? (ix - x_skip) / float(overlap_x) : 1;
const float x_f_1 = (circular_x || (overlap_x > 0 && x < (img_width - width))) ? (width - ix) / float(overlap_x) : 1;
const float y_f_0 = (circular_y || (overlap_y > 0 && y > 0)) ? (iy - y_skip) / float(overlap_y) : 1;
const float y_f_1 = (circular_y || (overlap_y > 0 && y < (img_height - height))) ? (height - iy) / float(overlap_y) : 1;
const float x_f = std::min(std::min(x_f_0, x_f_1), 1.f);
const float y_f = std::min(std::min(y_f_0, y_f_1), 1.f);
ggml_ext_tensor_set_f32(
output,
old_value + new_value * smootherstep_f32(y_f) * smootherstep_f32(x_f),
(x + ix) % img_width, (y + iy) % img_height, k, l);
} else {
ggml_ext_tensor_set_f32(output, new_value, (x + ix) % img_width, (y + iy) % img_height, k, l);
}
}
}
}
}
}
__STATIC_INLINE__ float ggml_ext_tensor_mean(ggml_tensor* src) { __STATIC_INLINE__ float ggml_ext_tensor_mean(ggml_tensor* src) {
float mean = 0.0f; float mean = 0.0f;
int64_t nelements = ggml_nelements(src); int64_t nelements = ggml_nelements(src);
@ -784,102 +832,22 @@ __STATIC_INLINE__ void sd_tiling_calc_tiles(int& num_tiles_dim,
} }
// Tiling // Tiling
__STATIC_INLINE__ void sd_tiling_non_square(ggml_tensor* input,
ggml_tensor* output,
const int scale,
const int p_tile_size_x,
const int p_tile_size_y,
const float tile_overlap_factor,
const bool circular_x,
const bool circular_y,
on_tile_process on_processing,
bool slient = false) {
output = ggml_set_f32(output, 0);
__STATIC_INLINE__ int64_t sd_tensor_plane_size(const sd::Tensor<float>& tensor) { int input_width = (int)input->ne[0];
GGML_ASSERT(tensor.dim() >= 2); int input_height = (int)input->ne[1];
return tensor.shape()[0] * tensor.shape()[1]; int output_width = (int)output->ne[0];
} int output_height = (int)output->ne[1];
__STATIC_INLINE__ sd::Tensor<float> sd_tensor_split_2d(const sd::Tensor<float>& input, int width, int height, int x, int y) {
GGML_ASSERT(input.dim() >= 4);
std::vector<int64_t> output_shape = input.shape();
output_shape[0] = width;
output_shape[1] = height;
sd::Tensor<float> output(std::move(output_shape));
int64_t input_width = input.shape()[0];
int64_t input_height = input.shape()[1];
int64_t input_plane = sd_tensor_plane_size(input);
int64_t output_plane = sd_tensor_plane_size(output);
int64_t plane_count = input.numel() / input_plane;
for (int iy = 0; iy < height; iy++) {
for (int ix = 0; ix < width; ix++) {
int64_t src_xy = (ix + x) % input_width + input_width * ((iy + y) % input_height);
int64_t dst_xy = ix + width * iy;
for (int64_t plane = 0; plane < plane_count; ++plane) {
output[plane * output_plane + dst_xy] = input[plane * input_plane + src_xy];
}
}
}
return output;
}
__STATIC_INLINE__ void sd_tensor_merge_2d(const sd::Tensor<float>& input,
sd::Tensor<float>* output,
int x,
int y,
int overlap_x,
int overlap_y,
bool circular_x,
bool circular_y,
int x_skip = 0,
int y_skip = 0) {
GGML_ASSERT(output != nullptr);
int64_t width = input.shape()[0];
int64_t height = input.shape()[1];
int64_t img_width = output->shape()[0];
int64_t img_height = output->shape()[1];
int64_t input_plane = sd_tensor_plane_size(input);
int64_t output_plane = sd_tensor_plane_size(*output);
int64_t plane_count = input.numel() / input_plane;
GGML_ASSERT(output->numel() / output_plane == plane_count);
// unclamped -> expects x in the range [0-1]
auto smootherstep_f32 = [](const float x) -> float {
GGML_ASSERT(x >= 0.f && x <= 1.f);
return x * x * x * (x * (6.0f * x - 15.0f) + 10.0f);
};
for (int iy = y_skip; iy < height; iy++) {
for (int ix = x_skip; ix < width; ix++) {
int64_t src_xy = ix + width * iy;
int64_t ox = (x + ix) % img_width;
int64_t oy = (y + iy) % img_height;
int64_t dst_xy = ox + img_width * oy;
for (int64_t plane = 0; plane < plane_count; ++plane) {
float new_value = input[plane * input_plane + src_xy];
if (overlap_x > 0 || overlap_y > 0) {
float old_value = (*output)[plane * output_plane + dst_xy];
const float x_f_0 = (circular_x || (overlap_x > 0 && x > 0)) ? (ix - x_skip) / float(overlap_x) : 1.f;
const float x_f_1 = (circular_x || (overlap_x > 0 && x < (img_width - width))) ? (width - ix) / float(overlap_x) : 1.f;
const float y_f_0 = (circular_y || (overlap_y > 0 && y > 0)) ? (iy - y_skip) / float(overlap_y) : 1.f;
const float y_f_1 = (circular_y || (overlap_y > 0 && y < (img_height - height))) ? (height - iy) / float(overlap_y) : 1.f;
const float x_f = std::min(std::min(x_f_0, x_f_1), 1.f);
const float y_f = std::min(std::min(y_f_0, y_f_1), 1.f);
(*output)[plane * output_plane + dst_xy] =
old_value + new_value * smootherstep_f32(y_f) * smootherstep_f32(x_f);
} else {
(*output)[plane * output_plane + dst_xy] = new_value;
}
}
}
}
}
template <typename Fn>
__STATIC_INLINE__ sd::Tensor<float> process_tiles_2d(const sd::Tensor<float>& input,
int output_width,
int output_height,
int scale,
int p_tile_size_x,
int p_tile_size_y,
float tile_overlap_factor,
bool circular_x,
bool circular_y,
Fn&& on_processing,
bool silent = false) {
sd::Tensor<float> output;
int input_width = static_cast<int>(input.shape()[0]);
int input_height = static_cast<int>(input.shape()[1]);
GGML_ASSERT(((input_width / output_width) == (input_height / output_height)) && GGML_ASSERT(((input_width / output_width) == (input_height / output_height)) &&
((output_width / input_width) == (output_height / input_height))); ((output_width / input_width) == (output_height / input_height)));
@ -888,7 +856,8 @@ __STATIC_INLINE__ sd::Tensor<float> process_tiles_2d(const sd::Tensor<float>& in
int small_width = output_width; int small_width = output_width;
int small_height = output_height; int small_height = output_height;
bool decode = output_width > input_width;
bool decode = output_width > input_width;
if (decode) { if (decode) {
small_width = input_width; small_width = input_width;
small_height = input_height; small_height = input_height;
@ -902,16 +871,25 @@ __STATIC_INLINE__ sd::Tensor<float> process_tiles_2d(const sd::Tensor<float>& in
float tile_overlap_factor_y; float tile_overlap_factor_y;
sd_tiling_calc_tiles(num_tiles_y, tile_overlap_factor_y, small_height, p_tile_size_y, tile_overlap_factor, circular_y); sd_tiling_calc_tiles(num_tiles_y, tile_overlap_factor_y, small_height, p_tile_size_y, tile_overlap_factor, circular_y);
int tile_overlap_x = static_cast<int32_t>(p_tile_size_x * tile_overlap_factor_x); if (!slient) {
LOG_DEBUG("num tiles : %d, %d ", num_tiles_x, num_tiles_y);
LOG_DEBUG("optimal overlap : %f, %f (targeting %f)", tile_overlap_factor_x, tile_overlap_factor_y, tile_overlap_factor);
}
int tile_overlap_x = (int32_t)(p_tile_size_x * tile_overlap_factor_x);
int non_tile_overlap_x = p_tile_size_x - tile_overlap_x; int non_tile_overlap_x = p_tile_size_x - tile_overlap_x;
int tile_overlap_y = static_cast<int32_t>(p_tile_size_y * tile_overlap_factor_y);
int tile_overlap_y = (int32_t)(p_tile_size_y * tile_overlap_factor_y);
int non_tile_overlap_y = p_tile_size_y - tile_overlap_y; int non_tile_overlap_y = p_tile_size_y - tile_overlap_y;
int tile_size_x = p_tile_size_x < small_width ? p_tile_size_x : small_width;
int tile_size_y = p_tile_size_y < small_height ? p_tile_size_y : small_height; int tile_size_x = p_tile_size_x < small_width ? p_tile_size_x : small_width;
int tile_size_y = p_tile_size_y < small_height ? p_tile_size_y : small_height;
int input_tile_size_x = tile_size_x; int input_tile_size_x = tile_size_x;
int input_tile_size_y = tile_size_y; int input_tile_size_y = tile_size_y;
int output_tile_size_x = tile_size_x; int output_tile_size_x = tile_size_x;
int output_tile_size_y = tile_size_y; int output_tile_size_y = tile_size_y;
if (decode) { if (decode) {
output_tile_size_x *= scale; output_tile_size_x *= scale;
output_tile_size_y *= scale; output_tile_size_y *= scale;
@ -920,23 +898,41 @@ __STATIC_INLINE__ sd::Tensor<float> process_tiles_2d(const sd::Tensor<float>& in
input_tile_size_y *= scale; input_tile_size_y *= scale;
} }
int num_tiles = num_tiles_x * num_tiles_y; ggml_init_params params = {};
int tile_count = 1; params.mem_size += input_tile_size_x * input_tile_size_y * input->ne[2] * input->ne[3] * sizeof(float); // input chunk
bool last_y = false; params.mem_size += output_tile_size_x * output_tile_size_y * output->ne[2] * output->ne[3] * sizeof(float); // output chunk
bool last_x = false; params.mem_size += 3 * ggml_tensor_overhead();
float last_time = 0.0f; params.mem_buffer = nullptr;
if (!silent) { params.no_alloc = false;
LOG_DEBUG("num tiles : %d, %d ", num_tiles_x, num_tiles_y);
LOG_DEBUG("optimal overlap : %f, %f (targeting %f)", tile_overlap_factor_x, tile_overlap_factor_y, tile_overlap_factor); if (!slient) {
LOG_DEBUG("tile work buffer size: %.2f MB", params.mem_size / 1024.f / 1024.f);
}
// draft context
ggml_context* tiles_ctx = ggml_init(params);
if (!tiles_ctx) {
LOG_ERROR("ggml_init() failed");
return;
}
// tiling
ggml_tensor* input_tile = ggml_new_tensor_4d(tiles_ctx, GGML_TYPE_F32, input_tile_size_x, input_tile_size_y, input->ne[2], input->ne[3]);
ggml_tensor* output_tile = ggml_new_tensor_4d(tiles_ctx, GGML_TYPE_F32, output_tile_size_x, output_tile_size_y, output->ne[2], output->ne[3]);
int num_tiles = num_tiles_x * num_tiles_y;
if (!slient) {
LOG_DEBUG("processing %i tiles", num_tiles); LOG_DEBUG("processing %i tiles", num_tiles);
pretty_progress(0, num_tiles, 0.0f); pretty_progress(0, num_tiles, 0.0f);
} }
int tile_count = 1;
bool last_y = false, last_x = false;
float last_time = 0.0f;
for (int y = 0; y < small_height && !last_y; y += non_tile_overlap_y) { for (int y = 0; y < small_height && !last_y; y += non_tile_overlap_y) {
int dy = 0; int dy = 0;
if (!circular_y && y + tile_size_y >= small_height) { if (!circular_y && y + tile_size_y >= small_height) {
int original_y = y; int _y = y;
y = small_height - tile_size_y; y = small_height - tile_size_y;
dy = original_y - y; dy = _y - y;
if (decode) { if (decode) {
dy *= scale; dy *= scale;
} }
@ -945,9 +941,9 @@ __STATIC_INLINE__ sd::Tensor<float> process_tiles_2d(const sd::Tensor<float>& in
for (int x = 0; x < small_width && !last_x; x += non_tile_overlap_x) { for (int x = 0; x < small_width && !last_x; x += non_tile_overlap_x) {
int dx = 0; int dx = 0;
if (!circular_x && x + tile_size_x >= small_width) { if (!circular_x && x + tile_size_x >= small_width) {
int original_x = x; int _x = x;
x = small_width - tile_size_x; x = small_width - tile_size_x;
dx = original_x - x; dx = _x - x;
if (decode) { if (decode) {
dx *= scale; dx *= scale;
} }
@ -962,37 +958,38 @@ __STATIC_INLINE__ sd::Tensor<float> process_tiles_2d(const sd::Tensor<float>& in
int overlap_x_out = decode ? tile_overlap_x * scale : tile_overlap_x; int overlap_x_out = decode ? tile_overlap_x * scale : tile_overlap_x;
int overlap_y_out = decode ? tile_overlap_y * scale : tile_overlap_y; int overlap_y_out = decode ? tile_overlap_y * scale : tile_overlap_y;
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
auto input_tile = sd_tensor_split_2d(input, input_tile_size_x, input_tile_size_y, x_in, y_in); ggml_ext_tensor_split_2d(input, input_tile, x_in, y_in);
auto output_tile = on_processing(input_tile); if (on_processing(input_tile, output_tile, false)) {
if (output_tile.empty()) { ggml_ext_tensor_merge_2d(output_tile, output, x_out, y_out, overlap_x_out, overlap_y_out, circular_x, circular_y, dx, dy);
return {};
}
GGML_ASSERT(output_tile.shape()[0] == output_tile_size_x && output_tile.shape()[1] == output_tile_size_y);
if (output.empty()) {
std::vector<int64_t> output_shape = output_tile.shape();
output_shape[0] = output_width;
output_shape[1] = output_height;
output = sd::Tensor<float>::zeros(std::move(output_shape));
}
sd_tensor_merge_2d(output_tile, &output, x_out, y_out, overlap_x_out, overlap_y_out, circular_x, circular_y, dx, dy);
if (!silent) {
int64_t t2 = ggml_time_ms(); int64_t t2 = ggml_time_ms();
last_time = (t2 - t1) / 1000.0f; last_time = (t2 - t1) / 1000.0f;
pretty_progress(tile_count, num_tiles, last_time); pretty_progress(tile_count, num_tiles, last_time);
} else {
LOG_ERROR("Failed to process patch %d at (%d, %d)", tile_count, x, y);
} }
tile_count++; tile_count++;
} }
last_x = false; last_x = false;
} }
if (!silent && tile_count < num_tiles) { if (!slient) {
pretty_progress(num_tiles, num_tiles, last_time); if (tile_count < num_tiles) {
pretty_progress(num_tiles, num_tiles, last_time);
}
} }
if (output.empty()) { ggml_free(tiles_ctx);
return {}; }
}
return output; __STATIC_INLINE__ void sd_tiling(ggml_tensor* input,
ggml_tensor* output,
const int scale,
const int tile_size,
const float tile_overlap_factor,
const bool circular_x,
const bool circular_y,
on_tile_process on_processing) {
sd_tiling_non_square(input, output, scale, tile_size, tile_size, tile_overlap_factor, circular_x, circular_y, on_processing);
} }
__STATIC_INLINE__ ggml_tensor* ggml_ext_group_norm_32(ggml_context* ctx, __STATIC_INLINE__ ggml_tensor* ggml_ext_group_norm_32(ggml_context* ctx,
@ -1591,18 +1588,6 @@ __STATIC_INLINE__ void set_timestep_embedding(std::vector<float> timesteps,
memcpy(((char*)embedding->data), ((char*)embedding_vec.data()), ggml_nbytes(embedding)); memcpy(((char*)embedding->data), ((char*)embedding_vec.data()), ggml_nbytes(embedding));
} }
__STATIC_INLINE__ void set_timestep_embedding(std::vector<float> timesteps,
sd::Tensor<float>* embedding,
int dim,
int max_period = 10000) {
GGML_ASSERT(embedding != nullptr);
std::vector<float> embedding_vec = timestep_embedding(timesteps, dim, max_period);
if (embedding->numel() != static_cast<int64_t>(embedding_vec.size())) {
embedding->resize({dim, static_cast<int64_t>(timesteps.size())});
}
std::copy(embedding_vec.begin(), embedding_vec.end(), embedding->values().begin());
}
__STATIC_INLINE__ ggml_tensor* new_timestep_embedding(ggml_context* ctx, __STATIC_INLINE__ ggml_tensor* new_timestep_embedding(ggml_context* ctx,
std::vector<float> timesteps, std::vector<float> timesteps,
int dim, int dim,
@ -1720,32 +1705,6 @@ protected:
bool circular_x_enabled = false; bool circular_x_enabled = false;
bool circular_y_enabled = false; bool circular_y_enabled = false;
template <typename T>
static sd::Tensor<T> take_or_empty(std::optional<sd::Tensor<T>> tensor) {
if (!tensor.has_value()) {
return {};
}
return std::move(*tensor);
}
template <typename T>
static sd::Tensor<T> restore_trailing_singleton_dims(std::optional<sd::Tensor<T>> tensor,
size_t expected_dim) {
return restore_trailing_singleton_dims(take_or_empty(std::move(tensor)), expected_dim);
}
template <typename T>
static sd::Tensor<T> restore_trailing_singleton_dims(sd::Tensor<T> tensor,
size_t expected_dim) {
if (tensor.empty()) {
return tensor;
}
while (static_cast<size_t>(tensor.dim()) < expected_dim) {
tensor.unsqueeze_(tensor.dim());
}
return tensor;
}
void alloc_params_ctx() { void alloc_params_ctx() {
ggml_init_params params; ggml_init_params params;
params.mem_size = static_cast<size_t>(MAX_PARAMS_TENSOR_NUM * ggml_tensor_overhead()); params.mem_size = static_cast<size_t>(MAX_PARAMS_TENSOR_NUM * ggml_tensor_overhead());
@ -2083,29 +2042,6 @@ public:
backend_tensor_data_map[tensor] = data; backend_tensor_data_map[tensor] = data;
} }
template <typename T>
ggml_tensor* make_input(const sd::Tensor<T>& tensor) {
ggml_tensor* input = sd::make_ggml_tensor(compute_ctx, tensor, false);
set_backend_tensor_data(input, tensor.data());
return input;
}
template <typename T>
ggml_tensor* make_optional_input(const sd::Tensor<T>& tensor) {
if (tensor.empty()) {
return nullptr;
}
return make_input(tensor);
}
template <typename T>
ggml_tensor* make_optional_input(const sd::Tensor<T>* tensor) {
if (tensor == nullptr) {
return nullptr;
}
return make_input(*tensor);
}
ggml_tensor* to_backend(ggml_tensor* tensor) { ggml_tensor* to_backend(ggml_tensor* tensor) {
GGML_ASSERT(compute_ctx != nullptr); GGML_ASSERT(compute_ctx != nullptr);
if (tensor == nullptr) { if (tensor == nullptr) {
@ -2134,24 +2070,24 @@ public:
return ggml_get_tensor(cache_ctx, name.c_str()); return ggml_get_tensor(cache_ctx, name.c_str());
} }
template <typename T> bool compute(get_graph_cb_t get_graph,
std::optional<sd::Tensor<T>> compute(get_graph_cb_t get_graph, int n_threads,
int n_threads, bool free_compute_buffer_immediately = true,
bool free_compute_buffer_immediately, ggml_tensor** output = nullptr,
bool no_return = false) { ggml_context* output_ctx = nullptr) {
if (!offload_params_to_runtime_backend()) { if (!offload_params_to_runtime_backend()) {
LOG_ERROR("%s offload params to runtime backend failed", get_desc().c_str()); LOG_ERROR("%s offload params to runtime backend failed", get_desc().c_str());
return std::nullopt; return false;
} }
if (!alloc_compute_buffer(get_graph)) { if (!alloc_compute_buffer(get_graph)) {
LOG_ERROR("%s alloc compute buffer failed", get_desc().c_str()); LOG_ERROR("%s alloc compute buffer failed", get_desc().c_str());
return std::nullopt; return false;
} }
reset_compute_ctx(); reset_compute_ctx();
ggml_cgraph* gf = get_compute_graph(get_graph); ggml_cgraph* gf = get_compute_graph(get_graph);
if (!ggml_gallocr_alloc_graph(compute_allocr, gf)) { if (!ggml_gallocr_alloc_graph(compute_allocr, gf)) {
LOG_ERROR("%s alloc compute graph failed", get_desc().c_str()); LOG_ERROR("%s alloc compute graph failed", get_desc().c_str());
return std::nullopt; return false;
} }
copy_data_to_backend_tensor(); copy_data_to_backend_tensor();
if (ggml_backend_is_cpu(runtime_backend)) { if (ggml_backend_is_cpu(runtime_backend)) {
@ -2161,19 +2097,26 @@ public:
ggml_status status = ggml_backend_graph_compute(runtime_backend, gf); ggml_status status = ggml_backend_graph_compute(runtime_backend, gf);
if (status != GGML_STATUS_SUCCESS) { if (status != GGML_STATUS_SUCCESS) {
LOG_ERROR("%s compute failed: %s", get_desc().c_str(), ggml_status_to_string(status)); LOG_ERROR("%s compute failed: %s", get_desc().c_str(), ggml_status_to_string(status));
return std::nullopt; return false;
} }
#ifdef GGML_PERF
ggml_graph_print(gf);
#endif
copy_cache_tensors_to_cache_buffer(); copy_cache_tensors_to_cache_buffer();
auto result = ggml_get_tensor(compute_ctx, final_result_name.c_str()); if (output != nullptr) {
std::optional<sd::Tensor<T>> output; auto result = ggml_get_tensor(compute_ctx, final_result_name.c_str());
if (!no_return) { if (*output == nullptr && output_ctx != nullptr) {
output = sd::make_sd_tensor_from_ggml<T>(result); *output = ggml_dup_tensor(output_ctx, result);
}
if (*output != nullptr) {
ggml_ext_backend_tensor_get_and_sync(runtime_backend, result, (*output)->data, 0, ggml_nbytes(*output));
}
} }
if (free_compute_buffer_immediately) { if (free_compute_buffer_immediately) {
free_compute_buffer(); free_compute_buffer();
} }
return output; return true;
} }
void set_flash_attention_enabled(bool enabled) { void set_flash_attention_enabled(bool enabled) {

View File

@ -1,8 +1,6 @@
#include <algorithm>
#include <cstddef> #include <cstddef>
#include <cstdint> #include <cstdint>
#include "ggml.h" #include "ggml.h"
#include "tensor.hpp"
const float wan_21_latent_rgb_proj[16][3] = { const float wan_21_latent_rgb_proj[16][3] = {
{0.015123f, -0.148418f, 0.479828f}, {0.015123f, -0.148418f, 0.479828f},
@ -234,67 +232,3 @@ void preview_latent_video(uint8_t* buffer, ggml_tensor* latents, const float (*l
} }
} }
} }
static inline bool preview_latent_tensor_is_video(const sd::Tensor<float>& latents) {
return latents.dim() == 5;
}
void preview_latent_video(uint8_t* buffer, const sd::Tensor<float>& latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) {
uint32_t latent_width = static_cast<uint32_t>(latents.shape()[0]);
uint32_t latent_height = static_cast<uint32_t>(latents.shape()[1]);
bool is_video = preview_latent_tensor_is_video(latents);
uint32_t frames = is_video ? static_cast<uint32_t>(latents.shape()[2]) : 1;
uint32_t dim = is_video ? static_cast<uint32_t>(latents.shape()[3]) : static_cast<uint32_t>(latents.shape()[2]);
uint32_t rgb_width = latent_width * patch_size;
uint32_t rgb_height = latent_height * patch_size;
uint32_t unpatched_dim = dim / (patch_size * patch_size);
for (uint32_t k = 0; k < frames; k++) {
for (uint32_t rgb_x = 0; rgb_x < rgb_width; rgb_x++) {
for (uint32_t rgb_y = 0; rgb_y < rgb_height; rgb_y++) {
uint32_t latent_x = rgb_x / patch_size;
uint32_t latent_y = rgb_y / patch_size;
uint32_t channel_offset = 0;
if (patch_size > 1) {
channel_offset = ((rgb_y % patch_size) * patch_size + (rgb_x % patch_size));
}
size_t pixel_id = k * rgb_width * rgb_height + rgb_y * rgb_width + rgb_x;
auto latent_value = [&](uint32_t latent_channel) -> float {
return is_video
? latents.values()[latent_x + latent_width * (latent_y + latent_height * (k + frames * latent_channel))]
: latents.values()[latent_x + latent_width * (latent_y + latent_height * latent_channel)];
};
float r = 0.f, g = 0.f, b = 0.f;
if (latent_rgb_proj != nullptr) {
for (uint32_t d = 0; d < unpatched_dim; d++) {
uint32_t latent_channel = d * patch_size * patch_size + channel_offset;
float value = latent_value(latent_channel);
r += value * latent_rgb_proj[d][0];
g += value * latent_rgb_proj[d][1];
b += value * latent_rgb_proj[d][2];
}
} else {
r = latent_value(0);
g = latent_value(1);
b = latent_value(2);
}
if (latent_rgb_bias != nullptr) {
r += latent_rgb_bias[0];
g += latent_rgb_bias[1];
b += latent_rgb_bias[2];
}
r = std::min(1.0f, std::max(0.0f, r * .5f + .5f));
g = std::min(1.0f, std::max(0.0f, g * .5f + .5f));
b = std::min(1.0f, std::max(0.0f, b * .5f + .5f));
buffer[pixel_id * 3 + 0] = (uint8_t)(r * 255);
buffer[pixel_id * 3 + 1] = (uint8_t)(g * 255);
buffer[pixel_id * 3 + 2] = (uint8_t)(b * 255);
}
}
}
}

View File

@ -194,7 +194,6 @@ namespace LLM {
bool padding = false) { bool padding = false) {
if (add_bos_token) { if (add_bos_token) {
tokens.insert(tokens.begin(), BOS_TOKEN_ID); tokens.insert(tokens.begin(), BOS_TOKEN_ID);
weights.insert(weights.begin(), 1.f);
} }
if (max_length > 0 && padding) { if (max_length > 0 && padding) {
size_t n = static_cast<size_t>(std::ceil(tokens.size() * 1.f / max_length)); size_t n = static_cast<size_t>(std::ceil(tokens.size() * 1.f / max_length));
@ -1181,17 +1180,16 @@ namespace LLM {
return hidden_states; return hidden_states;
} }
ggml_cgraph* build_graph(const sd::Tensor<int32_t>& input_ids_tensor, ggml_cgraph* build_graph(ggml_tensor* input_ids,
const sd::Tensor<float>& attention_mask_tensor, ggml_tensor* attention_mask,
const std::vector<std::pair<int, sd::Tensor<float>>>& image_embeds_tensor, std::vector<std::pair<int, ggml_tensor*>> image_embeds,
std::set<int> out_layers) { std::set<int> out_layers) {
ggml_cgraph* gf = ggml_new_graph(compute_ctx); ggml_cgraph* gf = ggml_new_graph(compute_ctx);
ggml_tensor* input_ids = make_input(input_ids_tensor);
std::vector<std::pair<int, ggml_tensor*>> image_embeds; input_ids = to_backend(input_ids);
image_embeds.reserve(image_embeds_tensor.size());
for (const auto& [idx, embed_tensor] : image_embeds_tensor) { for (auto& image_embed : image_embeds) {
ggml_tensor* embed = make_input(embed_tensor); image_embed.second = to_backend(image_embed.second);
image_embeds.emplace_back(idx, embed);
} }
int64_t n_tokens = input_ids->ne[0]; int64_t n_tokens = input_ids->ne[0];
@ -1215,9 +1213,8 @@ namespace LLM {
input_pos_vec.size()); input_pos_vec.size());
set_backend_tensor_data(input_pos, input_pos_vec.data()); set_backend_tensor_data(input_pos, input_pos_vec.data());
ggml_tensor* attention_mask = nullptr; if (attention_mask != nullptr) {
if (!attention_mask_tensor.empty()) { attention_mask = to_backend(attention_mask);
attention_mask = make_input(attention_mask_tensor);
} else { } else {
attention_mask_vec.resize(n_tokens * n_tokens); attention_mask_vec.resize(n_tokens * n_tokens);
for (int i0 = 0; i0 < n_tokens; i0++) { for (int i0 = 0; i0 < n_tokens; i0++) {
@ -1242,15 +1239,17 @@ namespace LLM {
return gf; return gf;
} }
sd::Tensor<float> compute(const int n_threads, bool compute(const int n_threads,
const sd::Tensor<int32_t>& input_ids, ggml_tensor* input_ids,
const sd::Tensor<float>& attention_mask, ggml_tensor* attention_mask,
const std::vector<std::pair<int, sd::Tensor<float>>>& image_embeds, std::vector<std::pair<int, ggml_tensor*>> image_embeds,
std::set<int> out_layers) { std::set<int> out_layers,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(input_ids, attention_mask, image_embeds, out_layers); return build_graph(input_ids, attention_mask, image_embeds, out_layers);
}; };
return take_or_empty(GGMLRunner::compute<float>(get_graph, n_threads, true)); return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
} }
int64_t get_num_image_tokens(int64_t t, int64_t h, int64_t w) { int64_t get_num_image_tokens(int64_t t, int64_t h, int64_t w) {
@ -1289,9 +1288,8 @@ namespace LLM {
return image; return image;
} }
ggml_cgraph* build_encode_image_graph(const sd::Tensor<float>& image_tensor) { ggml_cgraph* build_encode_image_graph(ggml_tensor* image) {
ggml_cgraph* gf = new_graph_custom(LLM_GRAPH_SIZE); ggml_cgraph* gf = new_graph_custom(LLM_GRAPH_SIZE);
ggml_tensor* image = make_input(image_tensor);
GGML_ASSERT(image->ne[1] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0); GGML_ASSERT(image->ne[1] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
GGML_ASSERT(image->ne[0] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0); GGML_ASSERT(image->ne[0] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
@ -1303,6 +1301,8 @@ namespace LLM {
int llm_grid_w = grid_w / params.vision.spatial_merge_size; int llm_grid_w = grid_w / params.vision.spatial_merge_size;
int vit_merger_window_size = params.vision.window_size / params.vision.patch_size / params.vision.spatial_merge_size; int vit_merger_window_size = params.vision.window_size / params.vision.patch_size / params.vision.spatial_merge_size;
image = to_backend(image);
auto pixel_values = process_image(compute_ctx, image); auto pixel_values = process_image(compute_ctx, image);
// window index // window index
@ -1411,12 +1411,14 @@ namespace LLM {
return gf; return gf;
} }
sd::Tensor<float> encode_image(const int n_threads, void encode_image(const int n_threads,
const sd::Tensor<float>& image) { ggml_tensor* image,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_encode_image_graph(image); return build_encode_image_graph(image);
}; };
return take_or_empty(GGMLRunner::compute<float>(get_graph, n_threads, false)); GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
} }
}; };
@ -1495,41 +1497,39 @@ namespace LLM {
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
ggml_context* ctx = ggml_init(params); ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr); GGML_ASSERT(work_ctx != nullptr);
bool test_mistral = false; bool test_mistral = false;
bool test_qwen3 = true; bool test_qwen3 = true;
bool test_vit = false; bool test_vit = false;
bool test_decoder_with_vit = false; bool test_decoder_with_vit = false;
if (test_decoder_with_vit) { if (test_decoder_with_vit) {
sd::Tensor<float> image_embed; ggml_tensor* image_embed = nullptr;
{ {
auto image = sd::load_tensor_from_file_as_tensor<float>("qwen2vl_normalized.bin"); auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin");
print_sd_tensor(image, false, "image"); print_ggml_tensor(image, false, "image");
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = model.encode_image(8, image); model.encode_image(8, image, &out, work_ctx);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out, false, "image_embed");
out = std::move(out_opt);
print_sd_tensor(out, false, "image_embed");
image_embed = out; image_embed = out;
LOG_DEBUG("llm encode_image test done in %lldms", t1 - t0); LOG_DEBUG("llm encode_image test done in %lldms", t1 - t0);
} }
std::string placeholder = "<|image_pad|>"; std::string placeholder = "<|image_pad|>";
std::string img_prompt = "Picture 1: <|vision_start|>"; // [24669, 220, 16, 25, 220, 151652] std::string img_prompt = "Picture 1: <|vision_start|>"; // [24669, 220, 16, 25, 220, 151652]
int64_t num_image_tokens = image_embed.shape()[1]; int64_t num_image_tokens = image_embed->ne[1];
img_prompt.reserve(num_image_tokens * placeholder.size()); img_prompt.reserve(num_image_tokens * placeholder.size());
for (int i = 0; i < num_image_tokens; i++) { for (int i = 0; i < num_image_tokens; i++) {
img_prompt += placeholder; img_prompt += placeholder;
} }
img_prompt += "<|vision_end|>"; img_prompt += "<|vision_end|>";
std::vector<std::pair<int, sd::Tensor<float>>> image_embeds; std::vector<std::pair<int, ggml_tensor*>> image_embeds;
image_embeds.emplace_back(64, image_embed); image_embeds.emplace_back(64, image_embed);
std::pair<int, int> prompt_attn_range; std::pair<int, int> prompt_attn_range;
@ -1547,33 +1547,29 @@ namespace LLM {
printf("%d ", token); printf("%d ", token);
} }
printf("\n"); printf("\n");
auto input_ids = sd::Tensor<int32_t>::from_vector(tokens); auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), image_embeds, {}); model.compute(8, input_ids, nullptr, image_embeds, {}, &out, work_ctx);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0); LOG_DEBUG("llm test done in %lldms", t1 - t0);
} else if (test_vit) { } else if (test_vit) {
// auto image = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 280, 280, 3); // auto image = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 280, 280, 3);
// ggml_set_f32(image, 0.f); // ggml_set_f32(image, 0.f);
auto image = sd::load_tensor_from_file_as_tensor<float>("qwen2vl_normalized.bin"); auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin");
print_sd_tensor(image, false, "image"); print_ggml_tensor(image, false, "image");
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = model.encode_image(8, image); model.encode_image(8, image, &out, work_ctx);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out, false, "out");
out = std::move(out_opt);
print_sd_tensor(out, false, "out");
// auto ref_out = load_tensor_from_file(ctx, "qwen2vl.bin"); // auto ref_out = load_tensor_from_file(work_ctx, "qwen2vl.bin");
// ggml_ext_tensor_diff(ref_out, out, 0.01f); // ggml_ext_tensor_diff(ref_out, out, 0.01f);
LOG_DEBUG("llm test done in %lldms", t1 - t0); LOG_DEBUG("llm test done in %lldms", t1 - t0);
@ -1591,16 +1587,14 @@ namespace LLM {
printf("%d ", token); printf("%d ", token);
} }
printf("\n"); printf("\n");
auto input_ids = sd::Tensor<int32_t>::from_vector(tokens); auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), {}, {10, 20, 30}); model.compute(8, input_ids, nullptr, {}, {10, 20, 30}, &out, work_ctx);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0); LOG_DEBUG("llm test done in %lldms", t1 - t0);
} else if (test_qwen3) { } else if (test_qwen3) {
std::pair<int, int> prompt_attn_range; std::pair<int, int> prompt_attn_range;
@ -1616,16 +1610,14 @@ namespace LLM {
printf("%d ", token); printf("%d ", token);
} }
printf("\n"); printf("\n");
auto input_ids = sd::Tensor<int32_t>::from_vector(tokens); auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), {}, {35}); model.compute(8, input_ids, nullptr, {}, {35}, &out, work_ctx);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0); LOG_DEBUG("llm test done in %lldms", t1 - t0);
} else { } else {
std::pair<int, int> prompt_attn_range; std::pair<int, int> prompt_attn_range;
@ -1641,16 +1633,14 @@ namespace LLM {
printf("%d ", token); printf("%d ", token);
} }
printf("\n"); printf("\n");
auto input_ids = sd::Tensor<int32_t>::from_vector(tokens); auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), {}, {}); model.compute(8, input_ids, nullptr, {}, {}, &out, work_ctx);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0); LOG_DEBUG("llm test done in %lldms", t1 - t0);
} }
} }

View File

@ -792,7 +792,7 @@ struct LoraModel : public GGMLRunner {
auto get_graph = [&]() -> ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_lora_graph(model_tensors, version); return build_lora_graph(model_tensors, version);
}; };
GGMLRunner::compute<float>(get_graph, n_threads, false, true); GGMLRunner::compute(get_graph, n_threads, false);
stat(); stat();
for (auto item : original_tensor_to_final_tensor) { for (auto item : original_tensor_to_final_tensor) {
ggml_tensor* original_tensor = item.first; ggml_tensor* original_tensor = item.first;

View File

@ -836,17 +836,17 @@ struct MMDiTRunner : public GGMLRunner {
mmdit.get_param_tensors(tensors, prefix); mmdit.get_param_tensors(tensors, prefix);
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor, ggml_cgraph* build_graph(ggml_tensor* x,
const sd::Tensor<float>& timesteps_tensor, ggml_tensor* timesteps,
const sd::Tensor<float>& context_tensor = {}, ggml_tensor* context,
const sd::Tensor<float>& y_tensor = {}, ggml_tensor* y,
std::vector<int> skip_layers = std::vector<int>()) { std::vector<int> skip_layers = std::vector<int>()) {
ggml_cgraph* gf = new_graph_custom(MMDIT_GRAPH_SIZE); ggml_cgraph* gf = new_graph_custom(MMDIT_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor); x = to_backend(x);
ggml_tensor* timesteps = make_input(timesteps_tensor); context = to_backend(context);
ggml_tensor* context = make_optional_input(context_tensor); y = to_backend(y);
ggml_tensor* y = make_optional_input(y_tensor); timesteps = to_backend(timesteps);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
ggml_tensor* out = mmdit.forward(&runner_ctx, ggml_tensor* out = mmdit.forward(&runner_ctx,
@ -861,12 +861,14 @@ struct MMDiTRunner : public GGMLRunner {
return gf; return gf;
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const sd::Tensor<float>& x, ggml_tensor* x,
const sd::Tensor<float>& timesteps, ggml_tensor* timesteps,
const sd::Tensor<float>& context = {}, ggml_tensor* context,
const sd::Tensor<float>& y = {}, ggml_tensor* y,
std::vector<int> skip_layers = std::vector<int>()) { ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr,
std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 154, 4096]) or [1, max_position, hidden_size] // context: [N, max_position, hidden_size]([N, 154, 4096]) or [1, max_position, hidden_size]
@ -875,7 +877,7 @@ struct MMDiTRunner : public GGMLRunner {
return build_graph(x, timesteps, context, y, skip_layers); return build_graph(x, timesteps, context, y, skip_layers);
}; };
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim()); return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
} }
void test() { void test() {
@ -884,41 +886,35 @@ struct MMDiTRunner : public GGMLRunner {
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
ggml_context* ctx = ggml_init(params); ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr); GGML_ASSERT(work_ctx != nullptr);
{ {
// cpu f16: pass // cpu f16: pass
// cpu f32: pass // cpu f32: pass
// cuda f16: pass // cuda f16: pass
// cuda f32: pass // cuda f32: pass
sd::Tensor<float> x({128, 128, 16, 1}); auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 128, 128, 16, 1);
std::vector<float> timesteps_vec(1, 999.f); std::vector<float> timesteps_vec(1, 999.f);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec); auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
x.fill_(0.01f); ggml_set_f32(x, 0.01f);
// print_ggml_tensor(x); // print_ggml_tensor(x);
sd::Tensor<float> context({4096, 154, 1}); auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 4096, 154, 1);
context.fill_(0.01f); ggml_set_f32(context, 0.01f);
// print_ggml_tensor(context); // print_ggml_tensor(context);
sd::Tensor<float> y({2048, 1}); auto y = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 2048, 1);
y.fill_(0.01f); ggml_set_f32(y, 0.01f);
// print_ggml_tensor(y); // print_ggml_tensor(y);
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = compute(8, compute(8, x, timesteps, context, y, &out, work_ctx);
x, int64_t t1 = ggml_time_ms();
timesteps,
context,
y);
int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("mmdit test done in %lldms", t1 - t0); LOG_DEBUG("mmdit test done in %lldms", t1 - t0);
} }
} }

View File

@ -162,7 +162,43 @@ uint16_t f8_e4m3_to_f16(uint8_t f8) {
} }
uint16_t f8_e5m2_to_f16(uint8_t fp8) { uint16_t f8_e5m2_to_f16(uint8_t fp8) {
return static_cast<uint16_t>(fp8) << 8; uint8_t sign = (fp8 >> 7) & 0x1;
uint8_t exponent = (fp8 >> 2) & 0x1F;
uint8_t mantissa = fp8 & 0x3;
uint16_t fp16_sign = sign << 15;
uint16_t fp16_exponent;
uint16_t fp16_mantissa;
if (exponent == 0 && mantissa == 0) { // zero
return fp16_sign;
}
if (exponent == 0x1F) { // NAN and INF
fp16_exponent = 0x1F;
fp16_mantissa = mantissa ? (mantissa << 8) : 0;
return fp16_sign | (fp16_exponent << 10) | fp16_mantissa;
}
if (exponent == 0) { // subnormal numbers
fp16_mantissa = (mantissa << 8);
return fp16_sign | fp16_mantissa;
}
// normal numbers
int16_t true_exponent = (int16_t)exponent - 15 + 15;
if (true_exponent <= 0) {
fp16_exponent = 0;
fp16_mantissa = (mantissa << 8);
} else if (true_exponent >= 0x1F) {
fp16_exponent = 0x1F;
fp16_mantissa = 0;
} else {
fp16_exponent = (uint16_t)true_exponent;
fp16_mantissa = mantissa << 8;
}
return fp16_sign | (fp16_exponent << 10) | fp16_mantissa;
} }
void f8_e4m3_to_f16_vec(uint8_t* src, uint16_t* dst, int64_t n) { void f8_e4m3_to_f16_vec(uint8_t* src, uint16_t* dst, int64_t n) {

View File

@ -443,10 +443,11 @@ public:
id_encoder2.get_param_tensors(tensors, prefix); id_encoder2.get_param_tensors(tensors, prefix);
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& id_pixel_values_tensor, ggml_cgraph* build_graph( // ggml_allocr* allocr,
const sd::Tensor<float>& prompt_embeds_tensor, ggml_tensor* id_pixel_values,
std::vector<bool>& class_tokens_mask, ggml_tensor* prompt_embeds,
const sd::Tensor<float>& id_embeds_tensor = {}) { std::vector<bool>& class_tokens_mask,
ggml_tensor* id_embeds) {
ctm.clear(); ctm.clear();
ctmf16.clear(); ctmf16.clear();
ctmpos.clear(); ctmpos.clear();
@ -459,16 +460,16 @@ public:
ggml_cgraph* gf = ggml_new_graph(compute_ctx); ggml_cgraph* gf = ggml_new_graph(compute_ctx);
ggml_tensor* id_pixel_values = make_input(id_pixel_values_tensor);
ggml_tensor* prompt_embeds = make_input(prompt_embeds_tensor);
ggml_tensor* id_embeds = make_optional_input(id_embeds_tensor);
int64_t hidden_size = prompt_embeds->ne[0]; int64_t hidden_size = prompt_embeds->ne[0];
int64_t seq_length = prompt_embeds->ne[1]; int64_t seq_length = prompt_embeds->ne[1];
ggml_type type = GGML_TYPE_F32; ggml_type type = GGML_TYPE_F32;
ggml_tensor* class_tokens_mask_d = ggml_new_tensor_1d(runner_ctx.ggml_ctx, type, class_tokens_mask.size()); ggml_tensor* class_tokens_mask_d = ggml_new_tensor_1d(runner_ctx.ggml_ctx, type, class_tokens_mask.size());
ggml_tensor* id_pixel_values_d = to_backend(id_pixel_values);
ggml_tensor* prompt_embeds_d = to_backend(prompt_embeds);
ggml_tensor* id_embeds_d = to_backend(id_embeds);
ggml_tensor* left = nullptr; ggml_tensor* left = nullptr;
ggml_tensor* right = nullptr; ggml_tensor* right = nullptr;
for (int i = 0; i < class_tokens_mask.size(); i++) { for (int i = 0; i < class_tokens_mask.size(); i++) {
@ -528,18 +529,18 @@ public:
ggml_tensor* updated_prompt_embeds = nullptr; ggml_tensor* updated_prompt_embeds = nullptr;
if (pm_version == PM_VERSION_1) if (pm_version == PM_VERSION_1)
updated_prompt_embeds = id_encoder.forward(&runner_ctx, updated_prompt_embeds = id_encoder.forward(&runner_ctx,
id_pixel_values, id_pixel_values_d,
prompt_embeds, prompt_embeds_d,
class_tokens_mask_d, class_tokens_mask_d,
class_tokens_mask_pos, class_tokens_mask_pos,
left, right); left, right);
else if (pm_version == PM_VERSION_2) else if (pm_version == PM_VERSION_2)
updated_prompt_embeds = id_encoder2.forward(&runner_ctx, updated_prompt_embeds = id_encoder2.forward(&runner_ctx,
id_pixel_values, id_pixel_values_d,
prompt_embeds, prompt_embeds_d,
class_tokens_mask_d, class_tokens_mask_d,
class_tokens_mask_pos, class_tokens_mask_pos,
id_embeds, id_embeds_d,
left, right); left, right);
ggml_build_forward_expand(gf, updated_prompt_embeds); ggml_build_forward_expand(gf, updated_prompt_embeds);
@ -547,16 +548,20 @@ public:
return gf; return gf;
} }
sd::Tensor<float> compute(const int n_threads, bool compute(const int n_threads,
const sd::Tensor<float>& id_pixel_values, ggml_tensor* id_pixel_values,
const sd::Tensor<float>& prompt_embeds, ggml_tensor* prompt_embeds,
const sd::Tensor<float>& id_embeds, ggml_tensor* id_embeds,
std::vector<bool>& class_tokens_mask) { std::vector<bool>& class_tokens_mask,
ggml_tensor** updated_prompt_embeds,
ggml_context* output_ctx) {
auto get_graph = [&]() -> ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
// return build_graph(compute_allocr, id_pixel_values, prompt_embeds, class_tokens_mask);
return build_graph(id_pixel_values, prompt_embeds, class_tokens_mask, id_embeds); return build_graph(id_pixel_values, prompt_embeds, class_tokens_mask, id_embeds);
}; };
return take_or_empty(GGMLRunner::compute<float>(get_graph, n_threads, true)); // GGMLRunner::compute(get_graph, n_threads, updated_prompt_embeds);
return GGMLRunner::compute(get_graph, n_threads, true, updated_prompt_embeds, output_ctx);
} }
}; };

View File

@ -1,241 +1,179 @@
#ifndef __PREPROCESSING_HPP__ #ifndef __PREPROCESSING_HPP__
#define __PREPROCESSING_HPP__ #define __PREPROCESSING_HPP__
#include <cmath>
#include <limits>
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
#define M_PI_ 3.14159265358979323846f #define M_PI_ 3.14159265358979323846f
static inline int64_t preprocessing_offset_4d(const sd::Tensor<float>& tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) { void convolve(ggml_tensor* input, ggml_tensor* output, ggml_tensor* kernel, int padding) {
const auto& shape = tensor.shape(); ggml_init_params params;
int64_t n0 = shape.size() > 0 ? shape[0] : 1; params.mem_size = 80 * input->ne[0] * input->ne[1]; // 20M for 512x512
int64_t n1 = shape.size() > 1 ? shape[1] : 1; params.mem_buffer = nullptr;
int64_t n2 = shape.size() > 2 ? shape[2] : 1; params.no_alloc = false;
return ((i3 * n2 + i2) * n1 + i1) * n0 + i0; ggml_context* ctx0 = ggml_init(params);
ggml_tensor* kernel_fp16 = ggml_new_tensor_4d(ctx0, GGML_TYPE_F16, kernel->ne[0], kernel->ne[1], 1, 1);
ggml_fp32_to_fp16_row((float*)kernel->data, (ggml_fp16_t*)kernel_fp16->data, ggml_nelements(kernel));
ggml_tensor* h = ggml_conv_2d(ctx0, kernel_fp16, input, 1, 1, padding, padding, 1, 1);
ggml_cgraph* gf = ggml_new_graph(ctx0);
ggml_build_forward_expand(gf, ggml_cpy(ctx0, h, output));
ggml_graph_compute_with_ctx(ctx0, gf, 1);
ggml_free(ctx0);
} }
static inline float preprocessing_get_4d(const sd::Tensor<float>& tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) { void gaussian_kernel(ggml_tensor* kernel) {
return tensor.values()[static_cast<size_t>(preprocessing_offset_4d(tensor, i0, i1, i2, i3))]; int ks_mid = static_cast<int>(kernel->ne[0] / 2);
}
static inline void preprocessing_set_4d(sd::Tensor<float>& tensor, float value, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
tensor.values()[static_cast<size_t>(preprocessing_offset_4d(tensor, i0, i1, i2, i3))] = value;
}
static inline sd::Tensor<float> sd_image_to_preprocessing_tensor(sd_image_t image) {
sd::Tensor<float> tensor({static_cast<int64_t>(image.width), static_cast<int64_t>(image.height), static_cast<int64_t>(image.channel), 1});
for (uint32_t y = 0; y < image.height; ++y) {
for (uint32_t x = 0; x < image.width; ++x) {
for (uint32_t c = 0; c < image.channel; ++c) {
preprocessing_set_4d(tensor, sd_image_get_f32(image, x, y, c), x, y, c, 0);
}
}
}
return tensor;
}
static inline void preprocessing_tensor_to_sd_image(const sd::Tensor<float>& tensor, uint8_t* image_data) {
GGML_ASSERT(tensor.dim() == 4);
GGML_ASSERT(tensor.shape()[3] == 1);
GGML_ASSERT(image_data != nullptr);
int width = static_cast<int>(tensor.shape()[0]);
int height = static_cast<int>(tensor.shape()[1]);
int channel = static_cast<int>(tensor.shape()[2]);
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
for (int c = 0; c < channel; ++c) {
float value = preprocessing_get_4d(tensor, x, y, c, 0);
value = std::min(1.0f, std::max(0.0f, value));
image_data[(y * width + x) * channel + c] = static_cast<uint8_t>(std::round(value * 255.0f));
}
}
}
}
static inline sd::Tensor<float> gaussian_kernel_tensor(int kernel_size) {
sd::Tensor<float> kernel({kernel_size, kernel_size, 1, 1});
int ks_mid = kernel_size / 2;
float sigma = 1.4f; float sigma = 1.4f;
float normal = 1.f / (2.0f * M_PI_ * std::pow(sigma, 2.0f)); float normal = 1.f / (2.0f * M_PI_ * powf(sigma, 2.0f));
for (int y = 0; y < kernel_size; ++y) { for (int y = 0; y < kernel->ne[0]; y++) {
float gx = static_cast<float>(-ks_mid + y); float gx = static_cast<float>(-ks_mid + y);
for (int x = 0; x < kernel_size; ++x) { for (int x = 0; x < kernel->ne[1]; x++) {
float gy = static_cast<float>(-ks_mid + x); float gy = static_cast<float>(-ks_mid + x);
float k = std::exp(-((gx * gx + gy * gy) / (2.0f * std::pow(sigma, 2.0f)))) * normal; float k_ = expf(-((gx * gx + gy * gy) / (2.0f * powf(sigma, 2.0f)))) * normal;
preprocessing_set_4d(kernel, k, x, y, 0, 0); ggml_ext_tensor_set_f32(kernel, k_, x, y);
} }
} }
return kernel;
} }
static inline sd::Tensor<float> convolve_tensor(const sd::Tensor<float>& input, const sd::Tensor<float>& kernel, int padding) { void grayscale(ggml_tensor* rgb_img, ggml_tensor* grayscale) {
GGML_ASSERT(input.dim() == 4); for (int iy = 0; iy < rgb_img->ne[1]; iy++) {
GGML_ASSERT(kernel.dim() == 4); for (int ix = 0; ix < rgb_img->ne[0]; ix++) {
GGML_ASSERT(input.shape()[3] == 1); float r = ggml_ext_tensor_get_f32(rgb_img, ix, iy);
GGML_ASSERT(kernel.shape()[2] == 1); float g = ggml_ext_tensor_get_f32(rgb_img, ix, iy, 1);
GGML_ASSERT(kernel.shape()[3] == 1); float b = ggml_ext_tensor_get_f32(rgb_img, ix, iy, 2);
sd::Tensor<float> output(input.shape());
int64_t width = input.shape()[0];
int64_t height = input.shape()[1];
int64_t channels = input.shape()[2];
int64_t kernel_w = kernel.shape()[0];
int64_t kernel_h = kernel.shape()[1];
for (int64_t c = 0; c < channels; ++c) {
for (int64_t y = 0; y < height; ++y) {
for (int64_t x = 0; x < width; ++x) {
float sum = 0.0f;
for (int64_t ky = 0; ky < kernel_h; ++ky) {
int64_t iy = y + ky - padding;
if (iy < 0 || iy >= height) {
continue;
}
for (int64_t kx = 0; kx < kernel_w; ++kx) {
int64_t ix = x + kx - padding;
if (ix < 0 || ix >= width) {
continue;
}
sum += preprocessing_get_4d(input, ix, iy, c, 0) * preprocessing_get_4d(kernel, kx, ky, 0, 0);
}
}
preprocessing_set_4d(output, sum, x, y, c, 0);
}
}
}
return output;
}
static inline sd::Tensor<float> grayscale_tensor(const sd::Tensor<float>& rgb_img) {
GGML_ASSERT(rgb_img.dim() == 4);
GGML_ASSERT(rgb_img.shape()[2] >= 3);
sd::Tensor<float> grayscale({rgb_img.shape()[0], rgb_img.shape()[1], 1, rgb_img.shape()[3]});
for (int64_t iy = 0; iy < rgb_img.shape()[1]; ++iy) {
for (int64_t ix = 0; ix < rgb_img.shape()[0]; ++ix) {
float r = preprocessing_get_4d(rgb_img, ix, iy, 0, 0);
float g = preprocessing_get_4d(rgb_img, ix, iy, 1, 0);
float b = preprocessing_get_4d(rgb_img, ix, iy, 2, 0);
float gray = 0.2989f * r + 0.5870f * g + 0.1140f * b; float gray = 0.2989f * r + 0.5870f * g + 0.1140f * b;
preprocessing_set_4d(grayscale, gray, ix, iy, 0, 0); ggml_ext_tensor_set_f32(grayscale, gray, ix, iy);
} }
} }
return grayscale;
} }
static inline sd::Tensor<float> tensor_hypot(const sd::Tensor<float>& x, const sd::Tensor<float>& y) { void prop_hypot(ggml_tensor* x, ggml_tensor* y, ggml_tensor* h) {
sd::tensor_check_same_shape(x, y); int n_elements = static_cast<int>(ggml_nelements(h));
sd::Tensor<float> out(x.shape()); float* dx = (float*)x->data;
for (int64_t i = 0; i < out.numel(); ++i) { float* dy = (float*)y->data;
out[i] = std::sqrt(x[i] * x[i] + y[i] * y[i]); float* dh = (float*)h->data;
for (int i = 0; i < n_elements; i++) {
dh[i] = sqrtf(dx[i] * dx[i] + dy[i] * dy[i]);
} }
return out;
} }
static inline sd::Tensor<float> tensor_arctan2(const sd::Tensor<float>& x, const sd::Tensor<float>& y) { void prop_arctan2(ggml_tensor* x, ggml_tensor* y, ggml_tensor* h) {
sd::tensor_check_same_shape(x, y); int n_elements = static_cast<int>(ggml_nelements(h));
sd::Tensor<float> out(x.shape()); float* dx = (float*)x->data;
for (int64_t i = 0; i < out.numel(); ++i) { float* dy = (float*)y->data;
out[i] = std::atan2(y[i], x[i]); float* dh = (float*)h->data;
for (int i = 0; i < n_elements; i++) {
dh[i] = atan2f(dy[i], dx[i]);
} }
return out;
} }
static inline void normalize_tensor(sd::Tensor<float>* g) { void normalize_tensor(ggml_tensor* g) {
GGML_ASSERT(g != nullptr); int n_elements = static_cast<int>(ggml_nelements(g));
if (g->empty()) { float* dg = (float*)g->data;
return; float max = -INFINITY;
for (int i = 0; i < n_elements; i++) {
max = dg[i] > max ? dg[i] : max;
} }
float max_value = -std::numeric_limits<float>::infinity(); max = 1.0f / max;
for (int64_t i = 0; i < g->numel(); ++i) { for (int i = 0; i < n_elements; i++) {
max_value = std::max(max_value, (*g)[i]); dg[i] *= max;
} }
if (max_value == 0.0f || !std::isfinite(max_value)) {
return;
}
*g *= (1.0f / max_value);
} }
static inline sd::Tensor<float> non_max_supression(const sd::Tensor<float>& G, const sd::Tensor<float>& D) { void non_max_supression(ggml_tensor* result, ggml_tensor* G, ggml_tensor* D) {
GGML_ASSERT(G.shape() == D.shape()); for (int iy = 1; iy < result->ne[1] - 1; iy++) {
sd::Tensor<float> result = sd::Tensor<float>::zeros(G.shape()); for (int ix = 1; ix < result->ne[0] - 1; ix++) {
for (int64_t iy = 1; iy < result.shape()[1] - 1; ++iy) { float angle = ggml_ext_tensor_get_f32(D, ix, iy) * 180.0f / M_PI_;
for (int64_t ix = 1; ix < result.shape()[0] - 1; ++ix) { angle = angle < 0.0f ? angle += 180.0f : angle;
float angle = preprocessing_get_4d(D, ix, iy, 0, 0) * 180.0f / M_PI_;
angle = angle < 0.0f ? angle + 180.0f : angle;
float q = 1.0f; float q = 1.0f;
float r = 1.0f; float r = 1.0f;
if ((0 >= angle && angle < 22.5f) || (157.5f >= angle && angle <= 180.0f)) { // angle 0
q = preprocessing_get_4d(G, ix, iy + 1, 0, 0); if ((0 >= angle && angle < 22.5f) || (157.5f >= angle && angle <= 180)) {
r = preprocessing_get_4d(G, ix, iy - 1, 0, 0); q = ggml_ext_tensor_get_f32(G, ix, iy + 1);
} else if (22.5f >= angle && angle < 67.5f) { r = ggml_ext_tensor_get_f32(G, ix, iy - 1);
q = preprocessing_get_4d(G, ix + 1, iy - 1, 0, 0); }
r = preprocessing_get_4d(G, ix - 1, iy + 1, 0, 0); // angle 45
} else if (67.5f >= angle && angle < 112.5f) { else if (22.5f >= angle && angle < 67.5f) {
q = preprocessing_get_4d(G, ix + 1, iy, 0, 0); q = ggml_ext_tensor_get_f32(G, ix + 1, iy - 1);
r = preprocessing_get_4d(G, ix - 1, iy, 0, 0); r = ggml_ext_tensor_get_f32(G, ix - 1, iy + 1);
} else if (112.5f >= angle && angle < 157.5f) { }
q = preprocessing_get_4d(G, ix - 1, iy - 1, 0, 0); // angle 90
r = preprocessing_get_4d(G, ix + 1, iy + 1, 0, 0); else if (67.5f >= angle && angle < 112.5) {
q = ggml_ext_tensor_get_f32(G, ix + 1, iy);
r = ggml_ext_tensor_get_f32(G, ix - 1, iy);
}
// angle 135
else if (112.5 >= angle && angle < 157.5f) {
q = ggml_ext_tensor_get_f32(G, ix - 1, iy - 1);
r = ggml_ext_tensor_get_f32(G, ix + 1, iy + 1);
} }
float cur = preprocessing_get_4d(G, ix, iy, 0, 0); float cur = ggml_ext_tensor_get_f32(G, ix, iy);
preprocessing_set_4d(result, (cur >= q && cur >= r) ? cur : 0.0f, ix, iy, 0, 0); if ((cur >= q) && (cur >= r)) {
ggml_ext_tensor_set_f32(result, cur, ix, iy);
} else {
ggml_ext_tensor_set_f32(result, 0.0f, ix, iy);
}
} }
} }
return result;
} }
static inline void threshold_hystersis(sd::Tensor<float>* img, float high_threshold, float low_threshold, float weak, float strong) { void threshold_hystersis(ggml_tensor* img, float high_threshold, float low_threshold, float weak, float strong) {
GGML_ASSERT(img != nullptr); int n_elements = static_cast<int>(ggml_nelements(img));
if (img->empty()) { float* imd = (float*)img->data;
return; float max = -INFINITY;
for (int i = 0; i < n_elements; i++) {
max = imd[i] > max ? imd[i] : max;
} }
float max_value = -std::numeric_limits<float>::infinity(); float ht = max * high_threshold;
for (int64_t i = 0; i < img->numel(); ++i) {
max_value = std::max(max_value, (*img)[i]);
}
float ht = max_value * high_threshold;
float lt = ht * low_threshold; float lt = ht * low_threshold;
for (int64_t i = 0; i < img->numel(); ++i) { for (int i = 0; i < n_elements; i++) {
float img_v = (*img)[i]; float img_v = imd[i];
if (img_v >= ht) { if (img_v >= ht) { // strong pixel
(*img)[i] = strong; imd[i] = strong;
} else if (img_v <= ht && img_v >= lt) { } else if (img_v <= ht && img_v >= lt) { // strong pixel
(*img)[i] = weak; imd[i] = weak;
} }
} }
for (int64_t iy = 0; iy < img->shape()[1]; ++iy) { for (int iy = 0; iy < img->ne[1]; iy++) {
for (int64_t ix = 0; ix < img->shape()[0]; ++ix) { for (int ix = 0; ix < img->ne[0]; ix++) {
if (!(ix >= 3 && ix <= img->shape()[0] - 3 && iy >= 3 && iy <= img->shape()[1] - 3)) { if (ix >= 3 && ix <= img->ne[0] - 3 && iy >= 3 && iy <= img->ne[1] - 3) {
preprocessing_set_4d(*img, 0.0f, ix, iy, 0, 0); ggml_ext_tensor_set_f32(img, ggml_ext_tensor_get_f32(img, ix, iy), ix, iy);
} else {
ggml_ext_tensor_set_f32(img, 0.0f, ix, iy);
} }
} }
} }
for (int64_t iy = 1; iy < img->shape()[1] - 1; ++iy) { // hysteresis
for (int64_t ix = 1; ix < img->shape()[0] - 1; ++ix) { for (int iy = 1; iy < img->ne[1] - 1; iy++) {
float imd_v = preprocessing_get_4d(*img, ix, iy, 0, 0); for (int ix = 1; ix < img->ne[0] - 1; ix++) {
float imd_v = ggml_ext_tensor_get_f32(img, ix, iy);
if (imd_v == weak) { if (imd_v == weak) {
bool has_strong_neighbor = if (ggml_ext_tensor_get_f32(img, ix + 1, iy - 1) == strong || ggml_ext_tensor_get_f32(img, ix + 1, iy) == strong ||
preprocessing_get_4d(*img, ix + 1, iy - 1, 0, 0) == strong || ggml_ext_tensor_get_f32(img, ix, iy - 1) == strong || ggml_ext_tensor_get_f32(img, ix, iy + 1) == strong ||
preprocessing_get_4d(*img, ix + 1, iy, 0, 0) == strong || ggml_ext_tensor_get_f32(img, ix - 1, iy - 1) == strong || ggml_ext_tensor_get_f32(img, ix - 1, iy) == strong) {
preprocessing_get_4d(*img, ix, iy - 1, 0, 0) == strong || ggml_ext_tensor_set_f32(img, strong, ix, iy);
preprocessing_get_4d(*img, ix, iy + 1, 0, 0) == strong || } else {
preprocessing_get_4d(*img, ix - 1, iy - 1, 0, 0) == strong || ggml_ext_tensor_set_f32(img, 0.0f, ix, iy);
preprocessing_get_4d(*img, ix - 1, iy, 0, 0) == strong; }
preprocessing_set_4d(*img, has_strong_neighbor ? strong : 0.0f, ix, iy, 0, 0);
} }
} }
} }
} }
bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold, float weak, float strong, bool inverse) { bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold, float weak, float strong, bool inverse) {
ggml_init_params params;
params.mem_size = static_cast<size_t>(40 * img.width * img.height); // 10MB for 512x512
params.mem_buffer = nullptr;
params.no_alloc = false;
ggml_context* work_ctx = ggml_init(params);
if (!work_ctx) {
LOG_ERROR("ggml_init() failed");
return false;
}
float kX[9] = { float kX[9] = {
-1, 0, 1, -1, 0, 1,
-2, 0, 2, -2, 0, 2,
@ -246,33 +184,43 @@ bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold,
0, 0, 0, 0, 0, 0,
-1, -2, -1}; -1, -2, -1};
sd::Tensor<float> gkernel = gaussian_kernel_tensor(5); // generate kernel
sd::Tensor<float> sf_kx({3, 3, 1, 1}, std::vector<float>(kX, kX + 9)); int kernel_size = 5;
sd::Tensor<float> sf_ky({3, 3, 1, 1}, std::vector<float>(kY, kY + 9)); ggml_tensor* gkernel = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, kernel_size, kernel_size, 1, 1);
ggml_tensor* sf_kx = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
sd::Tensor<float> image = sd_image_to_preprocessing_tensor(img); memcpy(sf_kx->data, kX, ggml_nbytes(sf_kx));
sd::Tensor<float> image_gray = grayscale_tensor(image); ggml_tensor* sf_ky = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
image_gray = convolve_tensor(image_gray, gkernel, 2); memcpy(sf_ky->data, kY, ggml_nbytes(sf_ky));
sd::Tensor<float> iX = convolve_tensor(image_gray, sf_kx, 1); gaussian_kernel(gkernel);
sd::Tensor<float> iY = convolve_tensor(image_gray, sf_ky, 1); ggml_tensor* image = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 3, 1);
sd::Tensor<float> G = tensor_hypot(iX, iY); ggml_tensor* image_gray = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 1, 1);
normalize_tensor(&G); ggml_tensor* iX = ggml_dup_tensor(work_ctx, image_gray);
sd::Tensor<float> theta = tensor_arctan2(iX, iY); ggml_tensor* iY = ggml_dup_tensor(work_ctx, image_gray);
image_gray = non_max_supression(G, theta); ggml_tensor* G = ggml_dup_tensor(work_ctx, image_gray);
threshold_hystersis(&image_gray, high_threshold, low_threshold, weak, strong); ggml_tensor* tetha = ggml_dup_tensor(work_ctx, image_gray);
sd_image_to_ggml_tensor(img, image);
for (uint32_t iy = 0; iy < img.height; ++iy) { grayscale(image, image_gray);
for (uint32_t ix = 0; ix < img.width; ++ix) { convolve(image_gray, image_gray, gkernel, 2);
float gray = preprocessing_get_4d(image_gray, ix, iy, 0, 0); convolve(image_gray, iX, sf_kx, 1);
convolve(image_gray, iY, sf_ky, 1);
prop_hypot(iX, iY, G);
normalize_tensor(G);
prop_arctan2(iX, iY, tetha);
non_max_supression(image_gray, G, tetha);
threshold_hystersis(image_gray, high_threshold, low_threshold, weak, strong);
// to RGB channels
for (uint32_t iy = 0; iy < img.height; iy++) {
for (uint32_t ix = 0; ix < img.width; ix++) {
float gray = ggml_ext_tensor_get_f32(image_gray, ix, iy);
gray = inverse ? 1.0f - gray : gray; gray = inverse ? 1.0f - gray : gray;
for (uint32_t c = 0; c < img.channel; ++c) { ggml_ext_tensor_set_f32(image, gray, ix, iy);
preprocessing_set_4d(image, gray, ix, iy, c, 0); ggml_ext_tensor_set_f32(image, gray, ix, iy, 1);
} ggml_ext_tensor_set_f32(image, gray, ix, iy, 2);
} }
} }
ggml_tensor_to_sd_image(image, img.data);
preprocessing_tensor_to_sd_image(image, img.data); ggml_free(work_ctx);
return true; return true;
} }
#endif // __PREPROCESSING_HPP__ #endif // __PREPROCESSING_HPP__

View File

@ -525,21 +525,20 @@ namespace Qwen {
qwen_image.get_param_tensors(tensors, prefix); qwen_image.get_param_tensors(tensors, prefix);
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor, ggml_cgraph* build_graph(ggml_tensor* x,
const sd::Tensor<float>& timesteps_tensor, ggml_tensor* timesteps,
const sd::Tensor<float>& context_tensor, ggml_tensor* context,
const std::vector<sd::Tensor<float>>& ref_latents_tensor = {}, std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false) { bool increase_ref_index = false) {
ggml_cgraph* gf = new_graph_custom(QWEN_IMAGE_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
GGML_ASSERT(x->ne[3] == 1); GGML_ASSERT(x->ne[3] == 1);
GGML_ASSERT(!context_tensor.empty()); ggml_cgraph* gf = new_graph_custom(QWEN_IMAGE_GRAPH_SIZE);
ggml_tensor* context = make_input(context_tensor);
std::vector<ggml_tensor*> ref_latents; x = to_backend(x);
ref_latents.reserve(ref_latents_tensor.size()); context = to_backend(context);
for (const auto& ref_latent_tensor : ref_latents_tensor) { timesteps = to_backend(timesteps);
ref_latents.push_back(make_input(ref_latent_tensor));
for (int i = 0; i < ref_latents.size(); i++) {
ref_latents[i] = to_backend(ref_latents[i]);
} }
pe_vec = Rope::gen_qwen_image_pe(static_cast<int>(x->ne[1]), pe_vec = Rope::gen_qwen_image_pe(static_cast<int>(x->ne[1]),
@ -601,12 +600,14 @@ namespace Qwen {
return gf; return gf;
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const sd::Tensor<float>& x, ggml_tensor* x,
const sd::Tensor<float>& timesteps, ggml_tensor* timesteps,
const sd::Tensor<float>& context, ggml_tensor* context,
const std::vector<sd::Tensor<float>>& ref_latents = {}, std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false) { bool increase_ref_index = false,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size] // context: [N, max_position, hidden_size]
@ -614,7 +615,7 @@ namespace Qwen {
return build_graph(x, timesteps, context, ref_latents, increase_ref_index); return build_graph(x, timesteps, context, ref_latents, increase_ref_index);
}; };
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim()); return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
} }
void test() { void test() {
@ -623,37 +624,30 @@ namespace Qwen {
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
ggml_context* ctx = ggml_init(params); ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr); GGML_ASSERT(work_ctx != nullptr);
{ {
// auto x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 16, 16, 16, 1); // auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 16, 1);
// ggml_set_f32(x, 0.01f); // ggml_set_f32(x, 0.01f);
auto x = sd::load_tensor_from_file_as_tensor<float>("./qwen_image_x.bin"); auto x = load_tensor_from_file(work_ctx, "./qwen_image_x.bin");
print_sd_tensor(x); print_ggml_tensor(x);
std::vector<float> timesteps_vec(1, 1000.f); std::vector<float> timesteps_vec(1, 1000.f);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec); auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
// auto context = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 3584, 256, 1); // auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 3584, 256, 1);
// ggml_set_f32(context, 0.01f); // ggml_set_f32(context, 0.01f);
auto context = sd::load_tensor_from_file_as_tensor<float>("./qwen_image_context.bin"); auto context = load_tensor_from_file(work_ctx, "./qwen_image_context.bin");
print_sd_tensor(context); print_ggml_tensor(context);
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = compute(8, compute(8, x, timesteps, context, {}, false, &out, work_ctx);
x, int64_t t1 = ggml_time_ms();
timesteps,
context,
{},
false);
int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("qwen_image test done in %lldms", t1 - t0); LOG_DEBUG("qwen_image test done in %lldms", t1 - t0);
} }
} }

View File

@ -1,361 +0,0 @@
#include "sample-cache.h"
namespace sd_sample {
// Resolve the effective reuse threshold for the configured cache mode.
// INFINITY acts as the "unset" sentinel; substitute the per-mode default,
// then clamp the result to be non-negative.
static float get_cache_reuse_threshold(const sd_cache_params_t& params) {
    float threshold = params.reuse_threshold;
    if (threshold == INFINITY) {
        switch (params.mode) {
            case SD_CACHE_EASYCACHE:
                threshold = 0.2f;
                break;
            case SD_CACHE_UCACHE:
                threshold = 1.0f;
                break;
            default:
                break;
        }
    }
    return std::max(0.0f, threshold);
}
// True when EasyCache is the active sampler-level cache for this run.
bool SampleCacheRuntime::easycache_enabled() const {
return mode == SampleCacheMode::EASYCACHE;
}
// True when UCache is the active sampler-level cache for this run.
bool SampleCacheRuntime::ucache_enabled() const {
return mode == SampleCacheMode::UCACHE;
}
// True when CacheDIT (DBCache/TaylorSeer) is the active sampler-level cache.
bool SampleCacheRuntime::cachedit_enabled() const {
return mode == SampleCacheMode::CACHEDIT;
}
// Validate the [start_percent, end_percent) window for the modes that use it.
// Modes other than EasyCache/UCache ignore the window, so they always pass.
static bool has_valid_cache_percent_range(const sd_cache_params_t& cache_params) {
    const bool range_checked_mode =
        cache_params.mode == SD_CACHE_EASYCACHE || cache_params.mode == SD_CACHE_UCACHE;
    if (!range_checked_mode) {
        return true;
    }
    const float start = cache_params.start_percent;
    const float end   = cache_params.end_percent;
    return start >= 0.0f && start < 1.0f &&
           end > 0.0f && end <= 1.0f &&
           start < end;
}
// Configure and activate EasyCache on `runtime` (DiT models only).
// Leaves runtime.mode untouched if the model type or init check fails.
static void init_easycache_runtime(SampleCacheRuntime& runtime,
                                   SDVersion version,
                                   const sd_cache_params_t& cache_params,
                                   Denoiser* denoiser) {
    if (!sd_version_is_dit(version)) {
        LOG_WARN("EasyCache requested but not supported for this model type");
        return;
    }
    EasyCacheConfig cfg;
    cfg.enabled         = true;
    cfg.reuse_threshold = get_cache_reuse_threshold(cache_params);
    cfg.start_percent   = cache_params.start_percent;
    cfg.end_percent     = cache_params.end_percent;
    runtime.easycache.init(cfg, denoiser);
    if (!runtime.easycache.enabled()) {
        LOG_WARN("EasyCache requested but could not be initialized for this run");
        return;
    }
    runtime.mode = SampleCacheMode::EASYCACHE;
    LOG_INFO("EasyCache enabled - threshold: %.3f, start: %.2f, end: %.2f",
             cfg.reuse_threshold,
             cfg.start_percent,
             cfg.end_percent);
}
// Configure and activate UCache on `runtime` (UNET models only).
// The error decay rate is clamped into [0, 1]; sigmas are forwarded so the
// cache can map step indices onto the noise schedule.
static void init_ucache_runtime(SampleCacheRuntime& runtime,
                                SDVersion version,
                                const sd_cache_params_t& cache_params,
                                Denoiser* denoiser,
                                const std::vector<float>& sigmas) {
    if (!sd_version_is_unet(version)) {
        LOG_WARN("UCache requested but not supported for this model type (only UNET models)");
        return;
    }
    UCacheConfig cfg;
    cfg.enabled                = true;
    cfg.reuse_threshold        = get_cache_reuse_threshold(cache_params);
    cfg.start_percent          = cache_params.start_percent;
    cfg.end_percent            = cache_params.end_percent;
    cfg.error_decay_rate       = std::max(0.0f, std::min(1.0f, cache_params.error_decay_rate));
    cfg.use_relative_threshold = cache_params.use_relative_threshold;
    cfg.reset_error_on_compute = cache_params.reset_error_on_compute;
    runtime.ucache.init(cfg, denoiser);
    if (!runtime.ucache.enabled()) {
        LOG_WARN("UCache requested but could not be initialized for this run");
        return;
    }
    runtime.ucache.set_sigmas(sigmas);
    runtime.mode = SampleCacheMode::UCACHE;
    LOG_INFO("UCache enabled - threshold: %.3f, start: %.2f, end: %.2f, decay: %.2f, relative: %s, reset: %s",
             cfg.reuse_threshold,
             cfg.start_percent,
             cfg.end_percent,
             cfg.error_decay_rate,
             cfg.use_relative_threshold ? "true" : "false",
             cfg.reset_error_on_compute ? "true" : "false");
}
// Configure and activate CacheDIT on `runtime` (DiT models only).
// SD_CACHE_DBCACHE enables only DBCache, SD_CACHE_TAYLORSEER only TaylorSeer,
// SD_CACHE_CACHE_DIT enables both.
static void init_cachedit_runtime(SampleCacheRuntime& runtime,
                                  SDVersion version,
                                  const sd_cache_params_t& cache_params,
                                  const std::vector<float>& sigmas) {
    if (!sd_version_is_dit(version)) {
        LOG_WARN("CacheDIT requested but not supported for this model type (only DiT models)");
        return;
    }
    const bool want_dbcache = cache_params.mode == SD_CACHE_DBCACHE || cache_params.mode == SD_CACHE_CACHE_DIT;
    const bool want_taylor  = cache_params.mode == SD_CACHE_TAYLORSEER || cache_params.mode == SD_CACHE_CACHE_DIT;

    DBCacheConfig db_cfg;
    db_cfg.enabled                    = want_dbcache;
    db_cfg.Fn_compute_blocks          = cache_params.Fn_compute_blocks;
    db_cfg.Bn_compute_blocks          = cache_params.Bn_compute_blocks;
    db_cfg.residual_diff_threshold    = cache_params.residual_diff_threshold;
    db_cfg.max_warmup_steps           = cache_params.max_warmup_steps;
    db_cfg.max_cached_steps           = cache_params.max_cached_steps;
    db_cfg.max_continuous_cached_steps = cache_params.max_continuous_cached_steps;
    if (cache_params.scm_mask != nullptr && strlen(cache_params.scm_mask) > 0) {
        db_cfg.steps_computation_mask = parse_scm_mask(cache_params.scm_mask);
    }
    db_cfg.scm_policy_dynamic = cache_params.scm_policy_dynamic;

    TaylorSeerConfig taylor_cfg;
    taylor_cfg.enabled             = want_taylor;
    taylor_cfg.n_derivatives       = cache_params.taylorseer_n_derivatives;
    taylor_cfg.skip_interval_steps = cache_params.taylorseer_skip_interval;

    runtime.cachedit.init(db_cfg, taylor_cfg);
    if (!runtime.cachedit.enabled()) {
        LOG_WARN("CacheDIT requested but could not be initialized for this run");
        return;
    }
    runtime.cachedit.set_sigmas(sigmas);
    runtime.mode = SampleCacheMode::CACHEDIT;

    const char* mode_label;
    if (cache_params.mode == SD_CACHE_CACHE_DIT) {
        mode_label = "DBCache+TaylorSeer";
    } else if (cache_params.mode == SD_CACHE_DBCACHE) {
        mode_label = "DBCache";
    } else {
        mode_label = "TaylorSeer";
    }
    LOG_INFO("CacheDIT enabled - mode: %s, Fn: %d, Bn: %d, threshold: %.3f, warmup: %d",
             mode_label,
             db_cfg.Fn_compute_blocks,
             db_cfg.Bn_compute_blocks,
             db_cfg.residual_diff_threshold,
             db_cfg.max_warmup_steps);
}
// Configure and activate Spectrum forecasting on `runtime`.
// Unlike the other caches, Spectrum uses its own `spectrum_enabled` flag
// rather than runtime.mode, so it can coexist with the mode dispatch.
static void init_spectrum_runtime(SampleCacheRuntime& runtime,
                                  SDVersion version,
                                  const sd_cache_params_t& cache_params,
                                  const std::vector<float>& sigmas) {
    if (!sd_version_is_unet(version) && !sd_version_is_dit(version)) {
        LOG_WARN("Spectrum requested but not supported for this model type (only UNET and DiT models)");
        return;
    }
    SpectrumConfig cfg;
    cfg.w            = cache_params.spectrum_w;
    cfg.m            = cache_params.spectrum_m;
    cfg.lam          = cache_params.spectrum_lam;
    cfg.window_size  = cache_params.spectrum_window_size;
    cfg.flex_window  = cache_params.spectrum_flex_window;
    cfg.warmup_steps = cache_params.spectrum_warmup_steps;
    cfg.stop_percent = cache_params.spectrum_stop_percent;
    // sigmas has one more entry than there are denoising steps.
    const size_t total_steps = sigmas.empty() ? 0 : sigmas.size() - 1;
    runtime.spectrum.init(cfg, total_steps);
    runtime.spectrum_enabled = true;
    LOG_INFO("Spectrum enabled - w: %.2f, m: %d, lam: %.2f, window: %d, flex: %.2f, warmup: %d, stop: %.0f%%",
             cfg.w, cfg.m, cfg.lam,
             cfg.window_size, cfg.flex_window,
             cfg.warmup_steps, cfg.stop_percent * 100.0f);
}
// Build the per-run cache runtime from user parameters.
// Returns a disabled runtime (mode == NONE, spectrum off) when caching is
// disabled, the parameters are absent, or the percent window is invalid.
SampleCacheRuntime init_sample_cache_runtime(SDVersion version,
                                             const sd_cache_params_t* cache_params,
                                             Denoiser* denoiser,
                                             const std::vector<float>& sigmas) {
    SampleCacheRuntime rt;
    const bool caching_requested = cache_params != nullptr && cache_params->mode != SD_CACHE_DISABLED;
    if (!caching_requested) {
        return rt;
    }
    if (!has_valid_cache_percent_range(*cache_params)) {
        LOG_WARN("Cache disabled due to invalid percent range (start=%.3f, end=%.3f)",
                 cache_params->start_percent,
                 cache_params->end_percent);
        return rt;
    }
    switch (cache_params->mode) {
        case SD_CACHE_EASYCACHE:
            init_easycache_runtime(rt, version, *cache_params, denoiser);
            break;
        case SD_CACHE_UCACHE:
            init_ucache_runtime(rt, version, *cache_params, denoiser, sigmas);
            break;
        case SD_CACHE_DBCACHE:
        case SD_CACHE_TAYLORSEER:
        case SD_CACHE_CACHE_DIT:
            init_cachedit_runtime(rt, version, *cache_params, sigmas);
            break;
        case SD_CACHE_SPECTRUM:
            init_spectrum_runtime(rt, version, *cache_params, sigmas);
            break;
        default:
            break;
    }
    return rt;
}
// Per-step dispatcher: announces the beginning of a denoising step to
// whichever cache is active. `step` is 1-based from the sampler; step 0 is
// the priming call, encoded as step_index == -1 (no notification).
SampleStepCacheDispatcher::SampleStepCacheDispatcher(SampleCacheRuntime& runtime, int step, float sigma)
    : runtime(runtime), step(step), sigma(sigma), step_index(step > 0 ? (step - 1) : -1) {
    if (step_index < 0) {
        return;
    }
    if (runtime.mode == SampleCacheMode::EASYCACHE) {
        runtime.easycache.begin_step(step_index, sigma);
    } else if (runtime.mode == SampleCacheMode::UCACHE) {
        runtime.ucache.begin_step(step_index, sigma);
    } else if (runtime.mode == SampleCacheMode::CACHEDIT) {
        runtime.cachedit.begin_step(step_index, sigma);
    }
}
// Ask the active cache whether this condition's forward pass can be skipped.
// Returns true when `output` has been filled from cache; false means the
// caller must run the model.
bool SampleStepCacheDispatcher::before_condition(const void* condition,
                                                 const sd::Tensor<float>& input,
                                                 sd::Tensor<float>* output) {
    if (step_index < 0 || condition == nullptr || output == nullptr) {
        return false;
    }
    if (runtime.mode == SampleCacheMode::EASYCACHE) {
        return runtime.easycache.before_condition(condition, input, output, sigma, step_index);
    }
    if (runtime.mode == SampleCacheMode::UCACHE) {
        return runtime.ucache.before_condition(condition, input, output, sigma, step_index);
    }
    if (runtime.mode == SampleCacheMode::CACHEDIT) {
        return runtime.cachedit.before_condition(condition, input, output, sigma, step_index);
    }
    return false;
}
// Feed a freshly computed model output back to the active cache so it can
// update its residuals/history for future reuse decisions.
void SampleStepCacheDispatcher::after_condition(const void* condition,
                                                const sd::Tensor<float>& input,
                                                const sd::Tensor<float>& output) {
    if (step_index < 0 || condition == nullptr) {
        return;
    }
    if (runtime.mode == SampleCacheMode::EASYCACHE) {
        runtime.easycache.after_condition(condition, input, output);
    } else if (runtime.mode == SampleCacheMode::UCACHE) {
        runtime.ucache.after_condition(condition, input, output);
    } else if (runtime.mode == SampleCacheMode::CACHEDIT) {
        runtime.cachedit.after_condition(condition, input, output);
    }
}
// Whether the active cache decided to skip the current step entirely.
bool SampleStepCacheDispatcher::is_step_skipped() const {
    if (runtime.mode == SampleCacheMode::EASYCACHE) {
        return runtime.easycache.is_step_skipped();
    }
    if (runtime.mode == SampleCacheMode::UCACHE) {
        return runtime.ucache.is_step_skipped();
    }
    if (runtime.mode == SampleCacheMode::CACHEDIT) {
        return runtime.cachedit.is_step_skipped();
    }
    return false;
}
// Emits the standard "skipped X/Y steps" end-of-run summary for one cache
// backend. Produces exactly the same messages the three per-cache copies
// used to print; total_steps == 0 suppresses all output.
static void log_skip_summary(const char* name, int steps_skipped, size_t total_steps) {
    if (total_steps == 0) {
        return;
    }
    if (steps_skipped <= 0) {
        LOG_INFO("%s completed without skipping steps", name);
        return;
    }
    if (steps_skipped < static_cast<int>(total_steps)) {
        double speedup = static_cast<double>(total_steps) /
                         static_cast<double>(total_steps - steps_skipped);
        LOG_INFO("%s skipped %d/%zu steps (%.2fx estimated speedup)",
                 name, steps_skipped, total_steps, speedup);
    } else {
        // All steps skipped: no meaningful speedup ratio to report.
        LOG_INFO("%s skipped %d/%zu steps", name, steps_skipped, total_steps);
    }
}

// Logs a summary of how many sampler steps each enabled cache skipped.
// Called once after sampling finishes.
void log_sample_cache_summary(const SampleCacheRuntime& runtime, size_t total_steps) {
    if (runtime.easycache_enabled()) {
        log_skip_summary("EasyCache", runtime.easycache.total_steps_skipped, total_steps);
    }
    if (runtime.ucache_enabled()) {
        log_skip_summary("UCache", runtime.ucache.total_steps_skipped, total_steps);
    }
    if (runtime.cachedit_enabled()) {
        log_skip_summary("CacheDIT", runtime.cachedit.total_steps_skipped, total_steps);
    }
    // Spectrum only reports when it actually skipped work. Fix: guard the
    // speedup division — previously, skipping every step divided by zero.
    if (runtime.spectrum_enabled && runtime.spectrum.total_steps_skipped > 0 && total_steps > 0) {
        if (runtime.spectrum.total_steps_skipped < static_cast<int>(total_steps)) {
            double speedup = static_cast<double>(total_steps) /
                             static_cast<double>(total_steps - runtime.spectrum.total_steps_skipped);
            LOG_INFO("Spectrum skipped %d/%zu steps (%.2fx estimated speedup)",
                     runtime.spectrum.total_steps_skipped,
                     total_steps,
                     speedup);
        } else {
            LOG_INFO("Spectrum skipped %d/%zu steps",
                     runtime.spectrum.total_steps_skipped,
                     total_steps);
        }
    }
}
} // namespace sd_sample

View File

@ -1,61 +0,0 @@
#ifndef __SAMPLE_CACHE_H__
#define __SAMPLE_CACHE_H__
#include <vector>
#include "cache_dit.hpp"
#include "denoiser.hpp"
#include "easycache.hpp"
#include "model.h"
#include "spectrum.hpp"
#include "tensor.hpp"
#include "ucache.hpp"
#include "util.h"
namespace sd_sample {
// Which sampler-level caching strategy is active for the current run.
// Spectrum is tracked separately via SampleCacheRuntime::spectrum_enabled,
// so it has no enumerator here.
enum class SampleCacheMode {
NONE,
EASYCACHE,
UCACHE,
CACHEDIT,
};
// Aggregates per-strategy cache state for one sampling run. Only the
// strategy selected by `mode` is dispatched to; Spectrum runs orthogonally
// when `spectrum_enabled` is set.
struct SampleCacheRuntime {
SampleCacheMode mode = SampleCacheMode::NONE;
EasyCacheState easycache;
UCacheState ucache;
CacheDitConditionState cachedit;
SpectrumState spectrum;
// Spectrum flag, independent of `mode` (see init_sample_cache_runtime).
bool spectrum_enabled = false;
// Convenience queries over `mode`; defined in sample-cache.cpp.
bool easycache_enabled() const;
bool ucache_enabled() const;
bool cachedit_enabled() const;
};
// Per-step façade over the active cache. Constructed once per sampler step;
// the constructor notifies the active cache that the step began.
// `step` is 1-based from the sampler; step_index is the 0-based index
// (-1 for the priming call, which disables all dispatch).
struct SampleStepCacheDispatcher {
SampleCacheRuntime& runtime;
int step;
float sigma;
int step_index;
SampleStepCacheDispatcher(SampleCacheRuntime& runtime, int step, float sigma);
// Returns true if `output` was filled from cache and the model call can be skipped.
bool before_condition(const void* condition, const sd::Tensor<float>& input, sd::Tensor<float>* output);
// Records a freshly computed output so the cache can update its state.
void after_condition(const void* condition, const sd::Tensor<float>& input, const sd::Tensor<float>& output);
bool is_step_skipped() const;
};
// Builds the runtime from user parameters; returns a disabled runtime when
// caching is off, unsupported for the model, or misconfigured.
SampleCacheRuntime init_sample_cache_runtime(SDVersion version,
const sd_cache_params_t* cache_params,
Denoiser* denoiser,
const std::vector<float>& sigmas);
// Logs end-of-run skip counts and estimated speedups for enabled caches.
void log_sample_cache_summary(const SampleCacheRuntime& runtime, size_t total_steps);
} // namespace sd_sample
#endif // __SAMPLE_CACHE_H__

View File

@ -6,7 +6,6 @@
#include <vector> #include <vector>
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
#include "tensor.hpp"
struct SpectrumConfig { struct SpectrumConfig {
float w = 0.40f; float w = 0.40f;
@ -58,8 +57,11 @@ struct SpectrumState {
return (num_cached + 1) % ws != 0; return (num_cached + 1) % ws != 0;
} }
void update(const sd::Tensor<float>& denoised) { void update(const ggml_tensor* denoised) {
H_buf.emplace_back(denoised.data(), denoised.data() + denoised.numel()); int64_t ne = ggml_nelements(denoised);
const float* data = (const float*)denoised->data;
H_buf.emplace_back(data, data + ne);
T_buf.push_back(taus(cnt)); T_buf.push_back(taus(cnt));
while ((int)H_buf.size() > K) { while ((int)H_buf.size() > K) {
@ -74,13 +76,13 @@ struct SpectrumState {
cnt++; cnt++;
} }
void predict(sd::Tensor<float>* denoised) { void predict(ggml_tensor* denoised) {
GGML_ASSERT(denoised != nullptr);
int64_t F = (int64_t)H_buf[0].size(); int64_t F = (int64_t)H_buf[0].size();
int K_curr = (int)H_buf.size(); int K_curr = (int)H_buf.size();
int M1 = config.m + 1; int M1 = config.m + 1;
float tau_at = taus(cnt); float tau_at = taus(cnt);
// Design matrix X: K_curr x M1 (Chebyshev basis)
std::vector<float> X(K_curr * M1); std::vector<float> X(K_curr * M1);
for (int i = 0; i < K_curr; i++) { for (int i = 0; i < K_curr; i++) {
X[i * M1] = 1.0f; X[i * M1] = 1.0f;
@ -90,6 +92,7 @@ struct SpectrumState {
X[i * M1 + j] = 2.0f * T_buf[i] * X[i * M1 + j - 1] - X[i * M1 + j - 2]; X[i * M1 + j] = 2.0f * T_buf[i] * X[i * M1 + j - 1] - X[i * M1 + j - 2];
} }
// x_star: Chebyshev basis at current tau
std::vector<float> x_star(M1); std::vector<float> x_star(M1);
x_star[0] = 1.0f; x_star[0] = 1.0f;
if (M1 > 1) if (M1 > 1)
@ -97,6 +100,7 @@ struct SpectrumState {
for (int j = 2; j < M1; j++) for (int j = 2; j < M1; j++)
x_star[j] = 2.0f * tau_at * x_star[j - 1] - x_star[j - 2]; x_star[j] = 2.0f * tau_at * x_star[j - 1] - x_star[j - 2];
// XtX = X^T X + lambda I
std::vector<float> XtX(M1 * M1, 0.0f); std::vector<float> XtX(M1 * M1, 0.0f);
for (int i = 0; i < M1; i++) { for (int i = 0; i < M1; i++) {
for (int j = 0; j < M1; j++) { for (int j = 0; j < M1; j++) {
@ -107,6 +111,7 @@ struct SpectrumState {
} }
} }
// Cholesky decomposition
std::vector<float> L(M1 * M1, 0.0f); std::vector<float> L(M1 * M1, 0.0f);
if (!cholesky_decompose(XtX.data(), L.data(), M1)) { if (!cholesky_decompose(XtX.data(), L.data(), M1)) {
float trace = 0.0f; float trace = 0.0f;
@ -117,15 +122,18 @@ struct SpectrumState {
cholesky_decompose(XtX.data(), L.data(), M1); cholesky_decompose(XtX.data(), L.data(), M1);
} }
// Solve XtX v = x_star
std::vector<float> v(M1); std::vector<float> v(M1);
cholesky_solve(L.data(), x_star.data(), v.data(), M1); cholesky_solve(L.data(), x_star.data(), v.data(), M1);
// Prediction weights per history entry
std::vector<float> weights(K_curr, 0.0f); std::vector<float> weights(K_curr, 0.0f);
for (int k = 0; k < K_curr; k++) for (int k = 0; k < K_curr; k++)
for (int j = 0; j < M1; j++) for (int j = 0; j < M1; j++)
weights[k] += X[k * M1 + j] * v[j]; weights[k] += X[k * M1 + j] * v[j];
float* out = denoised->data(); // Blend Chebyshev and Taylor predictions
float* out = (float*)denoised->data;
float w_cheb = config.w; float w_cheb = config.w;
float w_taylor = 1.0f - w_cheb; float w_taylor = 1.0f - w_cheb;
const float* h_last = H_buf.back().data(); const float* h_last = H_buf.back().data();

File diff suppressed because it is too large Load Diff

2074
src/t5.hpp

File diff suppressed because it is too large Load Diff

View File

@ -562,40 +562,41 @@ struct TinyImageAutoEncoder : public VAE {
taesd.get_param_tensors(tensors, prefix); taesd.get_param_tensors(tensors, prefix);
} }
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override { ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
SD_UNUSED(rng);
return vae_output; return vae_output;
} }
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override { ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
return latents; return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
} }
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override { ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
return latents; return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
} }
int get_encoder_output_channels(int input_channels) { int get_encoder_output_channels(int input_channels) {
return taesd.z_channels; return taesd.z_channels;
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) { ggml_cgraph* build_graph(ggml_tensor* z, bool decode_graph) {
ggml_cgraph* gf = ggml_new_graph(compute_ctx); ggml_cgraph* gf = ggml_new_graph(compute_ctx);
ggml_tensor* z = make_input(z_tensor); z = to_backend(z);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
ggml_tensor* out = decode_graph ? taesd.decode(&runner_ctx, z) : taesd.encode(&runner_ctx, z); ggml_tensor* out = decode_graph ? taesd.decode(&runner_ctx, z) : taesd.encode(&runner_ctx, z);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
sd::Tensor<float> _compute(const int n_threads, bool _compute(const int n_threads,
const sd::Tensor<float>& z_tensor, ggml_tensor* z,
bool decode_graph) override { bool decode_graph,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(z_tensor, decode_graph); return build_graph(z, decode_graph);
}; };
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), z_tensor.dim()); return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
} }
}; };
@ -624,41 +625,42 @@ struct TinyVideoAutoEncoder : public VAE {
taehv.get_param_tensors(tensors, prefix); taehv.get_param_tensors(tensors, prefix);
} }
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override { ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
SD_UNUSED(rng);
return vae_output; return vae_output;
} }
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override { ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
return latents; return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
} }
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override { ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
return latents; return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
} }
int get_encoder_output_channels(int input_channels) { int get_encoder_output_channels(int input_channels) {
return taehv.z_channels; return taehv.z_channels;
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) { ggml_cgraph* build_graph(ggml_tensor* z, bool decode_graph) {
ggml_cgraph* gf = ggml_new_graph(compute_ctx); ggml_cgraph* gf = ggml_new_graph(compute_ctx);
ggml_tensor* z = make_input(z_tensor); z = to_backend(z);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
ggml_tensor* out = decode_graph ? taehv.decode(&runner_ctx, z) : taehv.encode(&runner_ctx, z); ggml_tensor* out = decode_graph ? taehv.decode(&runner_ctx, z) : taehv.encode(&runner_ctx, z);
ggml_build_forward_expand(gf, out); ggml_build_forward_expand(gf, out);
return gf; return gf;
} }
sd::Tensor<float> _compute(const int n_threads, bool _compute(const int n_threads,
const sd::Tensor<float>& z_tensor, ggml_tensor* z,
bool decode_graph) override { bool decode_graph,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(z_tensor, decode_graph); return build_graph(z, decode_graph);
}; };
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), z_tensor.dim()); return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
} }
}; };
#endif // __TAE_HPP__ #endif // __TAE_HPP__

File diff suppressed because it is too large Load Diff

View File

@ -1,127 +0,0 @@
#ifndef __SD_TENSOR_GGML_HPP__
#define __SD_TENSOR_GGML_HPP__
#include <array>
#include <cstring>
#include <fstream>
#include <stdexcept>
#include <string>
#include <type_traits>
#include "ggml.h"
#include "tensor.hpp"
namespace sd {
// Maps a C++ element type to the corresponding ggml_type at compile time.
// Only the specializations below are supported; using any other element
// type fails to compile, which is the intended guard.
template <typename T>
struct GGMLTypeTraits;
template <>
struct GGMLTypeTraits<float> {
static constexpr ggml_type type = GGML_TYPE_F32;
};
template <>
struct GGMLTypeTraits<ggml_fp16_t> {
static constexpr ggml_type type = GGML_TYPE_F16;
};
template <>
struct GGMLTypeTraits<int32_t> {
static constexpr ggml_type type = GGML_TYPE_I32;
};
template <>
struct GGMLTypeTraits<int64_t> {
static constexpr ggml_type type = GGML_TYPE_I64;
};
// Copy a ggml tensor's significant extents (ne[0..n_dims)) into a shape
// vector, preserving ggml's dimension order.
inline std::vector<int64_t> shape_from_ggml(const ggml_tensor* tensor) {
    const int n_dims = ggml_n_dims(tensor);
    std::vector<int64_t> shape(static_cast<size_t>(n_dims));
    for (int i = 0; i < n_dims; ++i) {
        shape[static_cast<size_t>(i)] = tensor->ne[i];
    }
    return shape;
}
// Materialize a host-side sd::Tensor<T> from a ggml tensor, reading through
// the backend API when the tensor lives in a backend buffer (e.g. GPU) and
// via plain memcpy when its data is host-resident. A null tensor yields an
// empty result; a type mismatch aborts.
template <typename T>
inline Tensor<T> make_sd_tensor_from_ggml(const ggml_tensor* tensor) {
    if (tensor == nullptr) {
        return {};
    }
    if (tensor->type != GGMLTypeTraits<T>::type) {
        GGML_ABORT("ggml tensor type does not match sd::Tensor type");
    }
    Tensor<T> out(shape_from_ggml(tensor));
    const size_t nbytes = ggml_nbytes(tensor);
    if (tensor->buffer != nullptr) {
        // Backend-managed storage: data may not be directly addressable.
        ggml_backend_tensor_get(tensor, out.data(), 0, nbytes);
    } else {
        std::memcpy(out.data(), tensor->data, nbytes);
    }
    return out;
}
// Create a ggml tensor mirroring an sd::Tensor<T> (1-5 dims) in `ctx`,
// optionally copying the data. A 5-D tensor is folded into 4 ggml dims by
// merging dims 3 and 4 (ne[3] *= shape[4]).
template <typename T>
inline ggml_tensor* make_ggml_tensor(ggml_context* ctx, const Tensor<T>& tensor, bool copy_data = true) {
    GGML_ASSERT(tensor.dim() > 0 && tensor.dim() <= 5);
    int n_dims = std::min(static_cast<int>(tensor.dim()), GGML_MAX_DIMS);
    std::array<int64_t, GGML_MAX_DIMS> ne;
    // Fix: the previous `= {1, 1, 1, 1}` initializer zero-filled any slots
    // beyond the four listed, producing zero-sized dims if GGML_MAX_DIMS != 4.
    ne.fill(1);
    for (int64_t i = 0; i < n_dims; ++i) {
        ne[static_cast<size_t>(i)] = tensor.shape()[static_cast<size_t>(i)];
    }
    if (tensor.dim() == 5) {
        // Fold the 5th dimension into the 4th, since ggml tops out at 4 dims.
        ne[3] *= tensor.shape()[4];
    }
    ggml_tensor* result = ggml_new_tensor(ctx, GGMLTypeTraits<T>::type, n_dims, ne.data());
    if (copy_data && tensor.numel() > 0) {
        std::memcpy(result->data, tensor.data(), static_cast<size_t>(ggml_nbytes(result)));
    }
    return result;
}
// Load a tensor from the project's simple binary dump format:
//   int32 n_dims, int32 name_length, int32 ggml_type,
//   n_dims * int32 dims, name bytes, raw element data.
// Throws std::runtime_error on I/O failure or a malformed header, and
// std::invalid_argument when the stored ggml type does not match T.
template <typename T>
inline Tensor<T> load_tensor_from_file_as_tensor(const std::string& file_path) {
    std::ifstream file(file_path, std::ios::binary);
    if (!file.is_open()) {
        throw std::runtime_error("failed to open tensor file: " + file_path);
    }
    int32_t n_dims = 0;
    int32_t length = 0;
    int32_t ttype  = 0;
    file.read(reinterpret_cast<char*>(&n_dims), sizeof(n_dims));
    file.read(reinterpret_cast<char*>(&length), sizeof(length));
    file.read(reinterpret_cast<char*>(&ttype), sizeof(ttype));
    if (!file.good()) {
        throw std::runtime_error("incomplete tensor file header: " + file_path);
    }
    // Fix: validate header fields before using them as sizes. Previously an
    // n_dims > 4 from a corrupt file wrote past the 4-element shape vector,
    // and a negative length broke the name read below.
    if (n_dims < 1 || n_dims > 4 || length < 0) {
        throw std::runtime_error("invalid tensor file header: " + file_path);
    }
    if (static_cast<ggml_type>(ttype) != GGMLTypeTraits<T>::type) {
        throw std::invalid_argument("tensor file type does not match requested sd::Tensor type");
    }
    std::vector<int64_t> shape(static_cast<size_t>(n_dims), 1);
    for (int i = 0; i < n_dims; ++i) {
        int32_t dim = 1;
        file.read(reinterpret_cast<char*>(&dim), sizeof(dim));
        shape[static_cast<size_t>(i)] = dim;
    }
    // The stored tensor name is read to advance the stream but otherwise unused.
    std::string name(static_cast<size_t>(length), '\0');
    file.read(name.data(), length);
    Tensor<T> tensor(shape);
    file.read(reinterpret_cast<char*>(tensor.data()), static_cast<std::streamsize>(tensor.numel() * sizeof(T)));
    if (!file.good()) {
        throw std::runtime_error("incomplete tensor file data: " + file_path);
    }
    return tensor;
}
} // namespace sd
#endif

File diff suppressed because it is too large Load Diff

View File

@ -6,10 +6,8 @@
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include "condition_cache_utils.hpp"
#include "denoiser.hpp" #include "denoiser.hpp"
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
#include "tensor.hpp"
struct UCacheConfig { struct UCacheConfig {
bool enabled = false; bool enabled = false;
@ -31,15 +29,15 @@ struct UCacheCacheEntry {
struct UCacheState { struct UCacheState {
UCacheConfig config; UCacheConfig config;
Denoiser* denoiser = nullptr; Denoiser* denoiser = nullptr;
float start_sigma = std::numeric_limits<float>::max(); float start_sigma = std::numeric_limits<float>::max();
float end_sigma = 0.0f; float end_sigma = 0.0f;
bool initialized = false; bool initialized = false;
bool initial_step = true; bool initial_step = true;
bool skip_current_step = false; bool skip_current_step = false;
bool step_active = false; bool step_active = false;
const void* anchor_condition = nullptr; const SDCondition* anchor_condition = nullptr;
std::unordered_map<const void*, UCacheCacheEntry> cache_diffs; std::unordered_map<const SDCondition*, UCacheCacheEntry> cache_diffs;
std::vector<float> prev_input; std::vector<float> prev_input;
std::vector<float> prev_output; std::vector<float> prev_output;
float output_prev_norm = 0.0f; float output_prev_norm = 0.0f;
@ -235,30 +233,43 @@ struct UCacheState {
return base_threshold * multiplier; return base_threshold * multiplier;
} }
bool has_cache(const void* cond) const { bool has_cache(const SDCondition* cond) const {
auto it = cache_diffs.find(cond); auto it = cache_diffs.find(cond);
return it != cache_diffs.end() && !it->second.diff.empty(); return it != cache_diffs.end() && !it->second.diff.empty();
} }
void update_cache(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) { void update_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
UCacheCacheEntry& entry = cache_diffs[cond]; UCacheCacheEntry& entry = cache_diffs[cond];
sd::store_condition_cache_diff(&entry.diff, input, output); size_t ne = static_cast<size_t>(ggml_nelements(output));
entry.diff.resize(ne);
float* out_data = (float*)output->data;
float* in_data = (float*)input->data;
for (size_t i = 0; i < ne; ++i) {
entry.diff[i] = out_data[i] - in_data[i];
}
} }
void apply_cache(const void* cond, const sd::Tensor<float>& input, sd::Tensor<float>* output) { void apply_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
auto it = cache_diffs.find(cond); auto it = cache_diffs.find(cond);
if (it == cache_diffs.end() || it->second.diff.empty()) { if (it == cache_diffs.end() || it->second.diff.empty()) {
return; return;
} }
sd::apply_condition_cache_diff(it->second.diff, input, output);
copy_ggml_tensor(output, input);
float* out_data = (float*)output->data;
const std::vector<float>& diff = it->second.diff;
for (size_t i = 0; i < diff.size(); ++i) {
out_data[i] += diff[i];
}
} }
bool before_condition(const void* cond, bool before_condition(const SDCondition* cond,
const sd::Tensor<float>& input, ggml_tensor* input,
sd::Tensor<float>* output, ggml_tensor* output,
float sigma, float sigma,
int step_index) { int step_index) {
if (!enabled() || step_index < 0 || output == nullptr) { if (!enabled() || step_index < 0) {
return false; return false;
} }
if (step_index != current_step_index) { if (step_index != current_step_index) {
@ -291,13 +302,13 @@ struct UCacheState {
return false; return false;
} }
size_t ne = static_cast<size_t>(input.numel()); size_t ne = static_cast<size_t>(ggml_nelements(input));
if (prev_input.size() != ne) { if (prev_input.size() != ne) {
return false; return false;
} }
const float* input_data = input.data(); float* input_data = (float*)input->data;
last_input_change = 0.0f; last_input_change = 0.0f;
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
last_input_change += std::fabs(input_data[i] - prev_input[i]); last_input_change += std::fabs(input_data[i] - prev_input[i]);
} }
@ -343,7 +354,7 @@ struct UCacheState {
return false; return false;
} }
void after_condition(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) { void after_condition(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
if (!step_is_active()) { if (!step_is_active()) {
return; return;
} }
@ -356,16 +367,16 @@ struct UCacheState {
steps_computed_since_active++; steps_computed_since_active++;
consecutive_skipped_steps = 0; consecutive_skipped_steps = 0;
size_t ne = static_cast<size_t>(input.numel()); size_t ne = static_cast<size_t>(ggml_nelements(input));
const float* in_data = input.data(); float* in_data = (float*)input->data;
prev_input.resize(ne); prev_input.resize(ne);
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
prev_input[i] = in_data[i]; prev_input[i] = in_data[i];
} }
has_prev_input = true; has_prev_input = true;
const float* out_data = output.data(); float* out_data = (float*)output->data;
float output_change = 0.0f; float output_change = 0.0f;
if (has_prev_output && prev_output.size() == ne) { if (has_prev_output && prev_output.size() == ne) {
for (size_t i = 0; i < ne; ++i) { for (size_t i = 0; i < ne; ++i) {
output_change += std::fabs(out_data[i] - prev_output[i]); output_change += std::fabs(out_data[i] - prev_output[i]);

View File

@ -609,31 +609,30 @@ struct UNetModelRunner : public GGMLRunner {
unet.get_param_tensors(tensors, prefix); unet.get_param_tensors(tensors, prefix);
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor, ggml_cgraph* build_graph(ggml_tensor* x,
const sd::Tensor<float>& timesteps_tensor, ggml_tensor* timesteps,
const sd::Tensor<float>& context_tensor = {}, ggml_tensor* context,
const sd::Tensor<float>& c_concat_tensor = {}, ggml_tensor* c_concat = nullptr,
const sd::Tensor<float>& y_tensor = {}, ggml_tensor* y = nullptr,
int num_video_frames = -1, int num_video_frames = -1,
const std::vector<sd::Tensor<float>>& controls_tensor = {}, std::vector<ggml_tensor*> controls = {},
float control_strength = 0.f) { float control_strength = 0.f) {
ggml_cgraph* gf = new_graph_custom(UNET_GRAPH_SIZE); ggml_cgraph* gf = new_graph_custom(UNET_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* c_concat = make_optional_input(c_concat_tensor);
ggml_tensor* y = make_optional_input(y_tensor);
std::vector<ggml_tensor*> controls;
controls.reserve(controls_tensor.size());
for (const auto& control_tensor : controls_tensor) {
controls.push_back(make_input(control_tensor));
}
if (num_video_frames == -1) { if (num_video_frames == -1) {
num_video_frames = static_cast<int>(x->ne[3]); num_video_frames = static_cast<int>(x->ne[3]);
} }
x = to_backend(x);
context = to_backend(context);
y = to_backend(y);
timesteps = to_backend(timesteps);
c_concat = to_backend(c_concat);
for (int i = 0; i < controls.size(); i++) {
controls[i] = to_backend(controls[i]);
}
auto runner_ctx = get_context(); auto runner_ctx = get_context();
ggml_tensor* out = unet.forward(&runner_ctx, ggml_tensor* out = unet.forward(&runner_ctx,
@ -651,15 +650,17 @@ struct UNetModelRunner : public GGMLRunner {
return gf; return gf;
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const sd::Tensor<float>& x, ggml_tensor* x,
const sd::Tensor<float>& timesteps, ggml_tensor* timesteps,
const sd::Tensor<float>& context = {}, ggml_tensor* context,
const sd::Tensor<float>& c_concat = {}, ggml_tensor* c_concat,
const sd::Tensor<float>& y = {}, ggml_tensor* y,
int num_video_frames = -1, int num_video_frames = -1,
const std::vector<sd::Tensor<float>>& controls = {}, std::vector<ggml_tensor*> controls = {},
float control_strength = 0.f) { float control_strength = 0.f,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size] // context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
@ -669,7 +670,7 @@ struct UNetModelRunner : public GGMLRunner {
return build_graph(x, timesteps, context, c_concat, y, num_video_frames, controls, control_strength); return build_graph(x, timesteps, context, c_concat, y, num_video_frames, controls, control_strength);
}; };
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim()); return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
} }
void test() { void test() {
@ -678,8 +679,8 @@ struct UNetModelRunner : public GGMLRunner {
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
ggml_context* ctx = ggml_init(params); ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr); GGML_ASSERT(work_ctx != nullptr);
{ {
// CPU, num_video_frames = 1, x{num_video_frames, 8, 8, 8}: Pass // CPU, num_video_frames = 1, x{num_video_frames, 8, 8, 8}: Pass
@ -688,37 +689,27 @@ struct UNetModelRunner : public GGMLRunner {
// CUDA, num_video_frames = 3, x{num_video_frames, 8, 8, 8}: nan // CUDA, num_video_frames = 3, x{num_video_frames, 8, 8, 8}: nan
int num_video_frames = 3; int num_video_frames = 3;
sd::Tensor<float> x({8, 8, 8, num_video_frames}); auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 8, num_video_frames);
std::vector<float> timesteps_vec(num_video_frames, 999.f); std::vector<float> timesteps_vec(num_video_frames, 999.f);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec); auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
x.fill_(0.5f); ggml_set_f32(x, 0.5f);
// print_ggml_tensor(x); // print_ggml_tensor(x);
sd::Tensor<float> context({1024, 1, num_video_frames}); auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 1024, 1, num_video_frames);
context.fill_(0.5f); ggml_set_f32(context, 0.5f);
// print_ggml_tensor(context); // print_ggml_tensor(context);
sd::Tensor<float> y({768, num_video_frames}); auto y = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 768, num_video_frames);
y.fill_(0.5f); ggml_set_f32(y, 0.5f);
// print_ggml_tensor(y); // print_ggml_tensor(y);
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = compute(8, compute(8, x, timesteps, context, nullptr, y, num_video_frames, {}, 0.f, &out, work_ctx);
x, int64_t t1 = ggml_time_ms();
timesteps,
context,
{},
y,
num_video_frames,
{},
0.f);
int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("unet test done in %lldms", t1 - t0); LOG_DEBUG("unet test done in %lldms", t1 - t0);
} }
} }

View File

@ -2,7 +2,6 @@
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
#include "model.h" #include "model.h"
#include "stable-diffusion.h" #include "stable-diffusion.h"
#include "util.h"
struct UpscalerGGML { struct UpscalerGGML {
ggml_backend_t backend = nullptr; // general backend ggml_backend_t backend = nullptr; // general backend
@ -65,39 +64,6 @@ struct UpscalerGGML {
return true; return true;
} }
sd::Tensor<float> upscale_tensor(const sd::Tensor<float>& input_tensor) {
sd::Tensor<float> upscaled;
if (tile_size <= 0 || (input_tensor.shape()[0] <= tile_size && input_tensor.shape()[1] <= tile_size)) {
upscaled = esrgan_upscaler->compute(n_threads, input_tensor);
} else {
auto on_processing = [&](const sd::Tensor<float>& input_tile) -> sd::Tensor<float> {
auto output_tile = esrgan_upscaler->compute(n_threads, input_tile);
if (output_tile.empty()) {
LOG_ERROR("esrgan compute failed while processing a tile");
return {};
}
return output_tile;
};
upscaled = process_tiles_2d(input_tensor,
static_cast<int>(input_tensor.shape()[0] * esrgan_upscaler->scale),
static_cast<int>(input_tensor.shape()[1] * esrgan_upscaler->scale),
esrgan_upscaler->scale,
tile_size,
tile_size,
0.25f,
false,
false,
on_processing);
}
esrgan_upscaler->free_compute_buffer();
if (upscaled.empty()) {
LOG_ERROR("esrgan compute failed");
return {};
}
return upscaled;
}
sd_image_t upscale(sd_image_t input_image, uint32_t upscale_factor) { sd_image_t upscale(sd_image_t input_image, uint32_t upscale_factor) {
// upscale_factor, unused for RealESRGAN_x4plus_anime_6B.pth // upscale_factor, unused for RealESRGAN_x4plus_anime_6B.pth
sd_image_t upscaled_image = {0, 0, 0, nullptr}; sd_image_t upscaled_image = {0, 0, 0, nullptr};
@ -106,17 +72,40 @@ struct UpscalerGGML {
LOG_INFO("upscaling from (%i x %i) to (%i x %i)", LOG_INFO("upscaling from (%i x %i) to (%i x %i)",
input_image.width, input_image.height, output_width, output_height); input_image.width, input_image.height, output_width, output_height);
sd::Tensor<float> input_tensor = sd_image_to_tensor(input_image); ggml_init_params params;
sd::Tensor<float> upscaled; params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
int64_t t0 = ggml_time_ms(); params.mem_buffer = nullptr;
upscaled = upscale_tensor(input_tensor); params.no_alloc = false;
if (upscaled.empty()) {
// draft context
ggml_context* upscale_ctx = ggml_init(params);
if (!upscale_ctx) {
LOG_ERROR("ggml_init() failed");
return upscaled_image; return upscaled_image;
} }
sd_image_t upscaled_data = tensor_to_sd_image(upscaled); // LOG_DEBUG("upscale work buffer size: %.2f MB", params.mem_size / 1024.f / 1024.f);
int64_t t3 = ggml_time_ms(); ggml_tensor* input_image_tensor = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, input_image.width, input_image.height, 3, 1);
sd_image_to_ggml_tensor(input_image, input_image_tensor);
ggml_tensor* upscaled = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, output_width, output_height, 3, 1);
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
return esrgan_upscaler->compute(n_threads, in, &out);
};
int64_t t0 = ggml_time_ms();
// TODO: circular upscaling?
sd_tiling(input_image_tensor, upscaled, esrgan_upscaler->scale, esrgan_upscaler->tile_size, 0.25f, false, false, on_tiling);
esrgan_upscaler->free_compute_buffer();
ggml_ext_tensor_clamp_inplace(upscaled, 0.f, 1.f);
uint8_t* upscaled_data = ggml_tensor_to_sd_image(upscaled);
ggml_free(upscale_ctx);
int64_t t3 = ggml_time_ms();
LOG_INFO("input_image_tensor upscaled, taking %.2fs", (t3 - t0) / 1000.0f); LOG_INFO("input_image_tensor upscaled, taking %.2fs", (t3 - t0) / 1000.0f);
upscaled_image = upscaled_data; upscaled_image = {
(uint32_t)output_width,
(uint32_t)output_height,
3,
upscaled_data,
};
return upscaled_image; return upscaled_image;
} }
}; };

View File

@ -479,96 +479,158 @@ const char* sd_get_system_info() {
return buffer; return buffer;
} }
sd_image_t tensor_to_sd_image(const sd::Tensor<float>& tensor, int frame_index) { sd_image_f32_t sd_image_t_to_sd_image_f32_t(sd_image_t image) {
const auto& shape = tensor.shape(); sd_image_f32_t converted_image;
GGML_ASSERT(shape.size() == 4 || shape.size() == 5); converted_image.width = image.width;
int width = static_cast<int>(shape[0]); converted_image.height = image.height;
int height = static_cast<int>(shape[1]); converted_image.channel = image.channel;
int channel = static_cast<int>(shape[shape.size() == 5 ? 3 : 2]);
uint8_t* data = (uint8_t*)malloc(static_cast<size_t>(width * height * channel));
GGML_ASSERT(data != nullptr);
for (int iw = 0; iw < width; ++iw) { // Allocate memory for float data
for (int ih = 0; ih < height; ++ih) { converted_image.data = (float*)malloc(image.width * image.height * image.channel * sizeof(float));
for (int ic = 0; ic < channel; ++ic) {
float value = shape.size() == 5 ? tensor.index(iw, ih, frame_index, ic, 0) for (uint32_t i = 0; i < image.width * image.height * image.channel; i++) {
: tensor.index(iw, ih, ic, frame_index); // Convert uint8_t to float
value = std::clamp(value, 0.0f, 1.0f); converted_image.data[i] = (float)image.data[i];
data[(ih * width + iw) * channel + ic] = static_cast<uint8_t>(std::round(value * 255.0f));
}
}
} }
return {
static_cast<uint32_t>(width), return converted_image;
static_cast<uint32_t>(height),
static_cast<uint32_t>(channel),
data,
};
} }
sd::Tensor<float> sd_image_to_tensor(sd_image_t image, // Function to perform double linear interpolation
int target_width, float interpolate(float v1, float v2, float v3, float v4, float x_ratio, float y_ratio) {
int target_height, return v1 * (1 - x_ratio) * (1 - y_ratio) + v2 * x_ratio * (1 - y_ratio) + v3 * (1 - x_ratio) * y_ratio + v4 * x_ratio * y_ratio;
bool scale) { }
sd::Tensor<float> tensor = sd::zeros<float>({static_cast<int64_t>(image.width),
static_cast<int64_t>(image.height), sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int target_height) {
static_cast<int64_t>(image.channel), sd_image_f32_t resized_image;
1}); resized_image.width = target_width;
for (uint32_t iw = 0; iw < image.width; ++iw) { resized_image.height = target_height;
for (uint32_t ih = 0; ih < image.height; ++ih) { resized_image.channel = image.channel;
for (uint32_t ic = 0; ic < image.channel; ++ic) {
tensor.index(iw, ih, ic, 0) = sd_image_get_f32(image, iw, ih, ic, scale); // Allocate memory for resized float data
resized_image.data = (float*)malloc(target_width * target_height * image.channel * sizeof(float));
for (int y = 0; y < target_height; y++) {
for (int x = 0; x < target_width; x++) {
float original_x = (float)x * image.width / target_width;
float original_y = (float)y * image.height / target_height;
uint32_t x1 = (uint32_t)original_x;
uint32_t y1 = (uint32_t)original_y;
uint32_t x2 = std::min(x1 + 1, image.width - 1);
uint32_t y2 = std::min(y1 + 1, image.height - 1);
for (uint32_t k = 0; k < image.channel; k++) {
float v1 = *(image.data + y1 * image.width * image.channel + x1 * image.channel + k);
float v2 = *(image.data + y1 * image.width * image.channel + x2 * image.channel + k);
float v3 = *(image.data + y2 * image.width * image.channel + x1 * image.channel + k);
float v4 = *(image.data + y2 * image.width * image.channel + x2 * image.channel + k);
float x_ratio = original_x - x1;
float y_ratio = original_y - y1;
float value = interpolate(v1, v2, v3, v4, x_ratio, y_ratio);
*(resized_image.data + y * target_width * image.channel + x * image.channel + k) = value;
} }
} }
} }
if (target_width >= 0 && target_height >= 0 &&
(tensor.shape()[0] != target_width || tensor.shape()[1] != target_height)) { return resized_image;
tensor = sd::ops::interpolate(tensor, }
{target_width,
target_height, void normalize_sd_image_f32_t(sd_image_f32_t image, float means[3], float stds[3]) {
tensor.shape()[2], for (uint32_t y = 0; y < image.height; y++) {
tensor.shape()[3]}); for (uint32_t x = 0; x < image.width; x++) {
for (uint32_t k = 0; k < image.channel; k++) {
int index = (y * image.width + x) * image.channel + k;
image.data[index] = (image.data[index] - means[k]) / stds[k];
}
}
} }
return tensor;
} }
// Constants for means and std // Constants for means and std
float means[3] = {0.48145466f, 0.4578275f, 0.40821073f}; float means[3] = {0.48145466f, 0.4578275f, 0.40821073f};
float stds[3] = {0.26862954f, 0.26130258f, 0.27577711f}; float stds[3] = {0.26862954f, 0.26130258f, 0.27577711f};
sd::Tensor<float> clip_preprocess(const sd::Tensor<float>& image, int target_width, int target_height) { // Function to clip and preprocess sd_image_f32_t
GGML_ASSERT(image.dim() == 4); sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int target_height) {
GGML_ASSERT(image.shape()[2] == 3); float width_scale = (float)target_width / image.width;
GGML_ASSERT(image.shape()[3] == 1); float height_scale = (float)target_height / image.height;
GGML_ASSERT(target_width > 0 && target_height > 0);
float width_scale = static_cast<float>(target_width) / static_cast<float>(image.shape()[0]); float scale = std::fmax(width_scale, height_scale);
float height_scale = static_cast<float>(target_height) / static_cast<float>(image.shape()[1]);
float scale = std::fmax(width_scale, height_scale);
int64_t resized_width = static_cast<int64_t>(scale * static_cast<float>(image.shape()[0])); // Interpolation
int64_t resized_height = static_cast<int64_t>(scale * static_cast<float>(image.shape()[1])); int resized_width = (int)(scale * image.width);
int resized_height = (int)(scale * image.height);
float* resized_data = (float*)malloc(resized_width * resized_height * image.channel * sizeof(float));
sd::Tensor<float> resized = sd::ops::interpolate( for (int y = 0; y < resized_height; y++) {
image, for (int x = 0; x < resized_width; x++) {
{resized_width, resized_height, image.shape()[2], image.shape()[3]}); float original_x = (float)x * image.width / resized_width;
float original_y = (float)y * image.height / resized_height;
int64_t h_offset = std::max<int64_t>((resized_height - target_height) / 2, 0); uint32_t x1 = (uint32_t)original_x;
int64_t w_offset = std::max<int64_t>((resized_width - target_width) / 2, 0); uint32_t y1 = (uint32_t)original_y;
uint32_t x2 = std::min(x1 + 1, image.width - 1);
uint32_t y2 = std::min(y1 + 1, image.height - 1);
sd::Tensor<float> cropped({target_width, target_height, image.shape()[2], image.shape()[3]}); for (uint32_t k = 0; k < image.channel; k++) {
for (int64_t y = 0; y < target_height; ++y) { float v1 = *(image.data + y1 * image.width * image.channel + x1 * image.channel + k);
for (int64_t x = 0; x < target_width; ++x) { float v2 = *(image.data + y1 * image.width * image.channel + x2 * image.channel + k);
for (int64_t c = 0; c < image.shape()[2]; ++c) { float v3 = *(image.data + y2 * image.width * image.channel + x1 * image.channel + k);
cropped.index(x, y, c, 0) = resized.index(x + w_offset, y + h_offset, c, 0); float v4 = *(image.data + y2 * image.width * image.channel + x2 * image.channel + k);
float x_ratio = original_x - x1;
float y_ratio = original_y - y1;
float value = interpolate(v1, v2, v3, v4, x_ratio, y_ratio);
*(resized_data + y * resized_width * image.channel + x * image.channel + k) = value;
} }
} }
} }
sd::Tensor<float> normalized = sd::ops::clamp(cropped, 0.0f, 1.0f); // Clip and preprocess
sd::Tensor<float> mean({1, 1, 3, 1}, {means[0], means[1], means[2]}); int h_offset = std::max((int)(resized_height - target_height) / 2, 0);
sd::Tensor<float> std({1, 1, 3, 1}, {stds[0], stds[1], stds[2]}); int w_offset = std::max((int)(resized_width - target_width) / 2, 0);
return (normalized - mean) / std;
sd_image_f32_t result;
result.width = target_width;
result.height = target_height;
result.channel = image.channel;
result.data = (float*)malloc(target_height * target_width * image.channel * sizeof(float));
for (uint32_t k = 0; k < image.channel; k++) {
for (uint32_t i = 0; i < result.height; i++) {
for (uint32_t j = 0; j < result.width; j++) {
int src_y = std::min(static_cast<int>(i + h_offset), resized_height - 1);
int src_x = std::min(static_cast<int>(j + w_offset), resized_width - 1);
*(result.data + i * result.width * image.channel + j * image.channel + k) =
fmin(fmax(*(resized_data + src_y * resized_width * image.channel + src_x * image.channel + k), 0.0f), 255.0f) / 255.0f;
}
}
}
// Free allocated memory
free(resized_data);
// Normalize
for (uint32_t k = 0; k < image.channel; k++) {
for (uint32_t i = 0; i < result.height; i++) {
for (uint32_t j = 0; j < result.width; j++) {
// *(result.data + i * size * image.channel + j * image.channel + k) = 0.5f;
int offset = i * result.width * image.channel + j * image.channel + k;
float value = *(result.data + offset);
value = (value - means[k]) / stds[k];
// value = 0.5f;
*(result.data + offset) = value;
}
}
}
return result;
} }
// Ref: https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/cad87bf4e3e0b0a759afa94e933527c3123d59bc/modules/prompt_parser.py#L345 // Ref: https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/cad87bf4e3e0b0a759afa94e933527c3123d59bc/modules/prompt_parser.py#L345

View File

@ -7,7 +7,6 @@
#include <vector> #include <vector>
#include "stable-diffusion.h" #include "stable-diffusion.h"
#include "tensor.hpp"
#define SAFE_STR(s) ((s) ? (s) : "") #define SAFE_STR(s) ((s) ? (s) : "")
#define BOOL_STR(b) ((b) ? "true" : "false") #define BOOL_STR(b) ((b) ? "true" : "false")
@ -30,14 +29,20 @@ std::string utf32_to_utf8(const std::u32string& utf32_str);
std::u32string unicode_value_to_utf32(int unicode_value); std::u32string unicode_value_to_utf32(int unicode_value);
// std::string sd_basename(const std::string& path); // std::string sd_basename(const std::string& path);
sd_image_t tensor_to_sd_image(const sd::Tensor<float>& tensor, int frame_index = 0); typedef struct {
uint32_t width;
uint32_t height;
uint32_t channel;
float* data;
} sd_image_f32_t;
sd::Tensor<float> sd_image_to_tensor(sd_image_t image, void normalize_sd_image_f32_t(sd_image_f32_t image, float means[3], float stds[3]);
int target_width = -1,
int target_height = -1,
bool scale = true);
sd::Tensor<float> clip_preprocess(const sd::Tensor<float>& image, int target_width, int target_height); sd_image_f32_t sd_image_t_to_sd_image_f32_t(sd_image_t image);
sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int target_height);
sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int target_height);
class MmapWrapper { class MmapWrapper {
public: public:

View File

@ -2,64 +2,16 @@
#define __VAE_HPP__ #define __VAE_HPP__
#include "common_block.hpp" #include "common_block.hpp"
#include "tensor_ggml.hpp"
struct VAE : public GGMLRunner { struct VAE : public GGMLRunner {
protected: protected:
SDVersion version; SDVersion version;
bool scale_input = true; bool scale_input = true;
virtual sd::Tensor<float> _compute(const int n_threads, virtual bool _compute(const int n_threads,
const sd::Tensor<float>& z, ggml_tensor* z,
bool decode_graph) = 0; bool decode_graph,
ggml_tensor** output,
static inline void scale_tensor_to_minus1_1(sd::Tensor<float>* tensor) { ggml_context* output_ctx) = 0;
GGML_ASSERT(tensor != nullptr);
for (int64_t i = 0; i < tensor->numel(); ++i) {
(*tensor)[i] = (*tensor)[i] * 2.0f - 1.0f;
}
}
static inline void scale_tensor_to_0_1(sd::Tensor<float>* tensor) {
GGML_ASSERT(tensor != nullptr);
for (int64_t i = 0; i < tensor->numel(); ++i) {
float value = ((*tensor)[i] + 1.0f) * 0.5f;
(*tensor)[i] = std::max(0.0f, std::min(1.0f, value));
}
}
sd::Tensor<float> tiled_compute(const sd::Tensor<float>& input,
int n_threads,
int output_width,
int output_height,
int scale,
int p_tile_size_x,
int p_tile_size_y,
float tile_overlap_factor,
bool circular_x,
bool circular_y,
bool decode_graph,
const char* error_message,
bool silent = false) {
auto on_processing = [&](const sd::Tensor<float>& input_tile) {
auto output_tile = _compute(n_threads, input_tile, decode_graph);
if (output_tile.empty()) {
LOG_ERROR("%s", error_message);
return sd::Tensor<float>();
}
return output_tile;
};
return ::process_tiles_2d(input,
output_width,
output_height,
scale,
p_tile_size_x,
p_tile_size_y,
tile_overlap_factor,
circular_x,
circular_y,
on_processing,
silent);
}
public: public:
VAE(SDVersion version, ggml_backend_t backend, bool offload_params_to_cpu) VAE(SDVersion version, ggml_backend_t backend, bool offload_params_to_cpu)
@ -108,109 +60,133 @@ public:
tile_size_y = get_tile_size(params.tile_size_y, params.rel_size_y, latent_y); tile_size_y = get_tile_size(params.tile_size_y, params.rel_size_y, latent_y);
} }
sd::Tensor<float> encode(int n_threads, ggml_tensor* encode(int n_threads,
const sd::Tensor<float>& x, ggml_context* work_ctx,
sd_tiling_params_t tiling_params, ggml_tensor* x,
bool circular_x = false, sd_tiling_params_t tiling_params,
bool circular_y = false) { bool circular_x = false,
int64_t t0 = ggml_time_ms(); bool circular_y = false) {
sd::Tensor<float> input = x; int64_t t0 = ggml_time_ms();
sd::Tensor<float> output; ggml_tensor* result = nullptr;
const int scale_factor = get_scale_factor();
int64_t W = x->ne[0] / scale_factor;
int64_t H = x->ne[1] / scale_factor;
int channel_dim = sd_version_is_wan(version) ? 3 : 2;
int64_t C = get_encoder_output_channels(static_cast<int>(x->ne[channel_dim]));
int64_t ne2;
int64_t ne3;
if (sd_version_is_wan(version)) {
int64_t T = x->ne[2];
ne2 = (T - 1) / 4 + 1;
ne3 = C;
} else {
ne2 = C;
ne3 = x->ne[3];
}
result = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, W, H, ne2, ne3);
if (scale_input) { if (scale_input) {
scale_tensor_to_minus1_1(&input); scale_to_minus1_1(x);
}
if (sd_version_is_qwen_image(version) || sd_version_is_anima(version)) {
x = ggml_reshape_4d(work_ctx, x, x->ne[0], x->ne[1], 1, x->ne[2] * x->ne[3]);
} }
if (tiling_params.enabled) { if (tiling_params.enabled) {
const int scale_factor = get_scale_factor();
int64_t W = input.shape()[0] / scale_factor;
int64_t H = input.shape()[1] / scale_factor;
float tile_overlap; float tile_overlap;
int tile_size_x, tile_size_y; int tile_size_x, tile_size_y;
// multiply tile size for encode to keep the compute buffer size consistent
get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, tiling_params, W, H, 1.30539f); get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, tiling_params, W, H, 1.30539f);
LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y);
output = tiled_compute(input,
n_threads,
static_cast<int>(W),
static_cast<int>(H),
scale_factor,
tile_size_x,
tile_size_y,
tile_overlap,
circular_x,
circular_y,
false,
"vae encode compute failed while processing a tile");
} else {
output = _compute(n_threads, input, false);
free_compute_buffer();
}
if (output.empty()) { LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y);
LOG_ERROR("vae encode compute failed");
return {}; auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
return _compute(n_threads, in, false, &out, work_ctx);
};
sd_tiling_non_square(x, result, scale_factor, tile_size_x, tile_size_y, tile_overlap, circular_x, circular_y, on_tiling);
} else {
_compute(n_threads, x, false, &result, work_ctx);
} }
free_compute_buffer();
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
LOG_DEBUG("computing vae encode graph completed, taking %.2fs", (t1 - t0) * 1.0f / 1000); LOG_DEBUG("computing vae encode graph completed, taking %.2fs", (t1 - t0) * 1.0f / 1000);
return std::move(output); return result;
} }
sd::Tensor<float> decode(int n_threads, ggml_tensor* decode(int n_threads,
const sd::Tensor<float>& x, ggml_context* work_ctx,
sd_tiling_params_t tiling_params, ggml_tensor* x,
bool decode_video = false, sd_tiling_params_t tiling_params,
bool circular_x = false, bool decode_video = false,
bool circular_y = false, bool circular_x = false,
bool silent = false) { bool circular_y = false,
int64_t t0 = ggml_time_ms(); ggml_tensor* result = nullptr,
sd::Tensor<float> input = x; bool silent = false) {
sd::Tensor<float> output; const int scale_factor = get_scale_factor();
int64_t W = x->ne[0] * scale_factor;
int64_t H = x->ne[1] * scale_factor;
int64_t C = 3;
if (result == nullptr) {
if (decode_video) {
int64_t T = x->ne[2];
if (sd_version_is_wan(version)) {
T = ((T - 1) * 4) + 1;
}
result = ggml_new_tensor_4d(work_ctx,
GGML_TYPE_F32,
W,
H,
T,
3);
} else {
result = ggml_new_tensor_4d(work_ctx,
GGML_TYPE_F32,
W,
H,
C,
x->ne[3]);
}
}
int64_t t0 = ggml_time_ms();
if (sd_version_is_qwen_image(version) || sd_version_is_anima(version)) {
x = ggml_reshape_4d(work_ctx, x, x->ne[0], x->ne[1], 1, x->ne[2] * x->ne[3]);
}
if (tiling_params.enabled) { if (tiling_params.enabled) {
const int scale_factor = get_scale_factor();
int64_t W = input.shape()[0] * scale_factor;
int64_t H = input.shape()[1] * scale_factor;
float tile_overlap; float tile_overlap;
int tile_size_x, tile_size_y; int tile_size_x, tile_size_y;
get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, tiling_params, input.shape()[0], input.shape()[1]); get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, tiling_params, x->ne[0], x->ne[1]);
if (!silent) { if (!silent) {
LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y); LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y);
} }
output = tiled_compute(
input, auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
n_threads, return _compute(n_threads, in, true, &out, nullptr);
static_cast<int>(W), };
static_cast<int>(H), sd_tiling_non_square(x, result, scale_factor, tile_size_x, tile_size_y, tile_overlap, circular_x, circular_y, on_tiling, silent);
scale_factor,
tile_size_x,
tile_size_y,
tile_overlap,
circular_x,
circular_y,
true,
"vae decode compute failed while processing a tile",
silent);
} else { } else {
output = _compute(n_threads, input, true); if (!_compute(n_threads, x, true, &result, work_ctx)) {
LOG_ERROR("Failed to decode latetnts");
free_compute_buffer();
return nullptr;
}
} }
free_compute_buffer(); free_compute_buffer();
if (output.empty()) {
LOG_ERROR("vae decode compute failed");
return {};
}
if (scale_input) { if (scale_input) {
scale_tensor_to_0_1(&output); scale_to_0_1(result);
} }
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
LOG_DEBUG("computing vae decode graph completed, taking %.2fs", (t1 - t0) * 1.0f / 1000); LOG_DEBUG("computing vae decode graph completed, taking %.2fs", (t1 - t0) * 1.0f / 1000);
return std::move(output); ggml_ext_tensor_clamp_inplace(result, 0.0f, 1.0f);
return result;
} }
virtual sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) = 0; virtual ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) = 0;
virtual sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) = 0; virtual ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) = 0;
virtual sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) = 0; virtual ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) = 0;
virtual void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) = 0; virtual void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) = 0;
virtual void set_conv2d_scale(float scale) { SD_UNUSED(scale); }; virtual void set_conv2d_scale(float scale) { SD_UNUSED(scale); };
}; };
@ -222,25 +198,31 @@ struct FakeVAE : public VAE {
return input_channels; return input_channels;
} }
sd::Tensor<float> _compute(const int n_threads, bool _compute(const int n_threads,
const sd::Tensor<float>& z, ggml_tensor* z,
bool decode_graph) override { bool decode_graph,
SD_UNUSED(n_threads); ggml_tensor** output,
SD_UNUSED(decode_graph); ggml_context* output_ctx) override {
return z; if (*output == nullptr && output_ctx != nullptr) {
*output = ggml_dup_tensor(output_ctx, z);
}
ggml_ext_tensor_iter(z, [&](ggml_tensor* z, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float value = ggml_ext_tensor_get_f32(z, i0, i1, i2, i3);
ggml_ext_tensor_set_f32(*output, value, i0, i1, i2, i3);
});
return true;
} }
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override { ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
SD_UNUSED(rng);
return vae_output; return vae_output;
} }
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override { ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
return latents; return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
} }
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override { ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
return latents; return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
} }
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) override {} void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) override {}

View File

@ -1131,66 +1131,105 @@ namespace WAN {
ae.get_param_tensors(tensors, prefix); ae.get_param_tensors(tensors, prefix);
} }
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override { ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
SD_UNUSED(rng);
return vae_output; return vae_output;
} }
std::pair<sd::Tensor<float>, sd::Tensor<float>> get_latents_mean_std(const sd::Tensor<float>& latents) { void get_latents_mean_std_vec(ggml_tensor* latents, int channel_dim, std::vector<float>& latents_mean_vec, std::vector<float>& latents_std_vec) {
int channel_dim = latents.dim() == 5 ? 3 : 2; GGML_ASSERT(latents->ne[channel_dim] == 16 || latents->ne[channel_dim] == 48);
std::vector<int64_t> stats_shape(static_cast<size_t>(latents.dim()), 1); if (latents->ne[channel_dim] == 16) { // Wan2.1 VAE
if (latents.shape()[channel_dim] == 16) { // Wan2.1 VAE latents_mean_vec = {-0.7571f, -0.7089f, -0.9113f, 0.1075f, -0.1745f, 0.9653f, -0.1517f, 1.5508f,
stats_shape[static_cast<size_t>(channel_dim)] = 16; 0.4134f, -0.0715f, 0.5517f, -0.3632f, -0.1922f, -0.9497f, 0.2503f, -0.2921f};
latents_std_vec = {2.8184f, 1.4541f, 2.3275f, 2.6558f, 1.2196f, 1.7708f, 2.6052f, 2.0743f,
auto mean_tensor = sd::Tensor<float>::from_vector({-0.7571f, -0.7089f, -0.9113f, 0.1075f, -0.1745f, 0.9653f, -0.1517f, 1.5508f, 3.2687f, 2.1526f, 2.8652f, 1.5579f, 1.6382f, 1.1253f, 2.8251f, 1.9160f};
0.4134f, -0.0715f, 0.5517f, -0.3632f, -0.1922f, -0.9497f, 0.2503f, -0.2921f}); } else if (latents->ne[channel_dim] == 48) { // Wan2.2 VAE
mean_tensor.reshape_(stats_shape); latents_mean_vec = {-0.2289f, -0.0052f, -0.1323f, -0.2339f, -0.2799f, 0.0174f, 0.1838f, 0.1557f,
auto std_tensor = sd::Tensor<float>::from_vector({2.8184f, 1.4541f, 2.3275f, 2.6558f, 1.2196f, 1.7708f, 2.6052f, 2.0743f, -0.1382f, 0.0542f, 0.2813f, 0.0891f, 0.1570f, -0.0098f, 0.0375f, -0.1825f,
3.2687f, 2.1526f, 2.8652f, 1.5579f, 1.6382f, 1.1253f, 2.8251f, 1.9160f}); -0.2246f, -0.1207f, -0.0698f, 0.5109f, 0.2665f, -0.2108f, -0.2158f, 0.2502f,
std_tensor.reshape_(stats_shape); -0.2055f, -0.0322f, 0.1109f, 0.1567f, -0.0729f, 0.0899f, -0.2799f, -0.1230f,
return {std::move(mean_tensor), std::move(std_tensor)}; -0.0313f, -0.1649f, 0.0117f, 0.0723f, -0.2839f, -0.2083f, -0.0520f, 0.3748f,
0.0152f, 0.1957f, 0.1433f, -0.2944f, 0.3573f, -0.0548f, -0.1681f, -0.0667f};
latents_std_vec = {
0.4765f, 1.0364f, 0.4514f, 1.1677f, 0.5313f, 0.4990f, 0.4818f, 0.5013f,
0.8158f, 1.0344f, 0.5894f, 1.0901f, 0.6885f, 0.6165f, 0.8454f, 0.4978f,
0.5759f, 0.3523f, 0.7135f, 0.6804f, 0.5833f, 1.4146f, 0.8986f, 0.5659f,
0.7069f, 0.5338f, 0.4889f, 0.4917f, 0.4069f, 0.4999f, 0.6866f, 0.4093f,
0.5709f, 0.6065f, 0.6415f, 0.4944f, 0.5726f, 1.2042f, 0.5458f, 1.6887f,
0.3971f, 1.0600f, 0.3943f, 0.5537f, 0.5444f, 0.4089f, 0.7468f, 0.7744f};
} }
if (latents.shape()[channel_dim] == 48) { // Wan2.2 VAE
stats_shape[static_cast<size_t>(channel_dim)] = 48;
auto mean_tensor = sd::Tensor<float>::from_vector({-0.2289f, -0.0052f, -0.1323f, -0.2339f, -0.2799f, 0.0174f, 0.1838f, 0.1557f,
-0.1382f, 0.0542f, 0.2813f, 0.0891f, 0.1570f, -0.0098f, 0.0375f, -0.1825f,
-0.2246f, -0.1207f, -0.0698f, 0.5109f, 0.2665f, -0.2108f, -0.2158f, 0.2502f,
-0.2055f, -0.0322f, 0.1109f, 0.1567f, -0.0729f, 0.0899f, -0.2799f, -0.1230f,
-0.0313f, -0.1649f, 0.0117f, 0.0723f, -0.2839f, -0.2083f, -0.0520f, 0.3748f,
0.0152f, 0.1957f, 0.1433f, -0.2944f, 0.3573f, -0.0548f, -0.1681f, -0.0667f});
mean_tensor.reshape_(stats_shape);
auto std_tensor = sd::Tensor<float>::from_vector({0.4765f, 1.0364f, 0.4514f, 1.1677f, 0.5313f, 0.4990f, 0.4818f, 0.5013f,
0.8158f, 1.0344f, 0.5894f, 1.0901f, 0.6885f, 0.6165f, 0.8454f, 0.4978f,
0.5759f, 0.3523f, 0.7135f, 0.6804f, 0.5833f, 1.4146f, 0.8986f, 0.5659f,
0.7069f, 0.5338f, 0.4889f, 0.4917f, 0.4069f, 0.4999f, 0.6866f, 0.4093f,
0.5709f, 0.6065f, 0.6415f, 0.4944f, 0.5726f, 1.2042f, 0.5458f, 1.6887f,
0.3971f, 1.0600f, 0.3943f, 0.5537f, 0.5444f, 0.4089f, 0.7468f, 0.7744f});
std_tensor.reshape_(stats_shape);
return {std::move(mean_tensor), std::move(std_tensor)};
}
GGML_ABORT("unexpected latent channel dimension %lld for version %d",
(long long)latents.shape()[channel_dim],
version);
} }
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override { ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents); ggml_tensor* vae_latents = ggml_dup(work_ctx, latents);
return (latents * std_tensor) / scale_factor + mean_tensor; int channel_dim = sd_version_is_wan(version) ? 3 : 2;
std::vector<float> latents_mean_vec;
std::vector<float> latents_std_vec;
get_latents_mean_std_vec(latents, channel_dim, latents_mean_vec, latents_std_vec);
float mean;
float std_;
for (int i = 0; i < latents->ne[3]; i++) {
if (channel_dim == 3) {
mean = latents_mean_vec[i];
std_ = latents_std_vec[i];
}
for (int j = 0; j < latents->ne[2]; j++) {
if (channel_dim == 2) {
mean = latents_mean_vec[j];
std_ = latents_std_vec[j];
}
for (int k = 0; k < latents->ne[1]; k++) {
for (int l = 0; l < latents->ne[0]; l++) {
float value = ggml_ext_tensor_get_f32(latents, l, k, j, i);
value = value * std_ / scale_factor + mean;
ggml_ext_tensor_set_f32(vae_latents, value, l, k, j, i);
}
}
}
}
return vae_latents;
} }
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override { ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents); ggml_tensor* diffusion_latents = ggml_dup(work_ctx, latents);
return ((latents - mean_tensor) * scale_factor) / std_tensor; int channel_dim = sd_version_is_wan(version) ? 3 : 2;
std::vector<float> latents_mean_vec;
std::vector<float> latents_std_vec;
get_latents_mean_std_vec(latents, channel_dim, latents_mean_vec, latents_std_vec);
float mean;
float std_;
for (int i = 0; i < latents->ne[3]; i++) {
if (channel_dim == 3) {
mean = latents_mean_vec[i];
std_ = latents_std_vec[i];
}
for (int j = 0; j < latents->ne[2]; j++) {
if (channel_dim == 2) {
mean = latents_mean_vec[j];
std_ = latents_std_vec[j];
}
for (int k = 0; k < latents->ne[1]; k++) {
for (int l = 0; l < latents->ne[0]; l++) {
float value = ggml_ext_tensor_get_f32(latents, l, k, j, i);
value = (value - mean) * scale_factor / std_;
ggml_ext_tensor_set_f32(diffusion_latents, value, l, k, j, i);
}
}
}
}
return diffusion_latents;
} }
int get_encoder_output_channels(int input_channels) { int get_encoder_output_channels(int input_channels) {
return static_cast<int>(ae.z_dim); return static_cast<int>(ae.z_dim);
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) { ggml_cgraph* build_graph(ggml_tensor* z, bool decode_graph) {
ggml_cgraph* gf = new_graph_custom(10240 * z_tensor.shape()[2]); ggml_cgraph* gf = new_graph_custom(10240 * z->ne[2]);
ggml_tensor* z = make_input(z_tensor);
z = to_backend(z);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
@ -1201,7 +1240,7 @@ namespace WAN {
return gf; return gf;
} }
ggml_cgraph* build_graph_partial(const sd::Tensor<float>& z_tensor, bool decode_graph, int i) { ggml_cgraph* build_graph_partial(ggml_tensor* z, bool decode_graph, int i) {
ggml_cgraph* gf = new_graph_custom(20480); ggml_cgraph* gf = new_graph_custom(20480);
ae.clear_cache(); ae.clear_cache();
@ -1211,7 +1250,7 @@ namespace WAN {
ae._feat_map[feat_idx] = feat_cache; ae._feat_map[feat_idx] = feat_cache;
} }
ggml_tensor* z = make_input(z_tensor); z = to_backend(z);
auto runner_ctx = get_context(); auto runner_ctx = get_context();
@ -1230,57 +1269,58 @@ namespace WAN {
return gf; return gf;
} }
sd::Tensor<float> _compute(const int n_threads, bool _compute(const int n_threads,
const sd::Tensor<float>& z, ggml_tensor* z,
bool decode_graph) override { bool decode_graph,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) override {
if (true) { if (true) {
sd::Tensor<float> input;
if (z.dim() == 4) {
input = z.unsqueeze(2);
}
auto get_graph = [&]() -> ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
if (input.empty()) { return build_graph(z, decode_graph);
return build_graph(z, decode_graph);
} else {
return build_graph(input, decode_graph);
}
}; };
auto result = restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, true), return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
input.empty() ? z.dim() : input.dim());
if (!result.empty() && z.dim() == 4) {
result.squeeze_(2);
}
return result;
} else { // chunk 1 result is weird } else { // chunk 1 result is weird
ae.clear_cache(); ae.clear_cache();
int64_t t = z.shape()[2]; int64_t t = z->ne[2];
int i = 0; int i = 0;
auto get_graph = [&]() -> ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph_partial(z, decode_graph, i); return build_graph_partial(z, decode_graph, i);
}; };
auto out_opt = GGMLRunner::compute<float>(get_graph, n_threads, true); ggml_tensor* out = nullptr;
if (!out_opt.has_value()) { bool res = GGMLRunner::compute(get_graph, n_threads, true, &out, output_ctx);
return {};
}
sd::Tensor<float> out = std::move(*out_opt);
ae.clear_cache(); ae.clear_cache();
if (t == 1) { if (t == 1) {
return out; *output = out;
return res;
} }
sd::Tensor<float> output = std::move(out); *output = ggml_new_tensor_4d(output_ctx, GGML_TYPE_F32, out->ne[0], out->ne[1], (t - 1) * 4 + 1, out->ne[3]);
auto copy_to_output = [&]() {
for (int64_t i3 = 0; i3 < out->ne[3]; i3++) {
for (int64_t i2 = 0; i2 < out->ne[2]; i2++) {
for (int64_t i1 = 0; i1 < out->ne[1]; i1++) {
for (int64_t i0 = 0; i0 < out->ne[0]; i0++) {
float value = ggml_ext_tensor_get_f32(out, i0, i1, i2, i3);
int64_t offset = (i == 0) ? 0 : (1 + (i - 1) * 4);
ggml_ext_tensor_set_f32(*output, value, i0, i1, offset + i2, i3);
}
}
}
}
};
copy_to_output();
out = ggml_new_tensor_4d(output_ctx, GGML_TYPE_F32, out->ne[0], out->ne[1], 4, out->ne[3]);
for (i = 1; i < t; i++) { for (i = 1; i < t; i++) {
auto chunk_opt = GGMLRunner::compute<float>(get_graph, n_threads, true); res = res || GGMLRunner::compute(get_graph, n_threads, true, &out);
if (!chunk_opt.has_value()) {
return {};
}
out = std::move(*chunk_opt);
ae.clear_cache(); ae.clear_cache();
output = sd::ops::concat(output, out, 2); copy_to_output();
} }
free_cache_ctx_and_buffer(); free_cache_ctx_and_buffer();
return output; return res;
} }
} }
@ -1290,25 +1330,25 @@ namespace WAN {
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
ggml_context* ctx = ggml_init(params); ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr); GGML_ASSERT(work_ctx != nullptr);
if (true) { if (true) {
// cpu f32, pass // cpu f32, pass
// cpu f16, pass // cpu f16, pass
// cuda f16, pass // cuda f16, pass
// cuda f32, pass // cuda f32, pass
auto z = sd::load_tensor_from_file_as_tensor<float>("wan_vae_z.bin"); auto z = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 104, 60, 2, 16);
print_sd_tensor(z); ggml_set_f32(z, 0.5f);
sd::Tensor<float> out; z = load_tensor_from_file(work_ctx, "wan_vae_z.bin");
print_ggml_tensor(z);
ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = _compute(8, z, true); _compute(8, z, true, &out, work_ctx);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("decode test done in %ldms", t1 - t0); LOG_DEBUG("decode test done in %ldms", t1 - t0);
} }
}; };
@ -2189,23 +2229,23 @@ namespace WAN {
wan.get_param_tensors(tensors, prefix); wan.get_param_tensors(tensors, prefix);
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor, ggml_cgraph* build_graph(ggml_tensor* x,
const sd::Tensor<float>& timesteps_tensor, ggml_tensor* timesteps,
const sd::Tensor<float>& context_tensor = {}, ggml_tensor* context,
const sd::Tensor<float>& clip_fea_tensor = {}, ggml_tensor* clip_fea = nullptr,
const sd::Tensor<float>& c_concat_tensor = {}, ggml_tensor* c_concat = nullptr,
const sd::Tensor<float>& time_dim_concat_tensor = {}, ggml_tensor* time_dim_concat = nullptr,
const sd::Tensor<float>& vace_context_tensor = {}, ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f) { float vace_strength = 1.f) {
ggml_cgraph* gf = new_graph_custom(WAN_GRAPH_SIZE); ggml_cgraph* gf = new_graph_custom(WAN_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor); x = to_backend(x);
ggml_tensor* timesteps = make_input(timesteps_tensor); timesteps = to_backend(timesteps);
ggml_tensor* context = make_optional_input(context_tensor); context = to_backend(context);
ggml_tensor* clip_fea = make_optional_input(clip_fea_tensor); clip_fea = to_backend(clip_fea);
ggml_tensor* c_concat = make_optional_input(c_concat_tensor); c_concat = to_backend(c_concat);
ggml_tensor* time_dim_concat = make_optional_input(time_dim_concat_tensor); time_dim_concat = to_backend(time_dim_concat);
ggml_tensor* vace_context = make_optional_input(vace_context_tensor); vace_context = to_backend(vace_context);
pe_vec = Rope::gen_wan_pe(static_cast<int>(x->ne[2]), pe_vec = Rope::gen_wan_pe(static_cast<int>(x->ne[2]),
static_cast<int>(x->ne[1]), static_cast<int>(x->ne[1]),
@ -2245,20 +2285,22 @@ namespace WAN {
return gf; return gf;
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const sd::Tensor<float>& x, ggml_tensor* x,
const sd::Tensor<float>& timesteps, ggml_tensor* timesteps,
const sd::Tensor<float>& context = {}, ggml_tensor* context,
const sd::Tensor<float>& clip_fea = {}, ggml_tensor* clip_fea = nullptr,
const sd::Tensor<float>& c_concat = {}, ggml_tensor* c_concat = nullptr,
const sd::Tensor<float>& time_dim_concat = {}, ggml_tensor* time_dim_concat = nullptr,
const sd::Tensor<float>& vace_context = {}, ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f) { float vace_strength = 1.f,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> ggml_cgraph* { auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, clip_fea, c_concat, time_dim_concat, vace_context, vace_strength); return build_graph(x, timesteps, context, clip_fea, c_concat, time_dim_concat, vace_context, vace_strength);
}; };
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim()); return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
} }
void test() { void test() {
@ -2267,38 +2309,36 @@ namespace WAN {
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
ggml_context* ctx = ggml_init(params); ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr); GGML_ASSERT(work_ctx != nullptr);
{ {
// cpu f16: pass // cpu f16: pass
// cuda f16: pass // cuda f16: pass
// cpu q8_0: pass // cpu q8_0: pass
// auto x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 104, 60, 1, 16); // auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 104, 60, 1, 16);
// ggml_set_f32(x, 0.01f); // ggml_set_f32(x, 0.01f);
auto x = sd::load_tensor_from_file_as_tensor<float>("wan_dit_x.bin"); auto x = load_tensor_from_file(work_ctx, "wan_dit_x.bin");
print_sd_tensor(x); print_ggml_tensor(x);
std::vector<float> timesteps_vec(3, 1000.f); std::vector<float> timesteps_vec(3, 1000.f);
timesteps_vec[0] = 0.f; timesteps_vec[0] = 0.f;
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec); auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
// auto context = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 4096, 512, 1); // auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 4096, 512, 1);
// ggml_set_f32(context, 0.01f); // ggml_set_f32(context, 0.01f);
auto context = sd::load_tensor_from_file_as_tensor<float>("wan_dit_context.bin"); auto context = load_tensor_from_file(work_ctx, "wan_dit_context.bin");
print_sd_tensor(context); print_ggml_tensor(context);
// auto clip_fea = load_tensor_from_file(ctx, "wan_dit_clip_fea.bin"); // auto clip_fea = load_tensor_from_file(work_ctx, "wan_dit_clip_fea.bin");
// print_ggml_tensor(clip_fea); // print_ggml_tensor(clip_fea);
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = compute(8, x, timesteps, context, {}, {}, {}, {}, 1.f); compute(8, x, timesteps, context, nullptr, nullptr, nullptr, nullptr, 1.f, &out, work_ctx);
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("wan test done in %lldms", t1 - t0); LOG_DEBUG("wan test done in %lldms", t1 - t0);
} }
} }

View File

@ -481,21 +481,20 @@ namespace ZImage {
z_image.get_param_tensors(tensors, prefix); z_image.get_param_tensors(tensors, prefix);
} }
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor, ggml_cgraph* build_graph(ggml_tensor* x,
const sd::Tensor<float>& timesteps_tensor, ggml_tensor* timesteps,
const sd::Tensor<float>& context_tensor, ggml_tensor* context,
const std::vector<sd::Tensor<float>>& ref_latents_tensor = {}, std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false) { bool increase_ref_index = false) {
ggml_cgraph* gf = new_graph_custom(Z_IMAGE_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
GGML_ASSERT(x->ne[3] == 1); GGML_ASSERT(x->ne[3] == 1);
GGML_ASSERT(!context_tensor.empty()); ggml_cgraph* gf = new_graph_custom(Z_IMAGE_GRAPH_SIZE);
ggml_tensor* context = make_input(context_tensor);
std::vector<ggml_tensor*> ref_latents; x = to_backend(x);
ref_latents.reserve(ref_latents_tensor.size()); context = to_backend(context);
for (const auto& ref_latent_tensor : ref_latents_tensor) { timesteps = to_backend(timesteps);
ref_latents.push_back(make_input(ref_latent_tensor));
for (int i = 0; i < ref_latents.size(); i++) {
ref_latents[i] = to_backend(ref_latents[i]);
} }
pe_vec = Rope::gen_z_image_pe(static_cast<int>(x->ne[1]), pe_vec = Rope::gen_z_image_pe(static_cast<int>(x->ne[1]),
@ -531,12 +530,14 @@ namespace ZImage {
return gf; return gf;
} }
sd::Tensor<float> compute(int n_threads, bool compute(int n_threads,
const sd::Tensor<float>& x, ggml_tensor* x,
const sd::Tensor<float>& timesteps, ggml_tensor* timesteps,
const sd::Tensor<float>& context, ggml_tensor* context,
const std::vector<sd::Tensor<float>>& ref_latents = {}, std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false) { bool increase_ref_index = false,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size] // context: [N, max_position, hidden_size]
@ -544,7 +545,7 @@ namespace ZImage {
return build_graph(x, timesteps, context, ref_latents, increase_ref_index); return build_graph(x, timesteps, context, ref_latents, increase_ref_index);
}; };
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim()); return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
} }
void test() { void test() {
@ -553,37 +554,30 @@ namespace ZImage {
params.mem_buffer = nullptr; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
ggml_context* ctx = ggml_init(params); ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr); GGML_ASSERT(work_ctx != nullptr);
{ {
// auto x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 16, 16, 16, 1); // auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 16, 1);
// ggml_set_f32(x, 0.01f); // ggml_set_f32(x, 0.01f);
auto x = sd::load_tensor_from_file_as_tensor<float>("./z_image_x.bin"); auto x = load_tensor_from_file(work_ctx, "./z_image_x.bin");
print_sd_tensor(x); print_ggml_tensor(x);
std::vector<float> timesteps_vec(1, 0.f); std::vector<float> timesteps_vec(1, 0.f);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec); auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
// auto context = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 2560, 256, 1); // auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 2560, 256, 1);
// ggml_set_f32(context, 0.01f); // ggml_set_f32(context, 0.01f);
auto context = sd::load_tensor_from_file_as_tensor<float>("./z_image_context.bin"); auto context = load_tensor_from_file(work_ctx, "./z_image_context.bin");
print_sd_tensor(context); print_ggml_tensor(context);
sd::Tensor<float> out; ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
auto out_opt = compute(8, compute(8, x, timesteps, context, {}, false, &out, work_ctx);
x, int64_t t1 = ggml_time_ms();
timesteps,
context,
{},
false);
int64_t t1 = ggml_time_ms();
GGML_ASSERT(!out_opt.empty()); print_ggml_tensor(out);
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("z_image test done in %lldms", t1 - t0); LOG_DEBUG("z_image test done in %lldms", t1 - t0);
} }
} }