refactor: migrate generation pipeline to sd::Tensor (#1373)

This commit is contained in:
leejet 2026-03-30 00:19:25 +08:00 committed by GitHub
parent ed88e215a2
commit f16a110f87
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
39 changed files with 7768 additions and 7098 deletions

View File

@ -601,7 +601,7 @@ int main(int argc, const char* argv[]) {
if (gen_params.end_image_path.size() > 0) {
vae_decode_only = false;
if (!load_image_and_update_size(gen_params.init_image_path, end_image)) {
if (!load_image_and_update_size(gen_params.end_image_path, end_image)) {
return 1;
}
}

View File

@ -602,20 +602,19 @@ namespace Anima {
return Rope::embed_nd(ids, bs, axis_thetas, axes_dim);
}
ggml_cgraph* build_graph(ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
ggml_tensor* t5_ids = nullptr,
ggml_tensor* t5_weights = nullptr) {
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor = {},
const sd::Tensor<int32_t>& t5_ids_tensor = {},
const sd::Tensor<float>& t5_weights_tensor = {}) {
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* t5_ids = make_optional_input(t5_ids_tensor);
ggml_tensor* t5_weights = make_optional_input(t5_weights_tensor);
GGML_ASSERT(x->ne[3] == 1);
ggml_cgraph* gf = new_graph_custom(ANIMA_GRAPH_SIZE);
x = to_backend(x);
timesteps = to_backend(timesteps);
context = to_backend(context);
t5_ids = to_backend(t5_ids);
t5_weights = to_backend(t5_weights);
int64_t pad_h = (net.patch_size - x->ne[1] % net.patch_size) % net.patch_size;
int64_t pad_w = (net.patch_size - x->ne[0] % net.patch_size) % net.patch_size;
int64_t h_pad = x->ne[1] + pad_h;
@ -667,18 +666,16 @@ namespace Anima {
return gf;
}
bool compute(int n_threads,
ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
ggml_tensor* t5_ids = nullptr,
ggml_tensor* t5_weights = nullptr,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
sd::Tensor<float> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context = {},
const sd::Tensor<int32_t>& t5_ids = {},
const sd::Tensor<float>& t5_weights = {}) {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, t5_ids, t5_weights);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
}
};
} // namespace Anima

View File

@ -1,4 +1,4 @@
#ifndef __AUTO_ENCODER_KL_HPP__
#ifndef __AUTO_ENCODER_KL_HPP__
#define __AUTO_ENCODER_KL_HPP__
#include "vae.hpp"
@ -685,10 +685,9 @@ struct AutoEncoderKL : public VAE {
ae.get_param_tensors(tensors, prefix);
}
ggml_cgraph* build_graph(ggml_tensor* z, bool decode_graph) {
ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) {
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
z = to_backend(z);
ggml_tensor* z = make_input(z_tensor);
auto runner_ctx = get_context();
@ -699,184 +698,100 @@ struct AutoEncoderKL : public VAE {
return gf;
}
bool _compute(const int n_threads,
ggml_tensor* z,
bool decode_graph,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> _compute(const int n_threads,
const sd::Tensor<float>& z,
bool decode_graph) override {
GGML_ASSERT(!decode_only || decode_graph);
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(z, decode_graph);
};
// ggml_set_f32(z, 0.5f);
// print_ggml_tensor(z);
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), z.dim());
}
ggml_tensor* gaussian_latent_sample(ggml_context* work_ctx, ggml_tensor* moments, std::shared_ptr<RNG> rng) {
sd::Tensor<float> gaussian_latent_sample(const sd::Tensor<float>& moments, std::shared_ptr<RNG> rng) {
// ldm.modules.distributions.distributions.DiagonalGaussianDistribution.sample
ggml_tensor* latents = ggml_new_tensor_4d(work_ctx, moments->type, moments->ne[0], moments->ne[1], moments->ne[2] / 2, moments->ne[3]);
ggml_tensor* noise = ggml_dup_tensor(work_ctx, latents);
ggml_ext_im_set_randn_f32(noise, rng);
{
float mean = 0;
float logvar = 0;
float value = 0;
float std_ = 0;
for (int i = 0; i < latents->ne[3]; i++) {
for (int j = 0; j < latents->ne[2]; j++) {
for (int k = 0; k < latents->ne[1]; k++) {
for (int l = 0; l < latents->ne[0]; l++) {
mean = ggml_ext_tensor_get_f32(moments, l, k, j, i);
logvar = ggml_ext_tensor_get_f32(moments, l, k, j + (int)latents->ne[2], i);
logvar = std::max(-30.0f, std::min(logvar, 20.0f));
std_ = std::exp(0.5f * logvar);
value = mean + std_ * ggml_ext_tensor_get_f32(noise, l, k, j, i);
// printf("%d %d %d %d -> %f\n", i, j, k, l, value);
ggml_ext_tensor_set_f32(latents, value, l, k, j, i);
}
}
}
}
}
auto chunks = sd::ops::chunk(moments, 2, 2);
const auto& mean = chunks[0];
const auto& logvar = chunks[1];
sd::Tensor<float> stddev = sd::ops::exp(0.5f * sd::ops::clamp(logvar, -30.0f, 20.0f));
sd::Tensor<float> noise = sd::Tensor<float>::randn_like(mean, rng);
sd::Tensor<float> latents = mean + stddev * noise;
return latents;
}
ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
if (sd_version_is_flux2(version)) {
return vae_output;
} else if (version == VERSION_SD1_PIX2PIX) {
return ggml_view_3d(work_ctx,
vae_output,
vae_output->ne[0],
vae_output->ne[1],
vae_output->ne[2] / 2,
vae_output->nb[1],
vae_output->nb[2],
0);
return sd::ops::chunk(vae_output, 2, 2)[0];
} else {
return gaussian_latent_sample(work_ctx, vae_output, rng);
return gaussian_latent_sample(vae_output, rng);
}
}
void get_latents_mean_std_vec(ggml_tensor* latents, int channel_dim, std::vector<float>& latents_mean_vec, std::vector<float>& latents_std_vec) {
// flux2
std::pair<sd::Tensor<float>, sd::Tensor<float>> get_latents_mean_std(const sd::Tensor<float>& latents, int channel_dim) {
GGML_ASSERT(channel_dim >= 0 && static_cast<size_t>(channel_dim) < static_cast<size_t>(latents.dim()));
if (sd_version_is_flux2(version)) {
GGML_ASSERT(latents->ne[channel_dim] == 128);
latents_mean_vec = {-0.0676f, -0.0715f, -0.0753f, -0.0745f, 0.0223f, 0.0180f, 0.0142f, 0.0184f,
-0.0001f, -0.0063f, -0.0002f, -0.0031f, -0.0272f, -0.0281f, -0.0276f, -0.0290f,
-0.0769f, -0.0672f, -0.0902f, -0.0892f, 0.0168f, 0.0152f, 0.0079f, 0.0086f,
0.0083f, 0.0015f, 0.0003f, -0.0043f, -0.0439f, -0.0419f, -0.0438f, -0.0431f,
-0.0102f, -0.0132f, -0.0066f, -0.0048f, -0.0311f, -0.0306f, -0.0279f, -0.0180f,
0.0030f, 0.0015f, 0.0126f, 0.0145f, 0.0347f, 0.0338f, 0.0337f, 0.0283f,
0.0020f, 0.0047f, 0.0047f, 0.0050f, 0.0123f, 0.0081f, 0.0081f, 0.0146f,
0.0681f, 0.0679f, 0.0767f, 0.0732f, -0.0462f, -0.0474f, -0.0392f, -0.0511f,
-0.0528f, -0.0477f, -0.0470f, -0.0517f, -0.0317f, -0.0316f, -0.0345f, -0.0283f,
0.0510f, 0.0445f, 0.0578f, 0.0458f, -0.0412f, -0.0458f, -0.0487f, -0.0467f,
-0.0088f, -0.0106f, -0.0088f, -0.0046f, -0.0376f, -0.0432f, -0.0436f, -0.0499f,
0.0118f, 0.0166f, 0.0203f, 0.0279f, 0.0113f, 0.0129f, 0.0016f, 0.0072f,
-0.0118f, -0.0018f, -0.0141f, -0.0054f, -0.0091f, -0.0138f, -0.0145f, -0.0187f,
0.0323f, 0.0305f, 0.0259f, 0.0300f, 0.0540f, 0.0614f, 0.0495f, 0.0590f,
-0.0511f, -0.0603f, -0.0478f, -0.0524f, -0.0227f, -0.0274f, -0.0154f, -0.0255f,
-0.0572f, -0.0565f, -0.0518f, -0.0496f, 0.0116f, 0.0054f, 0.0163f, 0.0104f};
latents_std_vec = {
1.8029f, 1.7786f, 1.7868f, 1.7837f, 1.7717f, 1.7590f, 1.7610f, 1.7479f,
1.7336f, 1.7373f, 1.7340f, 1.7343f, 1.8626f, 1.8527f, 1.8629f, 1.8589f,
1.7593f, 1.7526f, 1.7556f, 1.7583f, 1.7363f, 1.7400f, 1.7355f, 1.7394f,
1.7342f, 1.7246f, 1.7392f, 1.7304f, 1.7551f, 1.7513f, 1.7559f, 1.7488f,
1.8449f, 1.8454f, 1.8550f, 1.8535f, 1.8240f, 1.7813f, 1.7854f, 1.7945f,
1.8047f, 1.7876f, 1.7695f, 1.7676f, 1.7782f, 1.7667f, 1.7925f, 1.7848f,
1.7579f, 1.7407f, 1.7483f, 1.7368f, 1.7961f, 1.7998f, 1.7920f, 1.7925f,
1.7780f, 1.7747f, 1.7727f, 1.7749f, 1.7526f, 1.7447f, 1.7657f, 1.7495f,
1.7775f, 1.7720f, 1.7813f, 1.7813f, 1.8162f, 1.8013f, 1.8023f, 1.8033f,
1.7527f, 1.7331f, 1.7563f, 1.7482f, 1.7610f, 1.7507f, 1.7681f, 1.7613f,
1.7665f, 1.7545f, 1.7828f, 1.7726f, 1.7896f, 1.7999f, 1.7864f, 1.7760f,
1.7613f, 1.7625f, 1.7560f, 1.7577f, 1.7783f, 1.7671f, 1.7810f, 1.7799f,
1.7201f, 1.7068f, 1.7265f, 1.7091f, 1.7793f, 1.7578f, 1.7502f, 1.7455f,
1.7587f, 1.7500f, 1.7525f, 1.7362f, 1.7616f, 1.7572f, 1.7444f, 1.7430f,
1.7509f, 1.7610f, 1.7634f, 1.7612f, 1.7254f, 1.7135f, 1.7321f, 1.7226f,
1.7664f, 1.7624f, 1.7718f, 1.7664f, 1.7457f, 1.7441f, 1.7569f, 1.7530f};
GGML_ASSERT(latents.shape()[channel_dim] == 128);
std::vector<int64_t> stats_shape(static_cast<size_t>(latents.dim()), 1);
stats_shape[static_cast<size_t>(channel_dim)] = latents.shape()[channel_dim];
auto mean_tensor = sd::Tensor<float>::from_vector({-0.0676f, -0.0715f, -0.0753f, -0.0745f, 0.0223f, 0.0180f, 0.0142f, 0.0184f,
-0.0001f, -0.0063f, -0.0002f, -0.0031f, -0.0272f, -0.0281f, -0.0276f, -0.0290f,
-0.0769f, -0.0672f, -0.0902f, -0.0892f, 0.0168f, 0.0152f, 0.0079f, 0.0086f,
0.0083f, 0.0015f, 0.0003f, -0.0043f, -0.0439f, -0.0419f, -0.0438f, -0.0431f,
-0.0102f, -0.0132f, -0.0066f, -0.0048f, -0.0311f, -0.0306f, -0.0279f, -0.0180f,
0.0030f, 0.0015f, 0.0126f, 0.0145f, 0.0347f, 0.0338f, 0.0337f, 0.0283f,
0.0020f, 0.0047f, 0.0047f, 0.0050f, 0.0123f, 0.0081f, 0.0081f, 0.0146f,
0.0681f, 0.0679f, 0.0767f, 0.0732f, -0.0462f, -0.0474f, -0.0392f, -0.0511f,
-0.0528f, -0.0477f, -0.0470f, -0.0517f, -0.0317f, -0.0316f, -0.0345f, -0.0283f,
0.0510f, 0.0445f, 0.0578f, 0.0458f, -0.0412f, -0.0458f, -0.0487f, -0.0467f,
-0.0088f, -0.0106f, -0.0088f, -0.0046f, -0.0376f, -0.0432f, -0.0436f, -0.0499f,
0.0118f, 0.0166f, 0.0203f, 0.0279f, 0.0113f, 0.0129f, 0.0016f, 0.0072f,
-0.0118f, -0.0018f, -0.0141f, -0.0054f, -0.0091f, -0.0138f, -0.0145f, -0.0187f,
0.0323f, 0.0305f, 0.0259f, 0.0300f, 0.0540f, 0.0614f, 0.0495f, 0.0590f,
-0.0511f, -0.0603f, -0.0478f, -0.0524f, -0.0227f, -0.0274f, -0.0154f, -0.0255f,
-0.0572f, -0.0565f, -0.0518f, -0.0496f, 0.0116f, 0.0054f, 0.0163f, 0.0104f});
mean_tensor.reshape_(stats_shape);
auto std_tensor = sd::Tensor<float>::from_vector({1.8029f, 1.7786f, 1.7868f, 1.7837f, 1.7717f, 1.7590f, 1.7610f, 1.7479f,
1.7336f, 1.7373f, 1.7340f, 1.7343f, 1.8626f, 1.8527f, 1.8629f, 1.8589f,
1.7593f, 1.7526f, 1.7556f, 1.7583f, 1.7363f, 1.7400f, 1.7355f, 1.7394f,
1.7342f, 1.7246f, 1.7392f, 1.7304f, 1.7551f, 1.7513f, 1.7559f, 1.7488f,
1.8449f, 1.8454f, 1.8550f, 1.8535f, 1.8240f, 1.7813f, 1.7854f, 1.7945f,
1.8047f, 1.7876f, 1.7695f, 1.7676f, 1.7782f, 1.7667f, 1.7925f, 1.7848f,
1.7579f, 1.7407f, 1.7483f, 1.7368f, 1.7961f, 1.7998f, 1.7920f, 1.7925f,
1.7780f, 1.7747f, 1.7727f, 1.7749f, 1.7526f, 1.7447f, 1.7657f, 1.7495f,
1.7775f, 1.7720f, 1.7813f, 1.7813f, 1.8162f, 1.8013f, 1.8023f, 1.8033f,
1.7527f, 1.7331f, 1.7563f, 1.7482f, 1.7610f, 1.7507f, 1.7681f, 1.7613f,
1.7665f, 1.7545f, 1.7828f, 1.7726f, 1.7896f, 1.7999f, 1.7864f, 1.7760f,
1.7613f, 1.7625f, 1.7560f, 1.7577f, 1.7783f, 1.7671f, 1.7810f, 1.7799f,
1.7201f, 1.7068f, 1.7265f, 1.7091f, 1.7793f, 1.7578f, 1.7502f, 1.7455f,
1.7587f, 1.7500f, 1.7525f, 1.7362f, 1.7616f, 1.7572f, 1.7444f, 1.7430f,
1.7509f, 1.7610f, 1.7634f, 1.7612f, 1.7254f, 1.7135f, 1.7321f, 1.7226f,
1.7664f, 1.7624f, 1.7718f, 1.7664f, 1.7457f, 1.7441f, 1.7569f, 1.7530f});
std_tensor.reshape_(stats_shape);
return {std::move(mean_tensor), std::move(std_tensor)};
} else {
GGML_ABORT("unknown version %d", version);
}
}
ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
ggml_tensor* vae_latents = ggml_dup(work_ctx, latents);
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
if (sd_version_is_flux2(version)) {
int channel_dim = 2;
std::vector<float> latents_mean_vec;
std::vector<float> latents_std_vec;
get_latents_mean_std_vec(latents, channel_dim, latents_mean_vec, latents_std_vec);
float mean;
float std_;
for (int i = 0; i < latents->ne[3]; i++) {
if (channel_dim == 3) {
mean = latents_mean_vec[i];
std_ = latents_std_vec[i];
}
for (int j = 0; j < latents->ne[2]; j++) {
if (channel_dim == 2) {
mean = latents_mean_vec[j];
std_ = latents_std_vec[j];
}
for (int k = 0; k < latents->ne[1]; k++) {
for (int l = 0; l < latents->ne[0]; l++) {
float value = ggml_ext_tensor_get_f32(latents, l, k, j, i);
value = value * std_ / scale_factor + mean;
ggml_ext_tensor_set_f32(vae_latents, value, l, k, j, i);
}
}
}
}
} else {
ggml_ext_tensor_iter(latents, [&](ggml_tensor* latents, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float value = ggml_ext_tensor_get_f32(latents, i0, i1, i2, i3);
value = (value / scale_factor) + shift_factor;
ggml_ext_tensor_set_f32(vae_latents, value, i0, i1, i2, i3);
});
int channel_dim = 2;
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents, channel_dim);
return (latents * std_tensor) / scale_factor + mean_tensor;
}
return vae_latents;
return (latents / scale_factor) + shift_factor;
}
ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
ggml_tensor* diffusion_latents = ggml_dup(work_ctx, latents);
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
if (sd_version_is_flux2(version)) {
int channel_dim = 2;
std::vector<float> latents_mean_vec;
std::vector<float> latents_std_vec;
get_latents_mean_std_vec(latents, channel_dim, latents_mean_vec, latents_std_vec);
float mean;
float std_;
for (int i = 0; i < latents->ne[3]; i++) {
if (channel_dim == 3) {
mean = latents_mean_vec[i];
std_ = latents_std_vec[i];
}
for (int j = 0; j < latents->ne[2]; j++) {
if (channel_dim == 2) {
mean = latents_mean_vec[j];
std_ = latents_std_vec[j];
}
for (int k = 0; k < latents->ne[1]; k++) {
for (int l = 0; l < latents->ne[0]; l++) {
float value = ggml_ext_tensor_get_f32(latents, l, k, j, i);
value = (value - mean) * scale_factor / std_;
ggml_ext_tensor_set_f32(diffusion_latents, value, l, k, j, i);
}
}
}
}
} else {
ggml_ext_tensor_iter(latents, [&](ggml_tensor* latents, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float value = ggml_ext_tensor_get_f32(latents, i0, i1, i2, i3);
value = (value - shift_factor) * scale_factor;
ggml_ext_tensor_set_f32(diffusion_latents, value, i0, i1, i2, i3);
});
int channel_dim = 2;
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents, channel_dim);
return ((latents - mean_tensor) * scale_factor) / std_tensor;
}
return diffusion_latents;
return (latents - shift_factor) * scale_factor;
}
int get_encoder_output_channels(int input_channels) {
@ -889,24 +804,26 @@ struct AutoEncoderKL : public VAE {
params.mem_buffer = nullptr;
params.no_alloc = false;
ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);
{
// CPU, x{1, 3, 64, 64}: Pass
// CUDA, x{1, 3, 64, 64}: Pass, but still get wrong result for some images, may be due to internal nan
// CPU, x{2, 3, 64, 64}: Wrong result
// CUDA, x{2, 3, 64, 64}: Wrong result, and different from CPU result
auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 64, 64, 3, 2);
ggml_set_f32(x, 0.5f);
print_ggml_tensor(x);
ggml_tensor* out = nullptr;
sd::Tensor<float> x({64, 64, 3, 2});
x.fill_(0.5f);
print_sd_tensor(x);
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
_compute(8, x, false, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = _compute(8, x, false);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("encode test done in %lldms", t1 - t0);
}
@ -915,16 +832,18 @@ struct AutoEncoderKL : public VAE {
// CUDA, z{1, 4, 8, 8}: Pass
// CPU, z{3, 4, 8, 8}: Wrong result
// CUDA, z{3, 4, 8, 8}: Wrong result, and different from CPU result
auto z = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 4, 1);
ggml_set_f32(z, 0.5f);
print_ggml_tensor(z);
ggml_tensor* out = nullptr;
sd::Tensor<float> z({8, 8, 4, 1});
z.fill_(0.5f);
print_sd_tensor(z);
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
_compute(8, z, true, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = _compute(8, z, true);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("decode test done in %lldms", t1 - t0);
}
};

View File

@ -8,7 +8,9 @@
#include <unordered_map>
#include <vector>
#include "condition_cache_utils.hpp"
#include "ggml_extend.hpp"
#include "tensor.hpp"
struct DBCacheConfig {
bool enabled = false;
@ -771,35 +773,37 @@ struct CacheDitConditionState {
return it != cache_diffs.end() && !it->second.diff.empty();
}
void update_cache(const void* cond, const float* input, const float* output, size_t size) {
void update_cache(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
CacheEntry& entry = cache_diffs[cond];
entry.diff.resize(size);
for (size_t i = 0; i < size; i++) {
entry.diff[i] = output[i] - input[i];
if (!sd::store_condition_cache_diff(&entry.diff, input, output)) {
entry.prev_input.clear();
entry.prev_output.clear();
entry.has_prev = false;
return;
}
size_t size = static_cast<size_t>(output.numel());
const float* input_data = input.data();
const float* output_data = output.data();
entry.prev_input.resize(size);
entry.prev_output.resize(size);
for (size_t i = 0; i < size; i++) {
entry.prev_input[i] = input[i];
entry.prev_output[i] = output[i];
entry.prev_input[i] = input_data[i];
entry.prev_output[i] = output_data[i];
}
entry.has_prev = true;
}
void apply_cache(const void* cond, const float* input, float* output, size_t size) {
void apply_cache(const void* cond,
const sd::Tensor<float>& input,
sd::Tensor<float>* output) {
auto it = cache_diffs.find(cond);
if (it == cache_diffs.end() || it->second.diff.empty())
return;
if (it->second.diff.size() != size)
return;
for (size_t i = 0; i < size; i++) {
output[i] = input[i] + it->second.diff[i];
}
sd::apply_condition_cache_diff(it->second.diff, input, output);
}
bool before_condition(const void* cond, ggml_tensor* input, ggml_tensor* output, float sigma, int step_index) {
bool before_condition(const void* cond, const sd::Tensor<float>& input, sd::Tensor<float>* output, float sigma, int step_index) {
if (!enabled() || step_index < 0)
return false;
@ -819,8 +823,7 @@ struct CacheDitConditionState {
if (skip_current_step) {
if (has_cache(cond)) {
apply_cache(cond, (float*)input->data, (float*)output->data,
static_cast<size_t>(ggml_nelements(output)));
apply_cache(cond, input, output);
return true;
}
return false;
@ -833,13 +836,13 @@ struct CacheDitConditionState {
if (it == cache_diffs.end() || !it->second.has_prev)
return false;
size_t ne = static_cast<size_t>(ggml_nelements(input));
size_t ne = static_cast<size_t>(input.numel());
if (it->second.prev_input.size() != ne)
return false;
float* input_data = (float*)input->data;
float diff = CacheDitState::calculate_residual_diff(
it->second.prev_input.data(), input_data, ne);
const float* input_data = input.data();
float diff = CacheDitState::calculate_residual_diff(
it->second.prev_input.data(), input_data, ne);
float effective_threshold = config.residual_diff_threshold;
if (config.Fn_compute_blocks > 0) {
@ -859,7 +862,7 @@ struct CacheDitConditionState {
cached_steps.push_back(current_step_index);
continuous_cached_steps++;
accumulated_residual_diff += diff;
apply_cache(cond, input_data, (float*)output->data, ne);
apply_cache(cond, input, output);
return true;
}
@ -867,15 +870,14 @@ struct CacheDitConditionState {
return false;
}
void after_condition(const void* cond, ggml_tensor* input, ggml_tensor* output) {
void after_condition(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
if (!step_is_active())
return;
size_t ne = static_cast<size_t>(ggml_nelements(output));
update_cache(cond, (float*)input->data, (float*)output->data, ne);
update_cache(cond, input, output);
if (cond == anchor_condition && taylor_config.enabled) {
taylor_state.update_derivatives((float*)output->data, ne, current_step_index);
taylor_state.update_derivatives(output.data(), static_cast<size_t>(output.numel()), current_step_index);
}
}

View File

@ -957,15 +957,14 @@ struct CLIPTextModelRunner : public GGMLRunner {
return model.forward(ctx, input_ids, embeddings, mask, max_token_idx, return_pooled, clip_skip);
}
ggml_cgraph* build_graph(ggml_tensor* input_ids,
ggml_cgraph* build_graph(const sd::Tensor<int32_t>& input_ids_tensor,
int num_custom_embeddings = 0,
void* custom_embeddings_data = nullptr,
size_t max_token_idx = 0,
bool return_pooled = false,
int clip_skip = -1) {
ggml_cgraph* gf = new_graph_custom(2048);
input_ids = to_backend(input_ids);
ggml_cgraph* gf = new_graph_custom(2048);
ggml_tensor* input_ids = make_input(input_ids_tensor);
ggml_tensor* embeddings = nullptr;
@ -1004,19 +1003,21 @@ struct CLIPTextModelRunner : public GGMLRunner {
return gf;
}
bool compute(const int n_threads,
ggml_tensor* input_ids,
int num_custom_embeddings,
void* custom_embeddings_data,
size_t max_token_idx,
bool return_pooled,
int clip_skip,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
sd::Tensor<float> compute(const int n_threads,
const sd::Tensor<int32_t>& input_ids,
int num_custom_embeddings,
void* custom_embeddings_data,
size_t max_token_idx,
bool return_pooled,
int clip_skip) {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(input_ids, num_custom_embeddings, custom_embeddings_data, max_token_idx, return_pooled, clip_skip);
};
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
auto result = GGMLRunner::compute<float>(get_graph, n_threads, true);
if (return_pooled) {
return take_or_empty(std::move(result));
}
return restore_trailing_singleton_dims(std::move(result), 3);
}
};

View File

@ -4,11 +4,11 @@
#include "ggml_extend.hpp"
namespace DiT {
ggml_tensor* patchify(ggml_context* ctx,
ggml_tensor* x,
int pw,
int ph,
bool patch_last = true) {
inline ggml_tensor* patchify(ggml_context* ctx,
ggml_tensor* x,
int pw,
int ph,
bool patch_last = true) {
// x: [N, C, H, W]
// return: [N, h*w, C*ph*pw] if patch_last else [N, h*w, ph*pw*C]
int64_t N = x->ne[3];
@ -33,13 +33,13 @@ namespace DiT {
return x;
}
ggml_tensor* unpatchify(ggml_context* ctx,
ggml_tensor* x,
int64_t h,
int64_t w,
int ph,
int pw,
bool patch_last = true) {
inline ggml_tensor* unpatchify(ggml_context* ctx,
ggml_tensor* x,
int64_t h,
int64_t w,
int ph,
int pw,
bool patch_last = true) {
// x: [N, h*w, C*ph*pw] if patch_last else [N, h*w, ph*pw*C]
// return: [N, C, H, W]
int64_t N = x->ne[2];
@ -64,10 +64,10 @@ namespace DiT {
return x;
}
ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx,
ggml_tensor* x,
int ph,
int pw) {
inline ggml_tensor* pad_to_patch_size(GGMLRunnerContext* ctx,
ggml_tensor* x,
int ph,
int pw) {
int64_t W = x->ne[0];
int64_t H = x->ne[1];
@ -77,23 +77,23 @@ namespace DiT {
return x;
}
ggml_tensor* pad_and_patchify(GGMLRunnerContext* ctx,
ggml_tensor* x,
int ph,
int pw,
bool patch_last = true) {
inline ggml_tensor* pad_and_patchify(GGMLRunnerContext* ctx,
ggml_tensor* x,
int ph,
int pw,
bool patch_last = true) {
x = pad_to_patch_size(ctx, x, ph, pw);
x = patchify(ctx->ggml_ctx, x, ph, pw, patch_last);
return x;
}
ggml_tensor* unpatchify_and_crop(ggml_context* ctx,
ggml_tensor* x,
int64_t H,
int64_t W,
int ph,
int pw,
bool patch_last = true) {
inline ggml_tensor* unpatchify_and_crop(ggml_context* ctx,
ggml_tensor* x,
int64_t H,
int64_t W,
int ph,
int pw,
bool patch_last = true) {
int pad_h = (ph - H % ph) % ph;
int pad_w = (pw - W % pw) % pw;
int64_t h = ((H + pad_h) / ph);
@ -105,4 +105,4 @@ namespace DiT {
}
} // namespace DiT
#endif // __COMMON_DIT_HPP__
#endif // __COMMON_DIT_HPP__

View File

@ -0,0 +1,64 @@
#ifndef __CONDITION_CACHE_UTILS_HPP__
#define __CONDITION_CACHE_UTILS_HPP__
#include <vector>
#include "tensor.hpp"
namespace sd {
// Compute the element-wise residual (output - input) used by the condition
// cache and store it into *diff.
//
// diff   : destination vector; cleared when a shape/data validation fails
//          after the initial checks, so stale residuals never survive.
// input  : tensor fed into the conditioned block.
// output : tensor produced by the conditioned block; must have the same
//          element count as input.
// Returns true when *diff was filled, false otherwise.
inline bool store_condition_cache_diff(std::vector<float>* diff,
                                       const sd::Tensor<float>& input,
                                       const sd::Tensor<float>& output) {
    // Nothing to do without a destination or with empty operands.
    if (diff == nullptr || input.empty() || output.empty()) {
        return false;
    }
    const size_t n_in  = static_cast<size_t>(input.numel());
    const size_t n_out = static_cast<size_t>(output.numel());
    if (n_in == 0 || n_in != n_out) {
        diff->clear();  // element-count mismatch: drop any stale residual
        return false;
    }
    const float* in_ptr  = input.data();
    const float* out_ptr = output.data();
    if (in_ptr == nullptr || out_ptr == nullptr) {
        diff->clear();  // tensor reports elements but exposes no storage
        return false;
    }
    diff->resize(n_out);
    for (size_t idx = 0; idx < n_out; ++idx) {
        (*diff)[idx] = out_ptr[idx] - in_ptr[idx];
    }
    return true;
}
// Reconstruct a cached condition output as input + diff.
//
// diff   : residual previously produced by store_condition_cache_diff;
//          its size must equal input's element count.
// input  : tensor fed into the conditioned block.
// output : overwritten with a copy of input, then the residual is added
//          element-wise in place.
// Returns true when *output holds the reconstructed result, false when
// validation fails (output may already hold the plain input copy if the
// data pointer turns out to be null — same as before).
inline bool apply_condition_cache_diff(const std::vector<float>& diff,
                                       const sd::Tensor<float>& input,
                                       sd::Tensor<float>* output) {
    const bool have_work = output != nullptr && !input.empty() && !diff.empty();
    if (!have_work) {
        return false;
    }
    const size_t count = static_cast<size_t>(input.numel());
    if (count == 0 || diff.size() != count) {
        return false;
    }
    *output = input;  // start from the input, then add the cached residual
    float* dst = output->data();
    if (dst == nullptr) {
        return false;
    }
    for (size_t idx = 0; idx < count; ++idx) {
        dst[idx] += diff[idx];
    }
    return true;
}
} // namespace sd
#endif // __CONDITION_CACHE_UTILS_HPP__

File diff suppressed because it is too large Load Diff

View File

@ -310,11 +310,13 @@ struct ControlNet : public GGMLRunner {
SDVersion version = VERSION_SD1;
ControlNetBlock control_net;
ggml_backend_buffer_t control_buffer = nullptr; // keep control output tensors in backend memory
ggml_backend_buffer_t control_buffer = nullptr;
ggml_context* control_ctx = nullptr;
std::vector<ggml_tensor*> controls; // (12 input block outputs, 1 middle block output) SD 1.5
ggml_tensor* guided_hint = nullptr; // guided_hint cache, for faster inference
bool guided_hint_cached = false;
std::vector<ggml_tensor*> control_outputs_ggml;
ggml_tensor* guided_hint_output_ggml = nullptr;
std::vector<sd::Tensor<float>> controls;
sd::Tensor<float> guided_hint;
bool guided_hint_cached = false;
ControlNet(ggml_backend_t backend,
bool offload_params_to_cpu,
@ -335,16 +337,16 @@ struct ControlNet : public GGMLRunner {
params.no_alloc = true;
control_ctx = ggml_init(params);
controls.resize(outs.size() - 1);
control_outputs_ggml.resize(outs.size() - 1);
size_t control_buffer_size = 0;
guided_hint = ggml_dup_tensor(control_ctx, outs[0]);
control_buffer_size += ggml_nbytes(guided_hint);
guided_hint_output_ggml = ggml_dup_tensor(control_ctx, outs[0]);
control_buffer_size += ggml_nbytes(guided_hint_output_ggml);
for (int i = 0; i < outs.size() - 1; i++) {
controls[i] = ggml_dup_tensor(control_ctx, outs[i + 1]);
control_buffer_size += ggml_nbytes(controls[i]);
control_outputs_ggml[i] = ggml_dup_tensor(control_ctx, outs[i + 1]);
control_buffer_size += ggml_nbytes(control_outputs_ggml[i]);
}
control_buffer = ggml_backend_alloc_ctx_tensors(control_ctx, runtime_backend);
@ -361,8 +363,10 @@ struct ControlNet : public GGMLRunner {
ggml_free(control_ctx);
control_ctx = nullptr;
}
guided_hint = nullptr;
guided_hint_cached = false;
guided_hint_output_ggml = nullptr;
guided_hint_cached = false;
guided_hint = {};
control_outputs_ggml.clear();
controls.clear();
}
@ -374,29 +378,33 @@ struct ControlNet : public GGMLRunner {
control_net.get_param_tensors(tensors, prefix);
}
ggml_cgraph* build_graph(ggml_tensor* x,
ggml_tensor* hint,
ggml_tensor* timesteps,
ggml_tensor* context,
ggml_tensor* y = nullptr) {
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& hint_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor = {},
const sd::Tensor<float>& y_tensor = {}) {
ggml_cgraph* gf = new_graph_custom(CONTROL_NET_GRAPH_SIZE);
x = to_backend(x);
if (guided_hint_cached) {
hint = nullptr;
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* hint = nullptr;
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* y = make_optional_input(y_tensor);
ggml_tensor* guided_hint_input = nullptr;
if (guided_hint_cached && !guided_hint.empty()) {
guided_hint_input = make_input(guided_hint);
hint = nullptr;
} else {
hint = to_backend(hint);
hint = make_input(hint_tensor);
}
context = to_backend(context);
y = to_backend(y);
timesteps = to_backend(timesteps);
auto runner_ctx = get_context();
auto outs = control_net.forward(&runner_ctx,
x,
hint,
guided_hint_cached ? guided_hint : nullptr,
guided_hint_input,
timesteps,
context,
y);
@ -405,22 +413,20 @@ struct ControlNet : public GGMLRunner {
alloc_control_ctx(outs);
}
ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[0], guided_hint));
ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[0], guided_hint_output_ggml));
for (int i = 0; i < outs.size() - 1; i++) {
ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[i + 1], controls[i]));
ggml_build_forward_expand(gf, ggml_cpy(compute_ctx, outs[i + 1], control_outputs_ggml[i]));
}
return gf;
}
bool compute(int n_threads,
ggml_tensor* x,
ggml_tensor* hint,
ggml_tensor* timesteps,
ggml_tensor* context,
ggml_tensor* y,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
std::optional<std::vector<sd::Tensor<float>>> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& hint,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context = {},
const sd::Tensor<float>& y = {}) {
// x: [N, in_channels, h, w]
// timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
@ -429,12 +435,24 @@ struct ControlNet : public GGMLRunner {
return build_graph(x, hint, timesteps, context, y);
};
bool res = GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
if (res) {
// cache guided_hint
guided_hint_cached = true;
auto compute_result = GGMLRunner::compute<float>(get_graph, n_threads, false);
if (!compute_result.has_value()) {
return std::nullopt;
}
return res;
if (guided_hint_output_ggml != nullptr) {
guided_hint = restore_trailing_singleton_dims(sd::make_sd_tensor_from_ggml<float>(guided_hint_output_ggml),
4);
}
controls.clear();
controls.reserve(control_outputs_ggml.size());
for (ggml_tensor* control : control_outputs_ggml) {
auto control_host = restore_trailing_singleton_dims(sd::make_sd_tensor_from_ggml<float>(control), 4);
GGML_ASSERT(!control_host.empty());
controls.push_back(std::move(control_host));
}
guided_hint_cached = true;
return controls;
}
bool load_from_file(const std::string& file_path, int n_threads) {
@ -462,4 +480,4 @@ struct ControlNet : public GGMLRunner {
}
};
#endif // __CONTROL_HPP__
#endif // __CONTROL_HPP__

File diff suppressed because it is too large Load Diff

View File

@ -1,37 +1,45 @@
#ifndef __DIFFUSION_MODEL_H__
#define __DIFFUSION_MODEL_H__
#include <optional>
#include "anima.hpp"
#include "flux.hpp"
#include "mmdit.hpp"
#include "qwen_image.hpp"
#include "tensor_ggml.hpp"
#include "unet.hpp"
#include "wan.hpp"
#include "z_image.hpp"
struct DiffusionParams {
ggml_tensor* x = nullptr;
ggml_tensor* timesteps = nullptr;
ggml_tensor* context = nullptr;
ggml_tensor* c_concat = nullptr;
ggml_tensor* y = nullptr;
ggml_tensor* guidance = nullptr;
std::vector<ggml_tensor*> ref_latents = {};
bool increase_ref_index = false;
int num_video_frames = -1;
std::vector<ggml_tensor*> controls = {};
float control_strength = 0.f;
ggml_tensor* vace_context = nullptr;
float vace_strength = 1.f;
std::vector<int> skip_layers = {};
const sd::Tensor<float>* x = nullptr;
const sd::Tensor<float>* timesteps = nullptr;
const sd::Tensor<float>* context = nullptr;
const sd::Tensor<float>* c_concat = nullptr;
const sd::Tensor<float>* y = nullptr;
const sd::Tensor<int32_t>* t5_ids = nullptr;
const sd::Tensor<float>* t5_weights = nullptr;
const sd::Tensor<float>* guidance = nullptr;
const std::vector<sd::Tensor<float>>* ref_latents = nullptr;
bool increase_ref_index = false;
int num_video_frames = -1;
const std::vector<sd::Tensor<float>>* controls = nullptr;
float control_strength = 0.f;
const sd::Tensor<float>* vace_context = nullptr;
float vace_strength = 1.f;
const std::vector<int>* skip_layers = nullptr;
};
template <typename T>
static inline const sd::Tensor<T>& tensor_or_empty(const sd::Tensor<T>* tensor) {
    // Dereference `tensor` if present; otherwise hand back a shared,
    // immutable empty tensor so callers never deal with null references.
    static const sd::Tensor<T> kEmpty;
    if (tensor == nullptr) {
        return kEmpty;
    }
    return *tensor;
}
struct DiffusionModel {
virtual std::string get_desc() = 0;
virtual bool compute(int n_threads,
DiffusionParams diffusion_params,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) = 0;
virtual sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) = 0;
virtual void alloc_params_buffer() = 0;
virtual void free_params_buffer() = 0;
virtual void free_compute_buffer() = 0;
@ -93,19 +101,20 @@ struct UNetModel : public DiffusionModel {
unet.set_circular_axes(circular_x, circular_y);
}
bool compute(int n_threads,
DiffusionParams diffusion_params,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<sd::Tensor<float>> empty_controls;
return unet.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.c_concat,
diffusion_params.y,
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
tensor_or_empty(diffusion_params.c_concat),
tensor_or_empty(diffusion_params.y),
diffusion_params.num_video_frames,
diffusion_params.controls,
diffusion_params.control_strength, output, output_ctx);
diffusion_params.controls ? *diffusion_params.controls : empty_controls,
diffusion_params.control_strength);
}
};
@ -158,18 +167,17 @@ struct MMDiTModel : public DiffusionModel {
mmdit.set_circular_axes(circular_x, circular_y);
}
bool compute(int n_threads,
DiffusionParams diffusion_params,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<int> empty_skip_layers;
return mmdit.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.y,
output,
output_ctx,
diffusion_params.skip_layers);
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
tensor_or_empty(diffusion_params.y),
diffusion_params.skip_layers ? *diffusion_params.skip_layers : empty_skip_layers);
}
};
@ -224,22 +232,22 @@ struct FluxModel : public DiffusionModel {
flux.set_circular_axes(circular_x, circular_y);
}
bool compute(int n_threads,
DiffusionParams diffusion_params,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<sd::Tensor<float>> empty_ref_latents;
static const std::vector<int> empty_skip_layers;
return flux.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.c_concat,
diffusion_params.y,
diffusion_params.guidance,
diffusion_params.ref_latents,
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
tensor_or_empty(diffusion_params.c_concat),
tensor_or_empty(diffusion_params.y),
tensor_or_empty(diffusion_params.guidance),
diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_ref_latents,
diffusion_params.increase_ref_index,
output,
output_ctx,
diffusion_params.skip_layers);
diffusion_params.skip_layers ? *diffusion_params.skip_layers : empty_skip_layers);
}
};
@ -294,18 +302,16 @@ struct AnimaModel : public DiffusionModel {
anima.set_circular_axes(circular_x, circular_y);
}
bool compute(int n_threads,
DiffusionParams diffusion_params,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
return anima.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.c_concat,
diffusion_params.y,
output,
output_ctx);
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
tensor_or_empty(diffusion_params.t5_ids),
tensor_or_empty(diffusion_params.t5_weights));
}
};
@ -361,21 +367,19 @@ struct WanModel : public DiffusionModel {
wan.set_circular_axes(circular_x, circular_y);
}
bool compute(int n_threads,
DiffusionParams diffusion_params,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
return wan.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.y,
diffusion_params.c_concat,
nullptr,
diffusion_params.vace_context,
diffusion_params.vace_strength,
output,
output_ctx);
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
tensor_or_empty(diffusion_params.y),
tensor_or_empty(diffusion_params.c_concat),
sd::Tensor<float>(),
tensor_or_empty(diffusion_params.vace_context),
diffusion_params.vace_strength);
}
};
@ -432,18 +436,17 @@ struct QwenImageModel : public DiffusionModel {
qwen_image.set_circular_axes(circular_x, circular_y);
}
bool compute(int n_threads,
DiffusionParams diffusion_params,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<sd::Tensor<float>> empty_ref_latents;
return qwen_image.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.ref_latents,
true, // increase_ref_index
output,
output_ctx);
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_ref_latents,
true);
}
};
@ -499,18 +502,17 @@ struct ZImageModel : public DiffusionModel {
z_image.set_circular_axes(circular_x, circular_y);
}
bool compute(int n_threads,
DiffusionParams diffusion_params,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> compute(int n_threads,
const DiffusionParams& diffusion_params) override {
GGML_ASSERT(diffusion_params.x != nullptr);
GGML_ASSERT(diffusion_params.timesteps != nullptr);
static const std::vector<sd::Tensor<float>> empty_ref_latents;
return z_image.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.ref_latents,
true, // increase_ref_index
output,
output_ctx);
*diffusion_params.x,
*diffusion_params.timesteps,
tensor_or_empty(diffusion_params.context),
diffusion_params.ref_latents ? *diffusion_params.ref_latents : empty_ref_latents,
true);
}
};

View File

@ -1,10 +1,15 @@
#ifndef __EASYCACHE_HPP__
#define __EASYCACHE_HPP__
#include <cmath>
#include <limits>
#include <unordered_map>
#include <vector>
#include "condition_cache_utils.hpp"
#include "denoiser.hpp"
#include "ggml_extend.hpp"
#include "tensor.hpp"
struct EasyCacheConfig {
bool enabled = false;
@ -19,15 +24,15 @@ struct EasyCacheCacheEntry {
struct EasyCacheState {
EasyCacheConfig config;
Denoiser* denoiser = nullptr;
float start_sigma = std::numeric_limits<float>::max();
float end_sigma = 0.0f;
bool initialized = false;
bool initial_step = true;
bool skip_current_step = false;
bool step_active = false;
const SDCondition* anchor_condition = nullptr;
std::unordered_map<const SDCondition*, EasyCacheCacheEntry> cache_diffs;
Denoiser* denoiser = nullptr;
float start_sigma = std::numeric_limits<float>::max();
float end_sigma = 0.0f;
bool initialized = false;
bool initial_step = true;
bool skip_current_step = false;
bool step_active = false;
const void* anchor_condition = nullptr;
std::unordered_map<const void*, EasyCacheCacheEntry> cache_diffs;
std::vector<float> prev_input;
std::vector<float> prev_output;
float output_prev_norm = 0.0f;
@ -120,41 +125,30 @@ struct EasyCacheState {
return enabled() && step_active && skip_current_step;
}
bool has_cache(const SDCondition* cond) const {
bool has_cache(const void* cond) const {
    // True only when a cached residual exists for this condition AND it
    // actually holds data (an empty diff counts as "no cache").
    const auto it = cache_diffs.find(cond);
    if (it == cache_diffs.end()) {
        return false;
    }
    return !it->second.diff.empty();
}
void update_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
void update_cache(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
EasyCacheCacheEntry& entry = cache_diffs[cond];
size_t ne = static_cast<size_t>(ggml_nelements(output));
entry.diff.resize(ne);
float* out_data = (float*)output->data;
float* in_data = (float*)input->data;
for (size_t i = 0; i < ne; ++i) {
entry.diff[i] = out_data[i] - in_data[i];
}
sd::store_condition_cache_diff(&entry.diff, input, output);
}
void apply_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
void apply_cache(const void* cond, const sd::Tensor<float>& input, sd::Tensor<float>* output) {
auto it = cache_diffs.find(cond);
if (it == cache_diffs.end() || it->second.diff.empty()) {
return;
}
copy_ggml_tensor(output, input);
float* out_data = (float*)output->data;
const std::vector<float>& diff = it->second.diff;
for (size_t i = 0; i < diff.size(); ++i) {
out_data[i] += diff[i];
}
sd::apply_condition_cache_diff(it->second.diff, input, output);
}
bool before_condition(const SDCondition* cond,
ggml_tensor* input,
ggml_tensor* output,
bool before_condition(const void* cond,
const sd::Tensor<float>& input,
sd::Tensor<float>* output,
float sigma,
int step_index) {
if (!enabled() || step_index < 0) {
if (!enabled() || step_index < 0 || output == nullptr) {
return false;
}
if (step_index != current_step_index) {
@ -181,12 +175,12 @@ struct EasyCacheState {
if (!has_prev_input || !has_prev_output || !has_cache(cond)) {
return false;
}
size_t ne = static_cast<size_t>(ggml_nelements(input));
size_t ne = static_cast<size_t>(input.numel());
if (prev_input.size() != ne) {
return false;
}
float* input_data = (float*)input->data;
last_input_change = 0.0f;
const float* input_data = input.data();
last_input_change = 0.0f;
for (size_t i = 0; i < ne; ++i) {
last_input_change += std::fabs(input_data[i] - prev_input[i]);
}
@ -211,7 +205,7 @@ struct EasyCacheState {
return false;
}
void after_condition(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
void after_condition(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
if (!step_is_active()) {
return;
}
@ -220,16 +214,16 @@ struct EasyCacheState {
return;
}
size_t ne = static_cast<size_t>(ggml_nelements(input));
float* in_data = (float*)input->data;
size_t ne = static_cast<size_t>(input.numel());
const float* in_data = input.data();
prev_input.resize(ne);
for (size_t i = 0; i < ne; ++i) {
prev_input[i] = in_data[i];
}
has_prev_input = true;
float* out_data = (float*)output->data;
float output_change = 0.0f;
const float* out_data = output.data();
float output_change = 0.0f;
if (has_prev_output && prev_output.size() == ne) {
for (size_t i = 0; i < ne; ++i) {
output_change += std::fabs(out_data[i] - prev_output[i]);
@ -262,4 +256,6 @@ struct EasyCacheState {
cumulative_change_rate = 0.0f;
has_last_input_change = false;
}
};
};
#endif

View File

@ -341,12 +341,12 @@ struct ESRGAN : public GGMLRunner {
return success;
}
ggml_cgraph* build_graph(ggml_tensor* x) {
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor) {
if (!rrdb_net)
return nullptr;
constexpr int kGraphNodes = 1 << 16; // 65k
ggml_cgraph* gf = new_graph_custom(kGraphNodes);
x = to_backend(x);
ggml_tensor* x = make_input(x_tensor);
auto runner_ctx = get_context();
ggml_tensor* out = rrdb_net->forward(&runner_ctx, x);
@ -354,15 +354,12 @@ struct ESRGAN : public GGMLRunner {
return gf;
}
bool compute(const int n_threads,
ggml_tensor* x,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
sd::Tensor<float> compute(const int n_threads,
const sd::Tensor<float>& x) {
auto get_graph = [&]() -> ggml_cgraph* { return build_graph(x); };
auto result = restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
return result;
}
};
#endif // __ESRGAN_HPP__
#endif // __ESRGAN_HPP__

View File

@ -1178,6 +1178,7 @@ namespace Flux {
std::vector<float> pe_vec;
std::vector<float> mod_index_arange_vec;
std::vector<float> dct_vec;
sd::Tensor<float> guidance_tensor;
SDVersion version;
bool use_mask = false;
@ -1353,29 +1354,42 @@ namespace Flux {
return dct;
}
ggml_cgraph* build_graph(ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
ggml_tensor* c_concat,
ggml_tensor* y,
ggml_tensor* guidance,
std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false,
std::vector<int> skip_layers = {}) {
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor = {},
const sd::Tensor<float>& c_concat_tensor = {},
const sd::Tensor<float>& y_tensor = {},
const sd::Tensor<float>& guidance_tensor = {},
const std::vector<sd::Tensor<float>>& ref_latents_tensor = {},
bool increase_ref_index = false,
std::vector<int> skip_layers = {}) {
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* c_concat = make_optional_input(c_concat_tensor);
ggml_tensor* y = make_optional_input(y_tensor);
if (flux_params.guidance_embed || flux_params.is_chroma) {
if (!guidance_tensor.empty()) {
this->guidance_tensor = guidance_tensor;
if (flux_params.is_chroma) {
this->guidance_tensor.fill_(0.f);
}
}
}
ggml_tensor* guidance = make_optional_input(this->guidance_tensor);
std::vector<ggml_tensor*> ref_latents;
ref_latents.reserve(ref_latents_tensor.size());
for (const auto& ref_latent_tensor : ref_latents_tensor) {
ref_latents.push_back(make_input(ref_latent_tensor));
}
GGML_ASSERT(x->ne[3] == 1);
ggml_cgraph* gf = new_graph_custom(FLUX_GRAPH_SIZE);
ggml_tensor* mod_index_arange = nullptr;
ggml_tensor* dct = nullptr; // for chroma radiance
x = to_backend(x);
context = to_backend(context);
if (c_concat != nullptr) {
c_concat = to_backend(c_concat);
}
if (flux_params.is_chroma) {
guidance = ggml_set_f32(guidance, 0);
if (!use_mask) {
y = nullptr;
}
@ -1385,16 +1399,6 @@ namespace Flux {
mod_index_arange = ggml_new_tensor_1d(compute_ctx, GGML_TYPE_F32, mod_index_arange_vec.size());
set_backend_tensor_data(mod_index_arange, mod_index_arange_vec.data());
}
y = to_backend(y);
timesteps = to_backend(timesteps);
if (flux_params.guidance_embed || flux_params.is_chroma) {
guidance = to_backend(guidance);
}
for (int i = 0; i < ref_latents.size(); i++) {
ref_latents[i] = to_backend(ref_latents[i]);
}
std::set<int> txt_arange_dims;
if (sd_version_is_flux2(version)) {
txt_arange_dims = {3};
@ -1455,18 +1459,16 @@ namespace Flux {
return gf;
}
bool compute(int n_threads,
ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
ggml_tensor* c_concat,
ggml_tensor* y,
ggml_tensor* guidance,
std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr,
std::vector<int> skip_layers = std::vector<int>()) {
sd::Tensor<float> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context = {},
const sd::Tensor<float>& c_concat = {},
const sd::Tensor<float>& y = {},
const sd::Tensor<float>& guidance = {},
const std::vector<sd::Tensor<float>>& ref_latents = {},
bool increase_ref_index = false,
std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, in_channels, h, w]
// timesteps: [N, ]
// context: [N, max_position, hidden_size]
@ -1476,7 +1478,8 @@ namespace Flux {
return build_graph(x, timesteps, context, c_concat, y, guidance, ref_latents, increase_ref_index, skip_layers);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
auto result = restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
return result;
}
void test() {
@ -1485,41 +1488,51 @@ namespace Flux {
params.mem_buffer = nullptr;
params.no_alloc = false;
ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);
{
// cpu f16:
// cuda f16: nan
// cuda q8_0: pass
auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 128, 1);
sd::Tensor<float> x({16, 16, 128, 1});
// ggml_set_f32(x, 0.01f);
// auto x = load_tensor_from_file(work_ctx, "chroma_x.bin");
// auto x = load_tensor_from_file(ctx, "chroma_x.bin");
// print_ggml_tensor(x);
std::vector<float> timesteps_vec(1, 1.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
std::vector<float> guidance_vec(1, 0.f);
auto guidance = vector_to_ggml_tensor(work_ctx, guidance_vec);
auto guidance = sd::Tensor<float>::from_vector(guidance_vec);
auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 15360, 256, 1);
sd::Tensor<float> context({15360, 256, 1});
// ggml_set_f32(context, 0.01f);
// auto context = load_tensor_from_file(work_ctx, "chroma_context.bin");
// auto context = load_tensor_from_file(ctx, "chroma_context.bin");
// print_ggml_tensor(context);
// auto y = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 768, 1);
// auto y = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 768, 1);
// ggml_set_f32(y, 0.01f);
auto y = nullptr;
// print_ggml_tensor(y);
ggml_tensor* out = nullptr;
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, nullptr, y, guidance, {}, false, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = compute(8,
x,
timesteps,
context,
{},
{},
guidance,
{},
false);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("flux test done in %lldms", t1 - t0);
}
}

View File

@ -13,6 +13,7 @@
#include <iterator>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <regex>
#include <set>
@ -27,6 +28,7 @@
#include "ggml.h"
#include "model.h"
#include "tensor.hpp"
#ifdef SD_USE_CUDA
#include "ggml-cuda.h"
@ -49,6 +51,7 @@
#endif
#include "rng.hpp"
#include "tensor_ggml.hpp"
#include "util.h"
#define EPS 1e-05f
@ -205,14 +208,6 @@ __STATIC_INLINE__ float sd_image_get_f32(sd_image_t image, int64_t iw, int64_t i
return value;
}
__STATIC_INLINE__ float sd_image_get_f32(sd_image_f32_t image, int64_t iw, int64_t ih, int64_t ic, bool scale = true) {
float value = *(image.data + ih * image.width * image.channel + iw * image.channel + ic);
if (scale) {
value /= 255.f;
}
return value;
}
__STATIC_INLINE__ void print_ggml_tensor(ggml_tensor* tensor, bool shape_only = false, const char* mark = "") {
printf("%s (%s): shape(%zu, %zu, %zu, %zu)\n", mark, ggml_type_name(tensor->type), tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]);
fflush(stdout);
@ -250,6 +245,56 @@ __STATIC_INLINE__ void print_ggml_tensor(ggml_tensor* tensor, bool shape_only =
}
}
template <typename T>
__STATIC_INLINE__ void print_sd_tensor(const sd::Tensor<T>& tensor, bool shape_only = false, const char* mark = "") {
    // Debug dump: print "<mark>: shape(d0, d1, ...)" and, unless shape_only,
    // the elements near the edges of every dimension (middle entries skipped),
    // mirroring print_ggml_tensor's output format.
    printf("%s: shape(", mark);
    const size_t rank = static_cast<size_t>(tensor.dim());
    for (size_t d = 0; d < rank; ++d) {
        printf("%s%lld", d == 0 ? "" : ", ", static_cast<long long>(tensor.shape()[d]));
    }
    printf(")\n");
    fflush(stdout);
    if (shape_only) {
        return;
    }
    const int range = 3;  // number of leading/trailing indices shown per dimension
    std::vector<int64_t> dims = tensor.shape();
    // Pad to rank 4 so the fixed four-deep loop nest below always applies.
    while (dims.size() < 4) {
        dims.push_back(1);
    }
    for (int64_t a3 = 0; a3 < dims[3]; a3++) {
        if (a3 >= range && a3 + range < dims[3]) {
            continue;
        }
        for (int64_t a2 = 0; a2 < dims[2]; a2++) {
            if (a2 >= range && a2 + range < dims[2]) {
                continue;
            }
            for (int64_t a1 = 0; a1 < dims[1]; a1++) {
                if (a1 >= range && a1 + range < dims[1]) {
                    continue;
                }
                for (int64_t a0 = 0; a0 < dims[0]; a0++) {
                    if (a0 >= range && a0 + range < dims[0]) {
                        continue;
                    }
                    // Flat row-major index with dims[0] fastest-varying.
                    const int64_t offset = a0 + dims[0] * (a1 + dims[1] * (a2 + dims[2] * a3));
                    printf(" [%lld, %lld, %lld, %lld] = ", static_cast<long long>(a3), static_cast<long long>(a2), static_cast<long long>(a1), static_cast<long long>(a0));
                    if constexpr (std::is_same_v<T, float>) {
                        printf("%f\n", tensor[offset]);
                    } else if constexpr (std::is_same_v<T, ggml_fp16_t>) {
                        printf("%f\n", ggml_fp16_to_fp32(tensor[offset]));
                    } else if constexpr (std::is_same_v<T, int32_t>) {
                        printf("%d\n", tensor[offset]);
                    } else if constexpr (std::is_same_v<T, int64_t>) {
                        printf("%lld\n", static_cast<long long>(tensor[offset]));
                    }
                    fflush(stdout);
                }
            }
        }
    }
}
__STATIC_INLINE__ void ggml_ext_tensor_iter(
ggml_tensor* tensor,
const std::function<void(ggml_tensor*, int64_t, int64_t, int64_t, int64_t)>& fn) {
@ -475,99 +520,6 @@ __STATIC_INLINE__ void ggml_ext_tensor_apply_mask(ggml_tensor* image_data,
}
}
__STATIC_INLINE__ void sd_image_f32_to_ggml_tensor(sd_image_f32_t image,
ggml_tensor* tensor,
bool scale = true) {
GGML_ASSERT(image.width == tensor->ne[0]);
GGML_ASSERT(image.height == tensor->ne[1]);
GGML_ASSERT(image.channel == tensor->ne[2]);
GGML_ASSERT(1 == tensor->ne[3]);
GGML_ASSERT(tensor->type == GGML_TYPE_F32);
ggml_ext_tensor_iter(tensor, [&](ggml_tensor* tensor, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float value = sd_image_get_f32(image, i0, i1, i2, scale);
ggml_ext_tensor_set_f32(tensor, value, i0, i1, i2, i3);
});
}
__STATIC_INLINE__ void ggml_ext_tensor_split_2d(ggml_tensor* input,
ggml_tensor* output,
int x,
int y) {
int64_t width = output->ne[0];
int64_t height = output->ne[1];
int64_t channels = output->ne[2];
int64_t ne3 = output->ne[3];
int64_t input_width = input->ne[0];
int64_t input_height = input->ne[1];
GGML_ASSERT(input->type == GGML_TYPE_F32 && output->type == GGML_TYPE_F32);
for (int iy = 0; iy < height; iy++) {
for (int ix = 0; ix < width; ix++) {
for (int k = 0; k < channels; k++) {
for (int l = 0; l < ne3; l++) {
float value = ggml_ext_tensor_get_f32(input, (ix + x) % input_width, (iy + y) % input_height, k, l);
ggml_ext_tensor_set_f32(output, value, ix, iy, k, l);
}
}
}
}
}
// unclamped -> expects x in the range [0-1]
__STATIC_INLINE__ float smootherstep_f32(const float x) {
GGML_ASSERT(x >= 0.f && x <= 1.f);
return x * x * x * (x * (6.0f * x - 15.0f) + 10.0f);
}
__STATIC_INLINE__ void ggml_ext_tensor_merge_2d(ggml_tensor* input,
ggml_tensor* output,
int x,
int y,
int overlap_x,
int overlap_y,
bool circular_x,
bool circular_y,
int x_skip = 0,
int y_skip = 0) {
int64_t width = input->ne[0];
int64_t height = input->ne[1];
int64_t channels = input->ne[2];
int64_t ne3 = input->ne[3];
int64_t img_width = output->ne[0];
int64_t img_height = output->ne[1];
GGML_ASSERT(input->type == GGML_TYPE_F32 && output->type == GGML_TYPE_F32);
for (int iy = y_skip; iy < height; iy++) {
for (int ix = x_skip; ix < width; ix++) {
for (int k = 0; k < channels; k++) {
for (int l = 0; l < ne3; l++) {
float new_value = ggml_ext_tensor_get_f32(input, ix, iy, k, l);
if (overlap_x > 0 || overlap_y > 0) { // blend colors in overlapped area
float old_value = ggml_ext_tensor_get_f32(output, (x + ix) % img_width, (y + iy) % img_height, k, l);
const float x_f_0 = (circular_x || (overlap_x > 0 && x > 0)) ? (ix - x_skip) / float(overlap_x) : 1;
const float x_f_1 = (circular_x || (overlap_x > 0 && x < (img_width - width))) ? (width - ix) / float(overlap_x) : 1;
const float y_f_0 = (circular_y || (overlap_y > 0 && y > 0)) ? (iy - y_skip) / float(overlap_y) : 1;
const float y_f_1 = (circular_y || (overlap_y > 0 && y < (img_height - height))) ? (height - iy) / float(overlap_y) : 1;
const float x_f = std::min(std::min(x_f_0, x_f_1), 1.f);
const float y_f = std::min(std::min(y_f_0, y_f_1), 1.f);
ggml_ext_tensor_set_f32(
output,
old_value + new_value * smootherstep_f32(y_f) * smootherstep_f32(x_f),
(x + ix) % img_width, (y + iy) % img_height, k, l);
} else {
ggml_ext_tensor_set_f32(output, new_value, (x + ix) % img_width, (y + iy) % img_height, k, l);
}
}
}
}
}
}
__STATIC_INLINE__ float ggml_ext_tensor_mean(ggml_tensor* src) {
float mean = 0.0f;
int64_t nelements = ggml_nelements(src);
@ -832,22 +784,102 @@ __STATIC_INLINE__ void sd_tiling_calc_tiles(int& num_tiles_dim,
}
// Tiling
__STATIC_INLINE__ void sd_tiling_non_square(ggml_tensor* input,
ggml_tensor* output,
const int scale,
const int p_tile_size_x,
const int p_tile_size_y,
const float tile_overlap_factor,
const bool circular_x,
const bool circular_y,
on_tile_process on_processing,
bool slient = false) {
output = ggml_set_f32(output, 0);
int input_width = (int)input->ne[0];
int input_height = (int)input->ne[1];
int output_width = (int)output->ne[0];
int output_height = (int)output->ne[1];
__STATIC_INLINE__ int64_t sd_tensor_plane_size(const sd::Tensor<float>& tensor) {
    // Number of elements in a single 2D plane (width * height, i.e. the two
    // fastest-varying dimensions) of the tensor.
    GGML_ASSERT(tensor.dim() >= 2);
    const auto& shape = tensor.shape();
    return shape[0] * shape[1];
}
__STATIC_INLINE__ sd::Tensor<float> sd_tensor_split_2d(const sd::Tensor<float>& input, int width, int height, int x, int y) {
    // Cut a width x height tile starting at (x, y) out of every 2D plane of
    // `input`. Reads wrap around the source edges (circular addressing), so
    // tiles may straddle the boundary.
    GGML_ASSERT(input.dim() >= 4);
    std::vector<int64_t> tile_shape = input.shape();
    tile_shape[0] = width;
    tile_shape[1] = height;
    sd::Tensor<float> tile(std::move(tile_shape));
    const int64_t src_width  = input.shape()[0];
    const int64_t src_height = input.shape()[1];
    const int64_t src_plane  = sd_tensor_plane_size(input);
    const int64_t dst_plane  = sd_tensor_plane_size(tile);
    const int64_t planes     = input.numel() / src_plane;
    for (int64_t plane = 0; plane < planes; ++plane) {
        const int64_t src_base = plane * src_plane;
        const int64_t dst_base = plane * dst_plane;
        for (int row = 0; row < height; ++row) {
            const int64_t src_row = src_width * ((row + y) % src_height);
            for (int col = 0; col < width; ++col) {
                tile[dst_base + col + static_cast<int64_t>(width) * row] =
                    input[src_base + (col + x) % src_width + src_row];
            }
        }
    }
    return tile;
}
__STATIC_INLINE__ void sd_tensor_merge_2d(const sd::Tensor<float>& input,
                                          sd::Tensor<float>* output,
                                          int x,
                                          int y,
                                          int overlap_x,
                                          int overlap_y,
                                          bool circular_x,
                                          bool circular_y,
                                          int x_skip = 0,
                                          int y_skip = 0) {
    // Blend the tile `input` into `*output` at offset (x, y). Where tiles
    // overlap, contributions are feathered with a smootherstep ramp over the
    // overlap band; destination writes wrap circularly at the image edges.
    // x_skip/y_skip drop the tile's leading rows/columns (already covered by
    // a previous tile).
    GGML_ASSERT(output != nullptr);
    const int64_t tile_w = input.shape()[0];
    const int64_t tile_h = input.shape()[1];
    const int64_t img_w  = output->shape()[0];
    const int64_t img_h  = output->shape()[1];
    const int64_t src_plane = sd_tensor_plane_size(input);
    const int64_t dst_plane = sd_tensor_plane_size(*output);
    const int64_t planes    = input.numel() / src_plane;
    GGML_ASSERT(output->numel() / dst_plane == planes);
    // unclamped -> expects v in the range [0-1]
    auto smootherstep_f32 = [](const float v) -> float {
        GGML_ASSERT(v >= 0.f && v <= 1.f);
        return v * v * v * (v * (6.0f * v - 15.0f) + 10.0f);
    };
    const bool blend = overlap_x > 0 || overlap_y > 0;
    for (int row = y_skip; row < tile_h; row++) {
        for (int col = x_skip; col < tile_w; col++) {
            const int64_t src_xy = col + tile_w * row;
            const int64_t dst_xy = (x + col) % img_w + img_w * ((y + row) % img_h);
            if (!blend) {
                for (int64_t plane = 0; plane < planes; ++plane) {
                    (*output)[plane * dst_plane + dst_xy] = input[plane * src_plane + src_xy];
                }
                continue;
            }
            // Ramp toward 0 at edges that overlap a neighbouring tile; edges
            // flush with the image border (non-circular) keep full weight.
            const float x_f_0 = (circular_x || (overlap_x > 0 && x > 0)) ? (col - x_skip) / float(overlap_x) : 1.f;
            const float x_f_1 = (circular_x || (overlap_x > 0 && x < (img_w - tile_w))) ? (tile_w - col) / float(overlap_x) : 1.f;
            const float y_f_0 = (circular_y || (overlap_y > 0 && y > 0)) ? (row - y_skip) / float(overlap_y) : 1.f;
            const float y_f_1 = (circular_y || (overlap_y > 0 && y < (img_h - tile_h))) ? (tile_h - row) / float(overlap_y) : 1.f;
            const float wx = smootherstep_f32(std::min(std::min(x_f_0, x_f_1), 1.f));
            const float wy = smootherstep_f32(std::min(std::min(y_f_0, y_f_1), 1.f));
            for (int64_t plane = 0; plane < planes; ++plane) {
                const int64_t dst_index = plane * dst_plane + dst_xy;
                const float new_value = input[plane * src_plane + src_xy];
                (*output)[dst_index] = (*output)[dst_index] + new_value * wy * wx;
            }
        }
    }
}
template <typename Fn>
__STATIC_INLINE__ sd::Tensor<float> process_tiles_2d(const sd::Tensor<float>& input,
int output_width,
int output_height,
int scale,
int p_tile_size_x,
int p_tile_size_y,
float tile_overlap_factor,
bool circular_x,
bool circular_y,
Fn&& on_processing,
bool silent = false) {
sd::Tensor<float> output;
int input_width = static_cast<int>(input.shape()[0]);
int input_height = static_cast<int>(input.shape()[1]);
GGML_ASSERT(((input_width / output_width) == (input_height / output_height)) &&
((output_width / input_width) == (output_height / input_height)));
@ -856,8 +888,7 @@ __STATIC_INLINE__ void sd_tiling_non_square(ggml_tensor* input,
int small_width = output_width;
int small_height = output_height;
bool decode = output_width > input_width;
bool decode = output_width > input_width;
if (decode) {
small_width = input_width;
small_height = input_height;
@ -871,25 +902,16 @@ __STATIC_INLINE__ void sd_tiling_non_square(ggml_tensor* input,
float tile_overlap_factor_y;
sd_tiling_calc_tiles(num_tiles_y, tile_overlap_factor_y, small_height, p_tile_size_y, tile_overlap_factor, circular_y);
if (!slient) {
LOG_DEBUG("num tiles : %d, %d ", num_tiles_x, num_tiles_y);
LOG_DEBUG("optimal overlap : %f, %f (targeting %f)", tile_overlap_factor_x, tile_overlap_factor_y, tile_overlap_factor);
}
int tile_overlap_x = (int32_t)(p_tile_size_x * tile_overlap_factor_x);
int tile_overlap_x = static_cast<int32_t>(p_tile_size_x * tile_overlap_factor_x);
int non_tile_overlap_x = p_tile_size_x - tile_overlap_x;
int tile_overlap_y = (int32_t)(p_tile_size_y * tile_overlap_factor_y);
int tile_overlap_y = static_cast<int32_t>(p_tile_size_y * tile_overlap_factor_y);
int non_tile_overlap_y = p_tile_size_y - tile_overlap_y;
int tile_size_x = p_tile_size_x < small_width ? p_tile_size_x : small_width;
int tile_size_y = p_tile_size_y < small_height ? p_tile_size_y : small_height;
int tile_size_x = p_tile_size_x < small_width ? p_tile_size_x : small_width;
int tile_size_y = p_tile_size_y < small_height ? p_tile_size_y : small_height;
int input_tile_size_x = tile_size_x;
int input_tile_size_y = tile_size_y;
int output_tile_size_x = tile_size_x;
int output_tile_size_y = tile_size_y;
if (decode) {
output_tile_size_x *= scale;
output_tile_size_y *= scale;
@ -898,41 +920,23 @@ __STATIC_INLINE__ void sd_tiling_non_square(ggml_tensor* input,
input_tile_size_y *= scale;
}
ggml_init_params params = {};
params.mem_size += input_tile_size_x * input_tile_size_y * input->ne[2] * input->ne[3] * sizeof(float); // input chunk
params.mem_size += output_tile_size_x * output_tile_size_y * output->ne[2] * output->ne[3] * sizeof(float); // output chunk
params.mem_size += 3 * ggml_tensor_overhead();
params.mem_buffer = nullptr;
params.no_alloc = false;
if (!slient) {
LOG_DEBUG("tile work buffer size: %.2f MB", params.mem_size / 1024.f / 1024.f);
}
// draft context
ggml_context* tiles_ctx = ggml_init(params);
if (!tiles_ctx) {
LOG_ERROR("ggml_init() failed");
return;
}
// tiling
ggml_tensor* input_tile = ggml_new_tensor_4d(tiles_ctx, GGML_TYPE_F32, input_tile_size_x, input_tile_size_y, input->ne[2], input->ne[3]);
ggml_tensor* output_tile = ggml_new_tensor_4d(tiles_ctx, GGML_TYPE_F32, output_tile_size_x, output_tile_size_y, output->ne[2], output->ne[3]);
int num_tiles = num_tiles_x * num_tiles_y;
if (!slient) {
int num_tiles = num_tiles_x * num_tiles_y;
int tile_count = 1;
bool last_y = false;
bool last_x = false;
float last_time = 0.0f;
if (!silent) {
LOG_DEBUG("num tiles : %d, %d ", num_tiles_x, num_tiles_y);
LOG_DEBUG("optimal overlap : %f, %f (targeting %f)", tile_overlap_factor_x, tile_overlap_factor_y, tile_overlap_factor);
LOG_DEBUG("processing %i tiles", num_tiles);
pretty_progress(0, num_tiles, 0.0f);
}
int tile_count = 1;
bool last_y = false, last_x = false;
float last_time = 0.0f;
for (int y = 0; y < small_height && !last_y; y += non_tile_overlap_y) {
int dy = 0;
if (!circular_y && y + tile_size_y >= small_height) {
int _y = y;
y = small_height - tile_size_y;
dy = _y - y;
int original_y = y;
y = small_height - tile_size_y;
dy = original_y - y;
if (decode) {
dy *= scale;
}
@ -941,9 +945,9 @@ __STATIC_INLINE__ void sd_tiling_non_square(ggml_tensor* input,
for (int x = 0; x < small_width && !last_x; x += non_tile_overlap_x) {
int dx = 0;
if (!circular_x && x + tile_size_x >= small_width) {
int _x = x;
x = small_width - tile_size_x;
dx = _x - x;
int original_x = x;
x = small_width - tile_size_x;
dx = original_x - x;
if (decode) {
dx *= scale;
}
@ -958,38 +962,37 @@ __STATIC_INLINE__ void sd_tiling_non_square(ggml_tensor* input,
int overlap_x_out = decode ? tile_overlap_x * scale : tile_overlap_x;
int overlap_y_out = decode ? tile_overlap_y * scale : tile_overlap_y;
int64_t t1 = ggml_time_ms();
ggml_ext_tensor_split_2d(input, input_tile, x_in, y_in);
if (on_processing(input_tile, output_tile, false)) {
ggml_ext_tensor_merge_2d(output_tile, output, x_out, y_out, overlap_x_out, overlap_y_out, circular_x, circular_y, dx, dy);
int64_t t1 = ggml_time_ms();
auto input_tile = sd_tensor_split_2d(input, input_tile_size_x, input_tile_size_y, x_in, y_in);
auto output_tile = on_processing(input_tile);
if (output_tile.empty()) {
return {};
}
GGML_ASSERT(output_tile.shape()[0] == output_tile_size_x && output_tile.shape()[1] == output_tile_size_y);
if (output.empty()) {
std::vector<int64_t> output_shape = output_tile.shape();
output_shape[0] = output_width;
output_shape[1] = output_height;
output = sd::Tensor<float>::zeros(std::move(output_shape));
}
sd_tensor_merge_2d(output_tile, &output, x_out, y_out, overlap_x_out, overlap_y_out, circular_x, circular_y, dx, dy);
if (!silent) {
int64_t t2 = ggml_time_ms();
last_time = (t2 - t1) / 1000.0f;
pretty_progress(tile_count, num_tiles, last_time);
} else {
LOG_ERROR("Failed to process patch %d at (%d, %d)", tile_count, x, y);
}
tile_count++;
}
last_x = false;
}
if (!slient) {
if (tile_count < num_tiles) {
pretty_progress(num_tiles, num_tiles, last_time);
}
if (!silent && tile_count < num_tiles) {
pretty_progress(num_tiles, num_tiles, last_time);
}
ggml_free(tiles_ctx);
}
__STATIC_INLINE__ void sd_tiling(ggml_tensor* input,
ggml_tensor* output,
const int scale,
const int tile_size,
const float tile_overlap_factor,
const bool circular_x,
const bool circular_y,
on_tile_process on_processing) {
sd_tiling_non_square(input, output, scale, tile_size, tile_size, tile_overlap_factor, circular_x, circular_y, on_processing);
if (output.empty()) {
return {};
}
return output;
}
__STATIC_INLINE__ ggml_tensor* ggml_ext_group_norm_32(ggml_context* ctx,
@ -1588,6 +1591,18 @@ __STATIC_INLINE__ void set_timestep_embedding(std::vector<float> timesteps,
memcpy(((char*)embedding->data), ((char*)embedding_vec.data()), ggml_nbytes(embedding));
}
// Fill `embedding` with sinusoidal timestep embeddings for `timesteps`,
// resizing the tensor to [dim, n_timesteps] when its element count does not
// match the freshly computed embedding vector.
__STATIC_INLINE__ void set_timestep_embedding(std::vector<float> timesteps,
                                              sd::Tensor<float>* embedding,
                                              int dim,
                                              int max_period = 10000) {
    GGML_ASSERT(embedding != nullptr);
    const std::vector<float> values = timestep_embedding(timesteps, dim, max_period);
    const int64_t required = static_cast<int64_t>(values.size());
    if (embedding->numel() != required) {
        embedding->resize({dim, static_cast<int64_t>(timesteps.size())});
    }
    std::copy(values.cbegin(), values.cend(), embedding->values().begin());
}
__STATIC_INLINE__ ggml_tensor* new_timestep_embedding(ggml_context* ctx,
std::vector<float> timesteps,
int dim,
@ -1705,6 +1720,32 @@ protected:
bool circular_x_enabled = false;
bool circular_y_enabled = false;
// Unwrap an optional tensor, yielding a default-constructed (empty) tensor
// when no value is present.
template <typename T>
static sd::Tensor<T> take_or_empty(std::optional<sd::Tensor<T>> tensor) {
    return tensor.has_value() ? std::move(*tensor) : sd::Tensor<T>{};
}
// Optional-accepting convenience overload: unwrap the optional first, then
// delegate to the tensor overload to pad trailing singleton dimensions.
template <typename T>
static sd::Tensor<T> restore_trailing_singleton_dims(std::optional<sd::Tensor<T>> tensor,
                                                     size_t expected_dim) {
    sd::Tensor<T> unwrapped = take_or_empty(std::move(tensor));
    return restore_trailing_singleton_dims(std::move(unwrapped), expected_dim);
}
// Re-append size-1 dimensions until the tensor has `expected_dim` axes.
// Empty tensors pass through untouched.
template <typename T>
static sd::Tensor<T> restore_trailing_singleton_dims(sd::Tensor<T> tensor,
                                                     size_t expected_dim) {
    if (!tensor.empty()) {
        for (; static_cast<size_t>(tensor.dim()) < expected_dim;) {
            tensor.unsqueeze_(tensor.dim());
        }
    }
    return tensor;
}
void alloc_params_ctx() {
ggml_init_params params;
params.mem_size = static_cast<size_t>(MAX_PARAMS_TENSOR_NUM * ggml_tensor_overhead());
@ -2042,6 +2083,29 @@ public:
backend_tensor_data_map[tensor] = data;
}
// Create a ggml tensor in the compute-graph context mirroring `tensor`'s
// shape/type, and register the host data so it is uploaded to the backend
// before graph execution.
template <typename T>
ggml_tensor* make_input(const sd::Tensor<T>& tensor) {
    ggml_tensor* graph_input = sd::make_ggml_tensor(compute_ctx, tensor, false);
    set_backend_tensor_data(graph_input, tensor.data());
    return graph_input;
}
// Like make_input(), but an empty tensor maps to a null graph input.
template <typename T>
ggml_tensor* make_optional_input(const sd::Tensor<T>& tensor) {
    return tensor.empty() ? nullptr : make_input(tensor);
}
// Pointer variant of make_optional_input(): a null pointer yields a null
// graph input.
template <typename T>
ggml_tensor* make_optional_input(const sd::Tensor<T>* tensor) {
    return (tensor == nullptr) ? nullptr : make_input(*tensor);
}
ggml_tensor* to_backend(ggml_tensor* tensor) {
GGML_ASSERT(compute_ctx != nullptr);
if (tensor == nullptr) {
@ -2070,24 +2134,24 @@ public:
return ggml_get_tensor(cache_ctx, name.c_str());
}
bool compute(get_graph_cb_t get_graph,
int n_threads,
bool free_compute_buffer_immediately = true,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
template <typename T>
std::optional<sd::Tensor<T>> compute(get_graph_cb_t get_graph,
int n_threads,
bool free_compute_buffer_immediately,
bool no_return = false) {
if (!offload_params_to_runtime_backend()) {
LOG_ERROR("%s offload params to runtime backend failed", get_desc().c_str());
return false;
return std::nullopt;
}
if (!alloc_compute_buffer(get_graph)) {
LOG_ERROR("%s alloc compute buffer failed", get_desc().c_str());
return false;
return std::nullopt;
}
reset_compute_ctx();
ggml_cgraph* gf = get_compute_graph(get_graph);
if (!ggml_gallocr_alloc_graph(compute_allocr, gf)) {
LOG_ERROR("%s alloc compute graph failed", get_desc().c_str());
return false;
return std::nullopt;
}
copy_data_to_backend_tensor();
if (ggml_backend_is_cpu(runtime_backend)) {
@ -2097,26 +2161,19 @@ public:
ggml_status status = ggml_backend_graph_compute(runtime_backend, gf);
if (status != GGML_STATUS_SUCCESS) {
LOG_ERROR("%s compute failed: %s", get_desc().c_str(), ggml_status_to_string(status));
return false;
return std::nullopt;
}
#ifdef GGML_PERF
ggml_graph_print(gf);
#endif
copy_cache_tensors_to_cache_buffer();
if (output != nullptr) {
auto result = ggml_get_tensor(compute_ctx, final_result_name.c_str());
if (*output == nullptr && output_ctx != nullptr) {
*output = ggml_dup_tensor(output_ctx, result);
}
if (*output != nullptr) {
ggml_ext_backend_tensor_get_and_sync(runtime_backend, result, (*output)->data, 0, ggml_nbytes(*output));
}
auto result = ggml_get_tensor(compute_ctx, final_result_name.c_str());
std::optional<sd::Tensor<T>> output;
if (!no_return) {
output = sd::make_sd_tensor_from_ggml<T>(result);
}
if (free_compute_buffer_immediately) {
free_compute_buffer();
}
return true;
return output;
}
void set_flash_attention_enabled(bool enabled) {

View File

@ -1,6 +1,8 @@
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include "ggml.h"
#include "tensor.hpp"
const float wan_21_latent_rgb_proj[16][3] = {
{0.015123f, -0.148418f, 0.479828f},
@ -232,3 +234,67 @@ void preview_latent_video(uint8_t* buffer, ggml_tensor* latents, const float (*l
}
}
}
// A 5-D latent tensor is treated as video (an extra frames axis); 4-D (or
// lower) latents are treated as a single image.
static inline bool preview_latent_tensor_is_video(const sd::Tensor<float>& latents) {
    return 5 == latents.dim();
}
// Render a quick RGB preview of a latent tensor into `buffer` (RGB8, one
// frame after another).
//
// buffer           output, rgb_width * rgb_height * frames * 3 bytes — caller-allocated.
// latents          latent tensor; axis 0 = width, axis 1 = height; for 5-D
//                  (video) tensors axis 2 = frames and axis 3 = channels,
//                  otherwise axis 2 = channels (see preview_latent_tensor_is_video).
// latent_rgb_proj  optional per-channel RGB projection matrix; when null the
//                  first three latent channels are used directly as RGB.
// latent_rgb_bias  optional RGB bias added after projection.
// patch_size       spatial patch factor: each latent pixel expands to
//                  patch_size x patch_size output pixels, with the patch
//                  offset selecting which packed channel group to read.
void preview_latent_video(uint8_t* buffer, const sd::Tensor<float>& latents, const float (*latent_rgb_proj)[3], const float latent_rgb_bias[3], int patch_size) {
    uint32_t latent_width = static_cast<uint32_t>(latents.shape()[0]);
    uint32_t latent_height = static_cast<uint32_t>(latents.shape()[1]);
    bool is_video = preview_latent_tensor_is_video(latents);
    uint32_t frames = is_video ? static_cast<uint32_t>(latents.shape()[2]) : 1;
    uint32_t dim = is_video ? static_cast<uint32_t>(latents.shape()[3]) : static_cast<uint32_t>(latents.shape()[2]);
    // Output image is the latent grid scaled up by the patch factor.
    uint32_t rgb_width = latent_width * patch_size;
    uint32_t rgb_height = latent_height * patch_size;
    // Channels are packed patch_size^2 per "unpatched" channel.
    uint32_t unpatched_dim = dim / (patch_size * patch_size);
    for (uint32_t k = 0; k < frames; k++) {
        for (uint32_t rgb_x = 0; rgb_x < rgb_width; rgb_x++) {
            for (uint32_t rgb_y = 0; rgb_y < rgb_height; rgb_y++) {
                uint32_t latent_x = rgb_x / patch_size;
                uint32_t latent_y = rgb_y / patch_size;
                uint32_t channel_offset = 0;
                if (patch_size > 1) {
                    // Position within the patch selects the packed sub-channel.
                    channel_offset = ((rgb_y % patch_size) * patch_size + (rgb_x % patch_size));
                }
                size_t pixel_id = k * rgb_width * rgb_height + rgb_y * rgb_width + rgb_x;
                // Flat read of latents at (latent_x, latent_y[, k], channel),
                // matching the axis layout described above.
                auto latent_value = [&](uint32_t latent_channel) -> float {
                    return is_video
                               ? latents.values()[latent_x + latent_width * (latent_y + latent_height * (k + frames * latent_channel))]
                               : latents.values()[latent_x + latent_width * (latent_y + latent_height * latent_channel)];
                };
                float r = 0.f, g = 0.f, b = 0.f;
                if (latent_rgb_proj != nullptr) {
                    // Project all unpatched channels to RGB.
                    for (uint32_t d = 0; d < unpatched_dim; d++) {
                        uint32_t latent_channel = d * patch_size * patch_size + channel_offset;
                        float value = latent_value(latent_channel);
                        r += value * latent_rgb_proj[d][0];
                        g += value * latent_rgb_proj[d][1];
                        b += value * latent_rgb_proj[d][2];
                    }
                } else {
                    // No projection: interpret the first three channels as RGB.
                    r = latent_value(0);
                    g = latent_value(1);
                    b = latent_value(2);
                }
                if (latent_rgb_bias != nullptr) {
                    r += latent_rgb_bias[0];
                    g += latent_rgb_bias[1];
                    b += latent_rgb_bias[2];
                }
                // Map from roughly [-1, 1] to [0, 1] and clamp, then quantize to 8-bit.
                r = std::min(1.0f, std::max(0.0f, r * .5f + .5f));
                g = std::min(1.0f, std::max(0.0f, g * .5f + .5f));
                b = std::min(1.0f, std::max(0.0f, b * .5f + .5f));
                buffer[pixel_id * 3 + 0] = (uint8_t)(r * 255);
                buffer[pixel_id * 3 + 1] = (uint8_t)(g * 255);
                buffer[pixel_id * 3 + 2] = (uint8_t)(b * 255);
            }
        }
    }
}

View File

@ -194,6 +194,7 @@ namespace LLM {
bool padding = false) {
if (add_bos_token) {
tokens.insert(tokens.begin(), BOS_TOKEN_ID);
weights.insert(weights.begin(), 1.f);
}
if (max_length > 0 && padding) {
size_t n = static_cast<size_t>(std::ceil(tokens.size() * 1.f / max_length));
@ -1180,16 +1181,17 @@ namespace LLM {
return hidden_states;
}
ggml_cgraph* build_graph(ggml_tensor* input_ids,
ggml_tensor* attention_mask,
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
ggml_cgraph* build_graph(const sd::Tensor<int32_t>& input_ids_tensor,
const sd::Tensor<float>& attention_mask_tensor,
const std::vector<std::pair<int, sd::Tensor<float>>>& image_embeds_tensor,
std::set<int> out_layers) {
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
input_ids = to_backend(input_ids);
for (auto& image_embed : image_embeds) {
image_embed.second = to_backend(image_embed.second);
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
ggml_tensor* input_ids = make_input(input_ids_tensor);
std::vector<std::pair<int, ggml_tensor*>> image_embeds;
image_embeds.reserve(image_embeds_tensor.size());
for (const auto& [idx, embed_tensor] : image_embeds_tensor) {
ggml_tensor* embed = make_input(embed_tensor);
image_embeds.emplace_back(idx, embed);
}
int64_t n_tokens = input_ids->ne[0];
@ -1213,8 +1215,9 @@ namespace LLM {
input_pos_vec.size());
set_backend_tensor_data(input_pos, input_pos_vec.data());
if (attention_mask != nullptr) {
attention_mask = to_backend(attention_mask);
ggml_tensor* attention_mask = nullptr;
if (!attention_mask_tensor.empty()) {
attention_mask = make_input(attention_mask_tensor);
} else {
attention_mask_vec.resize(n_tokens * n_tokens);
for (int i0 = 0; i0 < n_tokens; i0++) {
@ -1239,17 +1242,15 @@ namespace LLM {
return gf;
}
bool compute(const int n_threads,
ggml_tensor* input_ids,
ggml_tensor* attention_mask,
std::vector<std::pair<int, ggml_tensor*>> image_embeds,
std::set<int> out_layers,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
sd::Tensor<float> compute(const int n_threads,
const sd::Tensor<int32_t>& input_ids,
const sd::Tensor<float>& attention_mask,
const std::vector<std::pair<int, sd::Tensor<float>>>& image_embeds,
std::set<int> out_layers) {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(input_ids, attention_mask, image_embeds, out_layers);
};
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
return take_or_empty(GGMLRunner::compute<float>(get_graph, n_threads, true));
}
int64_t get_num_image_tokens(int64_t t, int64_t h, int64_t w) {
@ -1288,8 +1289,9 @@ namespace LLM {
return image;
}
ggml_cgraph* build_encode_image_graph(ggml_tensor* image) {
ggml_cgraph* gf = new_graph_custom(LLM_GRAPH_SIZE);
ggml_cgraph* build_encode_image_graph(const sd::Tensor<float>& image_tensor) {
ggml_cgraph* gf = new_graph_custom(LLM_GRAPH_SIZE);
ggml_tensor* image = make_input(image_tensor);
GGML_ASSERT(image->ne[1] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
GGML_ASSERT(image->ne[0] % (params.vision.patch_size * params.vision.spatial_merge_size) == 0);
@ -1301,8 +1303,6 @@ namespace LLM {
int llm_grid_w = grid_w / params.vision.spatial_merge_size;
int vit_merger_window_size = params.vision.window_size / params.vision.patch_size / params.vision.spatial_merge_size;
image = to_backend(image);
auto pixel_values = process_image(compute_ctx, image);
// window index
@ -1411,14 +1411,12 @@ namespace LLM {
return gf;
}
void encode_image(const int n_threads,
ggml_tensor* image,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
sd::Tensor<float> encode_image(const int n_threads,
const sd::Tensor<float>& image) {
auto get_graph = [&]() -> ggml_cgraph* {
return build_encode_image_graph(image);
};
GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return take_or_empty(GGMLRunner::compute<float>(get_graph, n_threads, false));
}
};
@ -1497,39 +1495,41 @@ namespace LLM {
params.mem_buffer = nullptr;
params.no_alloc = false;
ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);
bool test_mistral = false;
bool test_qwen3 = true;
bool test_vit = false;
bool test_decoder_with_vit = false;
if (test_decoder_with_vit) {
ggml_tensor* image_embed = nullptr;
sd::Tensor<float> image_embed;
{
auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin");
print_ggml_tensor(image, false, "image");
ggml_tensor* out = nullptr;
auto image = sd::load_tensor_from_file_as_tensor<float>("qwen2vl_normalized.bin");
print_sd_tensor(image, false, "image");
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
model.encode_image(8, image, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = model.encode_image(8, image);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out, false, "image_embed");
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out, false, "image_embed");
image_embed = out;
LOG_DEBUG("llm encode_image test done in %lldms", t1 - t0);
}
std::string placeholder = "<|image_pad|>";
std::string img_prompt = "Picture 1: <|vision_start|>"; // [24669, 220, 16, 25, 220, 151652]
int64_t num_image_tokens = image_embed->ne[1];
int64_t num_image_tokens = image_embed.shape()[1];
img_prompt.reserve(num_image_tokens * placeholder.size());
for (int i = 0; i < num_image_tokens; i++) {
img_prompt += placeholder;
}
img_prompt += "<|vision_end|>";
std::vector<std::pair<int, ggml_tensor*>> image_embeds;
std::vector<std::pair<int, sd::Tensor<float>>> image_embeds;
image_embeds.emplace_back(64, image_embed);
std::pair<int, int> prompt_attn_range;
@ -1547,29 +1547,33 @@ namespace LLM {
printf("%d ", token);
}
printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
ggml_tensor* out = nullptr;
auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
model.compute(8, input_ids, nullptr, image_embeds, {}, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), image_embeds, {});
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0);
} else if (test_vit) {
// auto image = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 280, 280, 3);
// auto image = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 280, 280, 3);
// ggml_set_f32(image, 0.f);
auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin");
print_ggml_tensor(image, false, "image");
ggml_tensor* out = nullptr;
auto image = sd::load_tensor_from_file_as_tensor<float>("qwen2vl_normalized.bin");
print_sd_tensor(image, false, "image");
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
model.encode_image(8, image, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = model.encode_image(8, image);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out, false, "out");
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out, false, "out");
// auto ref_out = load_tensor_from_file(work_ctx, "qwen2vl.bin");
// auto ref_out = load_tensor_from_file(ctx, "qwen2vl.bin");
// ggml_ext_tensor_diff(ref_out, out, 0.01f);
LOG_DEBUG("llm test done in %lldms", t1 - t0);
@ -1587,14 +1591,16 @@ namespace LLM {
printf("%d ", token);
}
printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
ggml_tensor* out = nullptr;
auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
model.compute(8, input_ids, nullptr, {}, {10, 20, 30}, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), {}, {10, 20, 30});
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0);
} else if (test_qwen3) {
std::pair<int, int> prompt_attn_range;
@ -1610,14 +1616,16 @@ namespace LLM {
printf("%d ", token);
}
printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
ggml_tensor* out = nullptr;
auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
model.compute(8, input_ids, nullptr, {}, {35}, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), {}, {35});
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0);
} else {
std::pair<int, int> prompt_attn_range;
@ -1633,14 +1641,16 @@ namespace LLM {
printf("%d ", token);
}
printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
ggml_tensor* out = nullptr;
auto input_ids = sd::Tensor<int32_t>::from_vector(tokens);
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
model.compute(8, input_ids, nullptr, {}, {}, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = model.compute(8, input_ids, sd::Tensor<float>(), {}, {});
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("llm test done in %lldms", t1 - t0);
}
}

View File

@ -792,7 +792,7 @@ struct LoraModel : public GGMLRunner {
auto get_graph = [&]() -> ggml_cgraph* {
return build_lora_graph(model_tensors, version);
};
GGMLRunner::compute(get_graph, n_threads, false);
GGMLRunner::compute<float>(get_graph, n_threads, false, true);
stat();
for (auto item : original_tensor_to_final_tensor) {
ggml_tensor* original_tensor = item.first;

View File

@ -836,17 +836,17 @@ struct MMDiTRunner : public GGMLRunner {
mmdit.get_param_tensors(tensors, prefix);
}
ggml_cgraph* build_graph(ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
ggml_tensor* y,
std::vector<int> skip_layers = std::vector<int>()) {
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor = {},
const sd::Tensor<float>& y_tensor = {},
std::vector<int> skip_layers = std::vector<int>()) {
ggml_cgraph* gf = new_graph_custom(MMDIT_GRAPH_SIZE);
x = to_backend(x);
context = to_backend(context);
y = to_backend(y);
timesteps = to_backend(timesteps);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* y = make_optional_input(y_tensor);
auto runner_ctx = get_context();
ggml_tensor* out = mmdit.forward(&runner_ctx,
@ -861,14 +861,12 @@ struct MMDiTRunner : public GGMLRunner {
return gf;
}
bool compute(int n_threads,
ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
ggml_tensor* y,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr,
std::vector<int> skip_layers = std::vector<int>()) {
sd::Tensor<float> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context = {},
const sd::Tensor<float>& y = {},
std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, in_channels, h, w]
// timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 154, 4096]) or [1, max_position, hidden_size]
@ -877,7 +875,7 @@ struct MMDiTRunner : public GGMLRunner {
return build_graph(x, timesteps, context, y, skip_layers);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
}
void test() {
@ -886,35 +884,41 @@ struct MMDiTRunner : public GGMLRunner {
params.mem_buffer = nullptr;
params.no_alloc = false;
ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);
{
// cpu f16: pass
// cpu f32: pass
// cuda f16: pass
// cuda f32: pass
auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 128, 128, 16, 1);
sd::Tensor<float> x({128, 128, 16, 1});
std::vector<float> timesteps_vec(1, 999.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
ggml_set_f32(x, 0.01f);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
x.fill_(0.01f);
// print_ggml_tensor(x);
auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 4096, 154, 1);
ggml_set_f32(context, 0.01f);
sd::Tensor<float> context({4096, 154, 1});
context.fill_(0.01f);
// print_ggml_tensor(context);
auto y = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 2048, 1);
ggml_set_f32(y, 0.01f);
sd::Tensor<float> y({2048, 1});
y.fill_(0.01f);
// print_ggml_tensor(y);
ggml_tensor* out = nullptr;
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, y, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = compute(8,
x,
timesteps,
context,
y);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("mmdit test done in %lldms", t1 - t0);
}
}

View File

@ -443,11 +443,10 @@ public:
id_encoder2.get_param_tensors(tensors, prefix);
}
ggml_cgraph* build_graph( // ggml_allocr* allocr,
ggml_tensor* id_pixel_values,
ggml_tensor* prompt_embeds,
std::vector<bool>& class_tokens_mask,
ggml_tensor* id_embeds) {
ggml_cgraph* build_graph(const sd::Tensor<float>& id_pixel_values_tensor,
const sd::Tensor<float>& prompt_embeds_tensor,
std::vector<bool>& class_tokens_mask,
const sd::Tensor<float>& id_embeds_tensor = {}) {
ctm.clear();
ctmf16.clear();
ctmpos.clear();
@ -460,16 +459,16 @@ public:
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
ggml_tensor* id_pixel_values = make_input(id_pixel_values_tensor);
ggml_tensor* prompt_embeds = make_input(prompt_embeds_tensor);
ggml_tensor* id_embeds = make_optional_input(id_embeds_tensor);
int64_t hidden_size = prompt_embeds->ne[0];
int64_t seq_length = prompt_embeds->ne[1];
ggml_type type = GGML_TYPE_F32;
ggml_tensor* class_tokens_mask_d = ggml_new_tensor_1d(runner_ctx.ggml_ctx, type, class_tokens_mask.size());
ggml_tensor* id_pixel_values_d = to_backend(id_pixel_values);
ggml_tensor* prompt_embeds_d = to_backend(prompt_embeds);
ggml_tensor* id_embeds_d = to_backend(id_embeds);
ggml_tensor* left = nullptr;
ggml_tensor* right = nullptr;
for (int i = 0; i < class_tokens_mask.size(); i++) {
@ -529,18 +528,18 @@ public:
ggml_tensor* updated_prompt_embeds = nullptr;
if (pm_version == PM_VERSION_1)
updated_prompt_embeds = id_encoder.forward(&runner_ctx,
id_pixel_values_d,
prompt_embeds_d,
id_pixel_values,
prompt_embeds,
class_tokens_mask_d,
class_tokens_mask_pos,
left, right);
else if (pm_version == PM_VERSION_2)
updated_prompt_embeds = id_encoder2.forward(&runner_ctx,
id_pixel_values_d,
prompt_embeds_d,
id_pixel_values,
prompt_embeds,
class_tokens_mask_d,
class_tokens_mask_pos,
id_embeds_d,
id_embeds,
left, right);
ggml_build_forward_expand(gf, updated_prompt_embeds);
@ -548,20 +547,16 @@ public:
return gf;
}
bool compute(const int n_threads,
ggml_tensor* id_pixel_values,
ggml_tensor* prompt_embeds,
ggml_tensor* id_embeds,
std::vector<bool>& class_tokens_mask,
ggml_tensor** updated_prompt_embeds,
ggml_context* output_ctx) {
sd::Tensor<float> compute(const int n_threads,
const sd::Tensor<float>& id_pixel_values,
const sd::Tensor<float>& prompt_embeds,
const sd::Tensor<float>& id_embeds,
std::vector<bool>& class_tokens_mask) {
auto get_graph = [&]() -> ggml_cgraph* {
// return build_graph(compute_allocr, id_pixel_values, prompt_embeds, class_tokens_mask);
return build_graph(id_pixel_values, prompt_embeds, class_tokens_mask, id_embeds);
};
// GGMLRunner::compute(get_graph, n_threads, updated_prompt_embeds);
return GGMLRunner::compute(get_graph, n_threads, true, updated_prompt_embeds, output_ctx);
return take_or_empty(GGMLRunner::compute<float>(get_graph, n_threads, true));
}
};

View File

@ -1,179 +1,241 @@
#ifndef __PREPROCESSING_HPP__
#define __PREPROCESSING_HPP__
#include <cmath>
#include <limits>
#include "ggml_extend.hpp"
#define M_PI_ 3.14159265358979323846f
void convolve(ggml_tensor* input, ggml_tensor* output, ggml_tensor* kernel, int padding) {
ggml_init_params params;
params.mem_size = 80 * input->ne[0] * input->ne[1]; // 20M for 512x512
params.mem_buffer = nullptr;
params.no_alloc = false;
ggml_context* ctx0 = ggml_init(params);
ggml_tensor* kernel_fp16 = ggml_new_tensor_4d(ctx0, GGML_TYPE_F16, kernel->ne[0], kernel->ne[1], 1, 1);
ggml_fp32_to_fp16_row((float*)kernel->data, (ggml_fp16_t*)kernel_fp16->data, ggml_nelements(kernel));
ggml_tensor* h = ggml_conv_2d(ctx0, kernel_fp16, input, 1, 1, padding, padding, 1, 1);
ggml_cgraph* gf = ggml_new_graph(ctx0);
ggml_build_forward_expand(gf, ggml_cpy(ctx0, h, output));
ggml_graph_compute_with_ctx(ctx0, gf, 1);
ggml_free(ctx0);
// Flat offset into a <=4-D float tensor using ggml's layout (dimension 0
// fastest-varying); missing trailing dimensions are treated as size 1.
static inline int64_t preprocessing_offset_4d(const sd::Tensor<float>& tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
    const auto& shape = tensor.shape();
    const int64_t n0 = shape.empty() ? 1 : shape[0];
    const int64_t n1 = shape.size() < 2 ? 1 : shape[1];
    const int64_t n2 = shape.size() < 3 ? 1 : shape[2];
    int64_t offset = i3;
    offset = offset * n2 + i2;
    offset = offset * n1 + i1;
    offset = offset * n0 + i0;
    return offset;
}
void gaussian_kernel(ggml_tensor* kernel) {
int ks_mid = static_cast<int>(kernel->ne[0] / 2);
// Read one element using the 4-D indexing convention of preprocessing_offset_4d().
static inline float preprocessing_get_4d(const sd::Tensor<float>& tensor, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
    const size_t offset = static_cast<size_t>(preprocessing_offset_4d(tensor, i0, i1, i2, i3));
    return tensor.values()[offset];
}
// Write one element using the 4-D indexing convention of preprocessing_offset_4d().
static inline void preprocessing_set_4d(sd::Tensor<float>& tensor, float value, int64_t i0, int64_t i1 = 0, int64_t i2 = 0, int64_t i3 = 0) {
    const size_t offset = static_cast<size_t>(preprocessing_offset_4d(tensor, i0, i1, i2, i3));
    tensor.values()[offset] = value;
}
// Convert an sd_image_t into a (width, height, channel, 1) float tensor.
// Pixel values come from sd_image_get_f32 — presumably normalized floats;
// confirm against that helper if exact range matters.
static inline sd::Tensor<float> sd_image_to_preprocessing_tensor(sd_image_t image) {
    const int64_t w = static_cast<int64_t>(image.width);
    const int64_t h = static_cast<int64_t>(image.height);
    const int64_t c = static_cast<int64_t>(image.channel);
    sd::Tensor<float> tensor({w, h, c, 1});
    for (uint32_t ch = 0; ch < image.channel; ++ch) {
        for (uint32_t row = 0; row < image.height; ++row) {
            for (uint32_t col = 0; col < image.width; ++col) {
                preprocessing_set_4d(tensor, sd_image_get_f32(image, col, row, ch), col, row, ch, 0);
            }
        }
    }
    return tensor;
}
// Convert a (width, height, channel, 1) float tensor back into interleaved
// 8-bit image data: clamp each value to [0, 1], scale to [0, 255], round.
// `image_data` must hold width * height * channel bytes.
static inline void preprocessing_tensor_to_sd_image(const sd::Tensor<float>& tensor, uint8_t* image_data) {
    GGML_ASSERT(tensor.dim() == 4);
    GGML_ASSERT(tensor.shape()[3] == 1);
    GGML_ASSERT(image_data != nullptr);
    const int width = static_cast<int>(tensor.shape()[0]);
    const int height = static_cast<int>(tensor.shape()[1]);
    const int channel = static_cast<int>(tensor.shape()[2]);
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            for (int ch = 0; ch < channel; ++ch) {
                float v = preprocessing_get_4d(tensor, col, row, ch, 0);
                v = std::max(0.0f, std::min(1.0f, v));
                image_data[(row * width + col) * channel + ch] = static_cast<uint8_t>(std::round(v * 255.0f));
            }
        }
    }
}
// Build a kernel_size x kernel_size Gaussian blur kernel (sigma = 1.4) as a
// (kernel_size, kernel_size, 1, 1) tensor.
//
// NOTE: this span in SOURCE interleaved the pre-refactor ggml-based body with
// the tensor-based one (diff residue that cannot compile); this is the
// coherent tensor-based version. gx is derived from the y index and gy from
// the x index — harmless here because the Gaussian is radially symmetric.
static inline sd::Tensor<float> gaussian_kernel_tensor(int kernel_size) {
    sd::Tensor<float> kernel({kernel_size, kernel_size, 1, 1});
    int ks_mid = kernel_size / 2;
    float sigma = 1.4f;
    float normal = 1.f / (2.0f * M_PI_ * std::pow(sigma, 2.0f));
    for (int y = 0; y < kernel_size; ++y) {
        float gx = static_cast<float>(-ks_mid + y);
        for (int x = 0; x < kernel_size; ++x) {
            float gy = static_cast<float>(-ks_mid + x);
            float k = std::exp(-((gx * gx + gy * gy) / (2.0f * std::pow(sigma, 2.0f)))) * normal;
            preprocessing_set_4d(kernel, k, x, y, 0, 0);
        }
    }
    return kernel;
}
void grayscale(ggml_tensor* rgb_img, ggml_tensor* grayscale) {
for (int iy = 0; iy < rgb_img->ne[1]; iy++) {
for (int ix = 0; ix < rgb_img->ne[0]; ix++) {
float r = ggml_ext_tensor_get_f32(rgb_img, ix, iy);
float g = ggml_ext_tensor_get_f32(rgb_img, ix, iy, 1);
float b = ggml_ext_tensor_get_f32(rgb_img, ix, iy, 2);
// Per-channel 2-D cross-correlation of `input` with a single-channel `kernel`,
// with implicit zero padding of `padding` pixels (out-of-range taps are simply
// skipped, which is equivalent to padding with zeros). Output has the same
// shape as input.
//
// input    (width, height, channels, 1) float tensor.
// kernel   (kw, kh, 1, 1) float tensor, applied identically to every channel.
// padding  border size subtracted from the tap coordinates; for "same"-size
//          output with a k x k kernel this is typically k / 2.
//
// NOTE(review): the index math computes sum over input(x+kx-pad, y+ky-pad) *
// kernel(kx, ky), i.e. correlation (kernel not flipped) — identical to
// convolution only for symmetric kernels such as the Gaussian used here.
static inline sd::Tensor<float> convolve_tensor(const sd::Tensor<float>& input, const sd::Tensor<float>& kernel, int padding) {
    GGML_ASSERT(input.dim() == 4);
    GGML_ASSERT(kernel.dim() == 4);
    GGML_ASSERT(input.shape()[3] == 1);
    GGML_ASSERT(kernel.shape()[2] == 1);
    GGML_ASSERT(kernel.shape()[3] == 1);
    sd::Tensor<float> output(input.shape());
    int64_t width = input.shape()[0];
    int64_t height = input.shape()[1];
    int64_t channels = input.shape()[2];
    int64_t kernel_w = kernel.shape()[0];
    int64_t kernel_h = kernel.shape()[1];
    for (int64_t c = 0; c < channels; ++c) {
        for (int64_t y = 0; y < height; ++y) {
            for (int64_t x = 0; x < width; ++x) {
                float sum = 0.0f;
                for (int64_t ky = 0; ky < kernel_h; ++ky) {
                    int64_t iy = y + ky - padding;
                    if (iy < 0 || iy >= height) {
                        continue;
                    }
                    for (int64_t kx = 0; kx < kernel_w; ++kx) {
                        int64_t ix = x + kx - padding;
                        if (ix < 0 || ix >= width) {
                            continue;
                        }
                        sum += preprocessing_get_4d(input, ix, iy, c, 0) * preprocessing_get_4d(kernel, kx, ky, 0, 0);
                    }
                }
                preprocessing_set_4d(output, sum, x, y, c, 0);
            }
        }
    }
    return output;
}
// Convert an RGB(A) image tensor to a single-channel grayscale tensor using
// ITU-R BT.601 luma weights (0.2989 R + 0.5870 G + 0.1140 B). Channels beyond
// the first three (e.g. alpha) are ignored.
//
// NOTE: the SOURCE span contained a stray pre-refactor
// `ggml_ext_tensor_set_f32` line duplicated next to the tensor write (diff
// residue); only the sd::Tensor write is kept here.
static inline sd::Tensor<float> grayscale_tensor(const sd::Tensor<float>& rgb_img) {
    GGML_ASSERT(rgb_img.dim() == 4);
    GGML_ASSERT(rgb_img.shape()[2] >= 3);
    sd::Tensor<float> grayscale({rgb_img.shape()[0], rgb_img.shape()[1], 1, rgb_img.shape()[3]});
    for (int64_t iy = 0; iy < rgb_img.shape()[1]; ++iy) {
        for (int64_t ix = 0; ix < rgb_img.shape()[0]; ++ix) {
            float r = preprocessing_get_4d(rgb_img, ix, iy, 0, 0);
            float g = preprocessing_get_4d(rgb_img, ix, iy, 1, 0);
            float b = preprocessing_get_4d(rgb_img, ix, iy, 2, 0);
            float gray = 0.2989f * r + 0.5870f * g + 0.1140f * b;
            preprocessing_set_4d(grayscale, gray, ix, iy, 0, 0);
        }
    }
    return grayscale;
}
void prop_hypot(ggml_tensor* x, ggml_tensor* y, ggml_tensor* h) {
int n_elements = static_cast<int>(ggml_nelements(h));
float* dx = (float*)x->data;
float* dy = (float*)y->data;
float* dh = (float*)h->data;
for (int i = 0; i < n_elements; i++) {
dh[i] = sqrtf(dx[i] * dx[i] + dy[i] * dy[i]);
static inline sd::Tensor<float> tensor_hypot(const sd::Tensor<float>& x, const sd::Tensor<float>& y) {
sd::tensor_check_same_shape(x, y);
sd::Tensor<float> out(x.shape());
for (int64_t i = 0; i < out.numel(); ++i) {
out[i] = std::sqrt(x[i] * x[i] + y[i] * y[i]);
}
return out;
}
void prop_arctan2(ggml_tensor* x, ggml_tensor* y, ggml_tensor* h) {
int n_elements = static_cast<int>(ggml_nelements(h));
float* dx = (float*)x->data;
float* dy = (float*)y->data;
float* dh = (float*)h->data;
for (int i = 0; i < n_elements; i++) {
dh[i] = atan2f(dy[i], dx[i]);
static inline sd::Tensor<float> tensor_arctan2(const sd::Tensor<float>& x, const sd::Tensor<float>& y) {
sd::tensor_check_same_shape(x, y);
sd::Tensor<float> out(x.shape());
for (int64_t i = 0; i < out.numel(); ++i) {
out[i] = std::atan2(y[i], x[i]);
}
return out;
}
void normalize_tensor(ggml_tensor* g) {
int n_elements = static_cast<int>(ggml_nelements(g));
float* dg = (float*)g->data;
float max = -INFINITY;
for (int i = 0; i < n_elements; i++) {
max = dg[i] > max ? dg[i] : max;
static inline void normalize_tensor(sd::Tensor<float>* g) {
GGML_ASSERT(g != nullptr);
if (g->empty()) {
return;
}
max = 1.0f / max;
for (int i = 0; i < n_elements; i++) {
dg[i] *= max;
float max_value = -std::numeric_limits<float>::infinity();
for (int64_t i = 0; i < g->numel(); ++i) {
max_value = std::max(max_value, (*g)[i]);
}
if (max_value == 0.0f || !std::isfinite(max_value)) {
return;
}
*g *= (1.0f / max_value);
}
void non_max_supression(ggml_tensor* result, ggml_tensor* G, ggml_tensor* D) {
for (int iy = 1; iy < result->ne[1] - 1; iy++) {
for (int ix = 1; ix < result->ne[0] - 1; ix++) {
float angle = ggml_ext_tensor_get_f32(D, ix, iy) * 180.0f / M_PI_;
angle = angle < 0.0f ? angle += 180.0f : angle;
static inline sd::Tensor<float> non_max_supression(const sd::Tensor<float>& G, const sd::Tensor<float>& D) {
GGML_ASSERT(G.shape() == D.shape());
sd::Tensor<float> result = sd::Tensor<float>::zeros(G.shape());
for (int64_t iy = 1; iy < result.shape()[1] - 1; ++iy) {
for (int64_t ix = 1; ix < result.shape()[0] - 1; ++ix) {
float angle = preprocessing_get_4d(D, ix, iy, 0, 0) * 180.0f / M_PI_;
angle = angle < 0.0f ? angle + 180.0f : angle;
float q = 1.0f;
float r = 1.0f;
// angle 0
if ((0 >= angle && angle < 22.5f) || (157.5f >= angle && angle <= 180)) {
q = ggml_ext_tensor_get_f32(G, ix, iy + 1);
r = ggml_ext_tensor_get_f32(G, ix, iy - 1);
}
// angle 45
else if (22.5f >= angle && angle < 67.5f) {
q = ggml_ext_tensor_get_f32(G, ix + 1, iy - 1);
r = ggml_ext_tensor_get_f32(G, ix - 1, iy + 1);
}
// angle 90
else if (67.5f >= angle && angle < 112.5) {
q = ggml_ext_tensor_get_f32(G, ix + 1, iy);
r = ggml_ext_tensor_get_f32(G, ix - 1, iy);
}
// angle 135
else if (112.5 >= angle && angle < 157.5f) {
q = ggml_ext_tensor_get_f32(G, ix - 1, iy - 1);
r = ggml_ext_tensor_get_f32(G, ix + 1, iy + 1);
if ((0 >= angle && angle < 22.5f) || (157.5f >= angle && angle <= 180.0f)) {
q = preprocessing_get_4d(G, ix, iy + 1, 0, 0);
r = preprocessing_get_4d(G, ix, iy - 1, 0, 0);
} else if (22.5f >= angle && angle < 67.5f) {
q = preprocessing_get_4d(G, ix + 1, iy - 1, 0, 0);
r = preprocessing_get_4d(G, ix - 1, iy + 1, 0, 0);
} else if (67.5f >= angle && angle < 112.5f) {
q = preprocessing_get_4d(G, ix + 1, iy, 0, 0);
r = preprocessing_get_4d(G, ix - 1, iy, 0, 0);
} else if (112.5f >= angle && angle < 157.5f) {
q = preprocessing_get_4d(G, ix - 1, iy - 1, 0, 0);
r = preprocessing_get_4d(G, ix + 1, iy + 1, 0, 0);
}
float cur = ggml_ext_tensor_get_f32(G, ix, iy);
if ((cur >= q) && (cur >= r)) {
ggml_ext_tensor_set_f32(result, cur, ix, iy);
} else {
ggml_ext_tensor_set_f32(result, 0.0f, ix, iy);
}
float cur = preprocessing_get_4d(G, ix, iy, 0, 0);
preprocessing_set_4d(result, (cur >= q && cur >= r) ? cur : 0.0f, ix, iy, 0, 0);
}
}
return result;
}
void threshold_hystersis(ggml_tensor* img, float high_threshold, float low_threshold, float weak, float strong) {
int n_elements = static_cast<int>(ggml_nelements(img));
float* imd = (float*)img->data;
float max = -INFINITY;
for (int i = 0; i < n_elements; i++) {
max = imd[i] > max ? imd[i] : max;
static inline void threshold_hystersis(sd::Tensor<float>* img, float high_threshold, float low_threshold, float weak, float strong) {
GGML_ASSERT(img != nullptr);
if (img->empty()) {
return;
}
float ht = max * high_threshold;
float max_value = -std::numeric_limits<float>::infinity();
for (int64_t i = 0; i < img->numel(); ++i) {
max_value = std::max(max_value, (*img)[i]);
}
float ht = max_value * high_threshold;
float lt = ht * low_threshold;
for (int i = 0; i < n_elements; i++) {
float img_v = imd[i];
if (img_v >= ht) { // strong pixel
imd[i] = strong;
} else if (img_v <= ht && img_v >= lt) { // strong pixel
imd[i] = weak;
for (int64_t i = 0; i < img->numel(); ++i) {
float img_v = (*img)[i];
if (img_v >= ht) {
(*img)[i] = strong;
} else if (img_v <= ht && img_v >= lt) {
(*img)[i] = weak;
}
}
for (int iy = 0; iy < img->ne[1]; iy++) {
for (int ix = 0; ix < img->ne[0]; ix++) {
if (ix >= 3 && ix <= img->ne[0] - 3 && iy >= 3 && iy <= img->ne[1] - 3) {
ggml_ext_tensor_set_f32(img, ggml_ext_tensor_get_f32(img, ix, iy), ix, iy);
} else {
ggml_ext_tensor_set_f32(img, 0.0f, ix, iy);
for (int64_t iy = 0; iy < img->shape()[1]; ++iy) {
for (int64_t ix = 0; ix < img->shape()[0]; ++ix) {
if (!(ix >= 3 && ix <= img->shape()[0] - 3 && iy >= 3 && iy <= img->shape()[1] - 3)) {
preprocessing_set_4d(*img, 0.0f, ix, iy, 0, 0);
}
}
}
// hysteresis
for (int iy = 1; iy < img->ne[1] - 1; iy++) {
for (int ix = 1; ix < img->ne[0] - 1; ix++) {
float imd_v = ggml_ext_tensor_get_f32(img, ix, iy);
for (int64_t iy = 1; iy < img->shape()[1] - 1; ++iy) {
for (int64_t ix = 1; ix < img->shape()[0] - 1; ++ix) {
float imd_v = preprocessing_get_4d(*img, ix, iy, 0, 0);
if (imd_v == weak) {
if (ggml_ext_tensor_get_f32(img, ix + 1, iy - 1) == strong || ggml_ext_tensor_get_f32(img, ix + 1, iy) == strong ||
ggml_ext_tensor_get_f32(img, ix, iy - 1) == strong || ggml_ext_tensor_get_f32(img, ix, iy + 1) == strong ||
ggml_ext_tensor_get_f32(img, ix - 1, iy - 1) == strong || ggml_ext_tensor_get_f32(img, ix - 1, iy) == strong) {
ggml_ext_tensor_set_f32(img, strong, ix, iy);
} else {
ggml_ext_tensor_set_f32(img, 0.0f, ix, iy);
}
bool has_strong_neighbor =
preprocessing_get_4d(*img, ix + 1, iy - 1, 0, 0) == strong ||
preprocessing_get_4d(*img, ix + 1, iy, 0, 0) == strong ||
preprocessing_get_4d(*img, ix, iy - 1, 0, 0) == strong ||
preprocessing_get_4d(*img, ix, iy + 1, 0, 0) == strong ||
preprocessing_get_4d(*img, ix - 1, iy - 1, 0, 0) == strong ||
preprocessing_get_4d(*img, ix - 1, iy, 0, 0) == strong;
preprocessing_set_4d(*img, has_strong_neighbor ? strong : 0.0f, ix, iy, 0, 0);
}
}
}
}
bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold, float weak, float strong, bool inverse) {
ggml_init_params params;
params.mem_size = static_cast<size_t>(40 * img.width * img.height); // 10MB for 512x512
params.mem_buffer = nullptr;
params.no_alloc = false;
ggml_context* work_ctx = ggml_init(params);
if (!work_ctx) {
LOG_ERROR("ggml_init() failed");
return false;
}
float kX[9] = {
-1, 0, 1,
-2, 0, 2,
@ -184,43 +246,33 @@ bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold,
0, 0, 0,
-1, -2, -1};
// generate kernel
int kernel_size = 5;
ggml_tensor* gkernel = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, kernel_size, kernel_size, 1, 1);
ggml_tensor* sf_kx = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
memcpy(sf_kx->data, kX, ggml_nbytes(sf_kx));
ggml_tensor* sf_ky = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 3, 3, 1, 1);
memcpy(sf_ky->data, kY, ggml_nbytes(sf_ky));
gaussian_kernel(gkernel);
ggml_tensor* image = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 3, 1);
ggml_tensor* image_gray = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, img.width, img.height, 1, 1);
ggml_tensor* iX = ggml_dup_tensor(work_ctx, image_gray);
ggml_tensor* iY = ggml_dup_tensor(work_ctx, image_gray);
ggml_tensor* G = ggml_dup_tensor(work_ctx, image_gray);
ggml_tensor* tetha = ggml_dup_tensor(work_ctx, image_gray);
sd_image_to_ggml_tensor(img, image);
grayscale(image, image_gray);
convolve(image_gray, image_gray, gkernel, 2);
convolve(image_gray, iX, sf_kx, 1);
convolve(image_gray, iY, sf_ky, 1);
prop_hypot(iX, iY, G);
normalize_tensor(G);
prop_arctan2(iX, iY, tetha);
non_max_supression(image_gray, G, tetha);
threshold_hystersis(image_gray, high_threshold, low_threshold, weak, strong);
// to RGB channels
for (uint32_t iy = 0; iy < img.height; iy++) {
for (uint32_t ix = 0; ix < img.width; ix++) {
float gray = ggml_ext_tensor_get_f32(image_gray, ix, iy);
sd::Tensor<float> gkernel = gaussian_kernel_tensor(5);
sd::Tensor<float> sf_kx({3, 3, 1, 1}, std::vector<float>(kX, kX + 9));
sd::Tensor<float> sf_ky({3, 3, 1, 1}, std::vector<float>(kY, kY + 9));
sd::Tensor<float> image = sd_image_to_preprocessing_tensor(img);
sd::Tensor<float> image_gray = grayscale_tensor(image);
image_gray = convolve_tensor(image_gray, gkernel, 2);
sd::Tensor<float> iX = convolve_tensor(image_gray, sf_kx, 1);
sd::Tensor<float> iY = convolve_tensor(image_gray, sf_ky, 1);
sd::Tensor<float> G = tensor_hypot(iX, iY);
normalize_tensor(&G);
sd::Tensor<float> theta = tensor_arctan2(iX, iY);
image_gray = non_max_supression(G, theta);
threshold_hystersis(&image_gray, high_threshold, low_threshold, weak, strong);
for (uint32_t iy = 0; iy < img.height; ++iy) {
for (uint32_t ix = 0; ix < img.width; ++ix) {
float gray = preprocessing_get_4d(image_gray, ix, iy, 0, 0);
gray = inverse ? 1.0f - gray : gray;
ggml_ext_tensor_set_f32(image, gray, ix, iy);
ggml_ext_tensor_set_f32(image, gray, ix, iy, 1);
ggml_ext_tensor_set_f32(image, gray, ix, iy, 2);
for (uint32_t c = 0; c < img.channel; ++c) {
preprocessing_set_4d(image, gray, ix, iy, c, 0);
}
}
}
ggml_tensor_to_sd_image(image, img.data);
ggml_free(work_ctx);
preprocessing_tensor_to_sd_image(image, img.data);
return true;
}
#endif // __PREPROCESSING_HPP__
#endif // __PREPROCESSING_HPP__

View File

@ -525,20 +525,21 @@ namespace Qwen {
qwen_image.get_param_tensors(tensors, prefix);
}
ggml_cgraph* build_graph(ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false) {
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor,
const std::vector<sd::Tensor<float>>& ref_latents_tensor = {},
bool increase_ref_index = false) {
ggml_cgraph* gf = new_graph_custom(QWEN_IMAGE_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
GGML_ASSERT(x->ne[3] == 1);
ggml_cgraph* gf = new_graph_custom(QWEN_IMAGE_GRAPH_SIZE);
x = to_backend(x);
context = to_backend(context);
timesteps = to_backend(timesteps);
for (int i = 0; i < ref_latents.size(); i++) {
ref_latents[i] = to_backend(ref_latents[i]);
GGML_ASSERT(!context_tensor.empty());
ggml_tensor* context = make_input(context_tensor);
std::vector<ggml_tensor*> ref_latents;
ref_latents.reserve(ref_latents_tensor.size());
for (const auto& ref_latent_tensor : ref_latents_tensor) {
ref_latents.push_back(make_input(ref_latent_tensor));
}
pe_vec = Rope::gen_qwen_image_pe(static_cast<int>(x->ne[1]),
@ -600,14 +601,12 @@ namespace Qwen {
return gf;
}
bool compute(int n_threads,
ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
sd::Tensor<float> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context,
const std::vector<sd::Tensor<float>>& ref_latents = {},
bool increase_ref_index = false) {
// x: [N, in_channels, h, w]
// timesteps: [N, ]
// context: [N, max_position, hidden_size]
@ -615,7 +614,7 @@ namespace Qwen {
return build_graph(x, timesteps, context, ref_latents, increase_ref_index);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
}
void test() {
@ -624,30 +623,37 @@ namespace Qwen {
params.mem_buffer = nullptr;
params.no_alloc = false;
ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);
{
// auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 16, 1);
// auto x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 16, 16, 16, 1);
// ggml_set_f32(x, 0.01f);
auto x = load_tensor_from_file(work_ctx, "./qwen_image_x.bin");
print_ggml_tensor(x);
auto x = sd::load_tensor_from_file_as_tensor<float>("./qwen_image_x.bin");
print_sd_tensor(x);
std::vector<float> timesteps_vec(1, 1000.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
// auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 3584, 256, 1);
// auto context = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 3584, 256, 1);
// ggml_set_f32(context, 0.01f);
auto context = load_tensor_from_file(work_ctx, "./qwen_image_context.bin");
print_ggml_tensor(context);
auto context = sd::load_tensor_from_file_as_tensor<float>("./qwen_image_context.bin");
print_sd_tensor(context);
ggml_tensor* out = nullptr;
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, {}, false, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = compute(8,
x,
timesteps,
context,
{},
false);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("qwen_image test done in %lldms", t1 - t0);
}
}

361
src/sample-cache.cpp Normal file
View File

@ -0,0 +1,361 @@
#include "sample-cache.h"

#include <algorithm>
namespace sd_sample {
// Resolve the effective reuse threshold. INFINITY acts as the "unset"
// sentinel, in which case a per-mode default is substituted. The result is
// never negative (std::max also maps NaN input to 0, matching the original).
static float get_cache_reuse_threshold(const sd_cache_params_t& params) {
    float threshold = params.reuse_threshold;
    if (threshold == INFINITY) {
        switch (params.mode) {
            case SD_CACHE_EASYCACHE:
                threshold = 0.2f;
                break;
            case SD_CACHE_UCACHE:
                threshold = 1.0f;
                break;
            default:
                break;
        }
    }
    return std::max(0.0f, threshold);
}
// True when the EasyCache backend was successfully initialized for this run.
bool SampleCacheRuntime::easycache_enabled() const {
    return mode == SampleCacheMode::EASYCACHE;
}
// True when the UCache backend was successfully initialized for this run.
bool SampleCacheRuntime::ucache_enabled() const {
    return mode == SampleCacheMode::UCACHE;
}
// True when the CacheDIT backend was successfully initialized for this run.
bool SampleCacheRuntime::cachedit_enabled() const {
    return mode == SampleCacheMode::CACHEDIT;
}
// Validate the [start_percent, end_percent) window. Only EasyCache and UCache
// consume the range; every other mode is accepted unconditionally.
static bool has_valid_cache_percent_range(const sd_cache_params_t& cache_params) {
    const bool uses_range = cache_params.mode == SD_CACHE_EASYCACHE ||
                            cache_params.mode == SD_CACHE_UCACHE;
    if (!uses_range) {
        return true;
    }
    const float start = cache_params.start_percent;
    const float end   = cache_params.end_percent;
    return start >= 0.0f && start < 1.0f &&
           end > 0.0f && end <= 1.0f &&
           start < end;
}
// Configure the EasyCache backend (DiT models only). On any failure the
// runtime is left untouched, i.e. mode stays NONE.
static void init_easycache_runtime(SampleCacheRuntime& runtime,
                                   SDVersion version,
                                   const sd_cache_params_t& cache_params,
                                   Denoiser* denoiser) {
    if (!sd_version_is_dit(version)) {
        LOG_WARN("EasyCache requested but not supported for this model type");
        return;
    }
    EasyCacheConfig cfg;
    cfg.enabled         = true;
    cfg.reuse_threshold = get_cache_reuse_threshold(cache_params);
    cfg.start_percent   = cache_params.start_percent;
    cfg.end_percent     = cache_params.end_percent;
    runtime.easycache.init(cfg, denoiser);
    if (!runtime.easycache.enabled()) {
        LOG_WARN("EasyCache requested but could not be initialized for this run");
        return;
    }
    runtime.mode = SampleCacheMode::EASYCACHE;
    LOG_INFO("EasyCache enabled - threshold: %.3f, start: %.2f, end: %.2f",
             cfg.reuse_threshold,
             cfg.start_percent,
             cfg.end_percent);
}
// Configure the UCache backend (UNET models only). On any failure the
// runtime is left untouched, i.e. mode stays NONE.
static void init_ucache_runtime(SampleCacheRuntime& runtime,
                                SDVersion version,
                                const sd_cache_params_t& cache_params,
                                Denoiser* denoiser,
                                const std::vector<float>& sigmas) {
    if (!sd_version_is_unet(version)) {
        LOG_WARN("UCache requested but not supported for this model type (only UNET models)");
        return;
    }
    UCacheConfig config;
    config.enabled         = true;
    config.reuse_threshold = get_cache_reuse_threshold(cache_params);
    config.start_percent   = cache_params.start_percent;
    config.end_percent     = cache_params.end_percent;
    // The decay rate is a blend factor; keep it inside [0, 1].
    config.error_decay_rate       = std::clamp(cache_params.error_decay_rate, 0.0f, 1.0f);
    config.use_relative_threshold = cache_params.use_relative_threshold;
    config.reset_error_on_compute = cache_params.reset_error_on_compute;
    runtime.ucache.init(config, denoiser);
    if (!runtime.ucache.enabled()) {
        LOG_WARN("UCache requested but could not be initialized for this run");
        return;
    }
    runtime.ucache.set_sigmas(sigmas);
    runtime.mode = SampleCacheMode::UCACHE;
    LOG_INFO("UCache enabled - threshold: %.3f, start: %.2f, end: %.2f, decay: %.2f, relative: %s, reset: %s",
             config.reuse_threshold,
             config.start_percent,
             config.end_percent,
             config.error_decay_rate,
             config.use_relative_threshold ? "true" : "false",
             config.reset_error_on_compute ? "true" : "false");
}
// Configure the CacheDIT backend (DBCache and/or TaylorSeer, DiT models only).
// SD_CACHE_CACHE_DIT enables both sub-mechanisms at once.
static void init_cachedit_runtime(SampleCacheRuntime& runtime,
                                  SDVersion version,
                                  const sd_cache_params_t& cache_params,
                                  const std::vector<float>& sigmas) {
    if (!sd_version_is_dit(version)) {
        LOG_WARN("CacheDIT requested but not supported for this model type (only DiT models)");
        return;
    }
    const bool want_dbcache    = cache_params.mode == SD_CACHE_DBCACHE || cache_params.mode == SD_CACHE_CACHE_DIT;
    const bool want_taylorseer = cache_params.mode == SD_CACHE_TAYLORSEER || cache_params.mode == SD_CACHE_CACHE_DIT;

    DBCacheConfig dbcfg;
    dbcfg.enabled                     = want_dbcache;
    dbcfg.Fn_compute_blocks           = cache_params.Fn_compute_blocks;
    dbcfg.Bn_compute_blocks           = cache_params.Bn_compute_blocks;
    dbcfg.residual_diff_threshold     = cache_params.residual_diff_threshold;
    dbcfg.max_warmup_steps            = cache_params.max_warmup_steps;
    dbcfg.max_cached_steps            = cache_params.max_cached_steps;
    dbcfg.max_continuous_cached_steps = cache_params.max_continuous_cached_steps;
    if (cache_params.scm_mask != nullptr && strlen(cache_params.scm_mask) > 0) {
        dbcfg.steps_computation_mask = parse_scm_mask(cache_params.scm_mask);
    }
    dbcfg.scm_policy_dynamic = cache_params.scm_policy_dynamic;

    TaylorSeerConfig tcfg;
    tcfg.enabled             = want_taylorseer;
    tcfg.n_derivatives       = cache_params.taylorseer_n_derivatives;
    tcfg.skip_interval_steps = cache_params.taylorseer_skip_interval;

    runtime.cachedit.init(dbcfg, tcfg);
    if (!runtime.cachedit.enabled()) {
        LOG_WARN("CacheDIT requested but could not be initialized for this run");
        return;
    }
    runtime.cachedit.set_sigmas(sigmas);
    runtime.mode = SampleCacheMode::CACHEDIT;
    const char* mode_name = cache_params.mode == SD_CACHE_CACHE_DIT
                                ? "DBCache+TaylorSeer"
                                : (cache_params.mode == SD_CACHE_DBCACHE ? "DBCache" : "TaylorSeer");
    LOG_INFO("CacheDIT enabled - mode: %s, Fn: %d, Bn: %d, threshold: %.3f, warmup: %d",
             mode_name,
             dbcfg.Fn_compute_blocks,
             dbcfg.Bn_compute_blocks,
             dbcfg.residual_diff_threshold,
             dbcfg.max_warmup_steps);
}
// Configure the Spectrum predictor. Unlike the other backends it does not set
// runtime.mode; it is tracked separately via the spectrum_enabled flag.
static void init_spectrum_runtime(SampleCacheRuntime& runtime,
                                  SDVersion version,
                                  const sd_cache_params_t& cache_params,
                                  const std::vector<float>& sigmas) {
    if (!sd_version_is_unet(version) && !sd_version_is_dit(version)) {
        LOG_WARN("Spectrum requested but not supported for this model type (only UNET and DiT models)");
        return;
    }
    SpectrumConfig config;
    config.w            = cache_params.spectrum_w;
    config.m            = cache_params.spectrum_m;
    config.lam          = cache_params.spectrum_lam;
    config.window_size  = cache_params.spectrum_window_size;
    config.flex_window  = cache_params.spectrum_flex_window;
    config.warmup_steps = cache_params.spectrum_warmup_steps;
    config.stop_percent = cache_params.spectrum_stop_percent;
    // One fewer step than sigma values (sigmas bound the step intervals).
    const size_t total_steps = sigmas.empty() ? 0 : sigmas.size() - 1;
    runtime.spectrum.init(config, total_steps);
    runtime.spectrum_enabled = true;
    LOG_INFO("Spectrum enabled - w: %.2f, m: %d, lam: %.2f, window: %d, flex: %.2f, warmup: %d, stop: %.0f%%",
             config.w, config.m, config.lam,
             config.window_size, config.flex_window,
             config.warmup_steps, config.stop_percent * 100.0f);
}
// Build a SampleCacheRuntime from the user-supplied cache params. Returns a
// runtime with mode NONE (and spectrum disabled) when caching is off, the
// percent range is invalid, or the selected backend refuses to initialize
// for this model version.
SampleCacheRuntime init_sample_cache_runtime(SDVersion version,
                                             const sd_cache_params_t* cache_params,
                                             Denoiser* denoiser,
                                             const std::vector<float>& sigmas) {
    SampleCacheRuntime runtime;
    if (cache_params == nullptr || cache_params->mode == SD_CACHE_DISABLED) {
        return runtime;
    }
    // Range validation only constrains EasyCache/UCache; other modes pass through.
    if (!has_valid_cache_percent_range(*cache_params)) {
        LOG_WARN("Cache disabled due to invalid percent range (start=%.3f, end=%.3f)",
                 cache_params->start_percent,
                 cache_params->end_percent);
        return runtime;
    }
    switch (cache_params->mode) {
        case SD_CACHE_EASYCACHE:
            init_easycache_runtime(runtime, version, *cache_params, denoiser);
            break;
        case SD_CACHE_UCACHE:
            init_ucache_runtime(runtime, version, *cache_params, denoiser, sigmas);
            break;
        case SD_CACHE_DBCACHE:
        case SD_CACHE_TAYLORSEER:
        case SD_CACHE_CACHE_DIT:
            // All three map onto the CacheDIT backend with different sub-features.
            init_cachedit_runtime(runtime, version, *cache_params, sigmas);
            break;
        case SD_CACHE_SPECTRUM:
            init_spectrum_runtime(runtime, version, *cache_params, sigmas);
            break;
        default:
            break;
    }
    return runtime;
}
// Begin one sampling step: step_index is step - 1 when step > 0, otherwise -1.
// A negative step_index disables all dispatching (every hook checks it), so a
// step <= 0 yields a no-op dispatcher; otherwise the active backend is told a
// new step has started.
SampleStepCacheDispatcher::SampleStepCacheDispatcher(SampleCacheRuntime& runtime, int step, float sigma)
    : runtime(runtime), step(step), sigma(sigma), step_index(step > 0 ? (step - 1) : -1) {
    if (step_index < 0) {
        return;
    }
    switch (runtime.mode) {
        case SampleCacheMode::EASYCACHE:
            runtime.easycache.begin_step(step_index, sigma);
            break;
        case SampleCacheMode::UCACHE:
            runtime.ucache.begin_step(step_index, sigma);
            break;
        case SampleCacheMode::CACHEDIT:
            runtime.cachedit.begin_step(step_index, sigma);
            break;
        case SampleCacheMode::NONE:
            break;
    }
}
// Ask the active backend whether the model call for `condition` can be
// skipped; the backend's return value is forwarded unchanged. Returns false
// when dispatching is disabled or no backend is active.
bool SampleStepCacheDispatcher::before_condition(const void* condition,
                                                 const sd::Tensor<float>& input,
                                                 sd::Tensor<float>* output) {
    if (step_index < 0 || condition == nullptr || output == nullptr) {
        return false;
    }
    if (runtime.mode == SampleCacheMode::EASYCACHE) {
        return runtime.easycache.before_condition(condition, input, output, sigma, step_index);
    }
    if (runtime.mode == SampleCacheMode::UCACHE) {
        return runtime.ucache.before_condition(condition, input, output, sigma, step_index);
    }
    if (runtime.mode == SampleCacheMode::CACHEDIT) {
        return runtime.cachedit.before_condition(condition, input, output, sigma, step_index);
    }
    return false;
}
// Forward the post-computation hook to the active backend so it can record
// the (input, output) pair for this condition. No-op when dispatching is
// disabled or no backend is active.
void SampleStepCacheDispatcher::after_condition(const void* condition,
                                                const sd::Tensor<float>& input,
                                                const sd::Tensor<float>& output) {
    if (step_index < 0 || condition == nullptr) {
        return;
    }
    if (runtime.mode == SampleCacheMode::EASYCACHE) {
        runtime.easycache.after_condition(condition, input, output);
    } else if (runtime.mode == SampleCacheMode::UCACHE) {
        runtime.ucache.after_condition(condition, input, output);
    } else if (runtime.mode == SampleCacheMode::CACHEDIT) {
        runtime.cachedit.after_condition(condition, input, output);
    }
}
// Report whether the active backend decided to skip the current step entirely.
// Always false when no backend is active.
bool SampleStepCacheDispatcher::is_step_skipped() const {
    switch (runtime.mode) {
        case SampleCacheMode::EASYCACHE:
            return runtime.easycache.is_step_skipped();
        case SampleCacheMode::UCACHE:
            return runtime.ucache.is_step_skipped();
        case SampleCacheMode::CACHEDIT:
            return runtime.cachedit.is_step_skipped();
        default:
            return false;
    }
}
// Log a one-line end-of-run summary for one cache backend. `name` is the
// human-readable backend name, `skipped` its skipped-step count. The speedup
// estimate is only printed when at least one step was actually computed,
// which also guards the division against total_steps == skipped.
static void log_skip_summary(const char* name, int skipped, size_t total_steps) {
    if (total_steps == 0) {
        return;
    }
    if (skipped <= 0) {
        LOG_INFO("%s completed without skipping steps", name);
        return;
    }
    if (skipped < static_cast<int>(total_steps)) {
        double speedup = static_cast<double>(total_steps) /
                         static_cast<double>(total_steps - skipped);
        LOG_INFO("%s skipped %d/%zu steps (%.2fx estimated speedup)",
                 name, skipped, total_steps, speedup);
    } else {
        LOG_INFO("%s skipped %d/%zu steps", name, skipped, total_steps);
    }
}

// Emit per-backend skip statistics after sampling finishes. The three
// mode-based backends share the same reporting format (factored into
// log_skip_summary); Spectrum only reports when it skipped something.
void log_sample_cache_summary(const SampleCacheRuntime& runtime, size_t total_steps) {
    if (runtime.easycache_enabled()) {
        log_skip_summary("EasyCache", runtime.easycache.total_steps_skipped, total_steps);
    }
    if (runtime.ucache_enabled()) {
        log_skip_summary("UCache", runtime.ucache.total_steps_skipped, total_steps);
    }
    if (runtime.cachedit_enabled()) {
        log_skip_summary("CacheDIT", runtime.cachedit.total_steps_skipped, total_steps);
    }
    if (runtime.spectrum_enabled && runtime.spectrum.total_steps_skipped > 0 && total_steps > 0) {
        int skipped = runtime.spectrum.total_steps_skipped;
        // Guard the division: the original divided by (total_steps - skipped)
        // unconditionally here, which is a divide-by-zero when every step was
        // skipped. Fall back to a plain count line, as the other backends do.
        if (skipped < static_cast<int>(total_steps)) {
            double speedup = static_cast<double>(total_steps) /
                             static_cast<double>(total_steps - skipped);
            LOG_INFO("Spectrum skipped %d/%zu steps (%.2fx estimated speedup)",
                     skipped,
                     total_steps,
                     speedup);
        } else {
            LOG_INFO("Spectrum skipped %d/%zu steps", skipped, total_steps);
        }
    }
}
} // namespace sd_sample

61
src/sample-cache.h Normal file
View File

@ -0,0 +1,61 @@
#ifndef __SAMPLE_CACHE_H__
#define __SAMPLE_CACHE_H__
#include <vector>
#include "cache_dit.hpp"
#include "denoiser.hpp"
#include "easycache.hpp"
#include "model.h"
#include "spectrum.hpp"
#include "tensor.hpp"
#include "ucache.hpp"
#include "util.h"
namespace sd_sample {
// Selects which step-skipping cache backend drives the current sampling run.
enum class SampleCacheMode {
    NONE,       // caching disabled
    EASYCACHE,  // EasyCache backend (initialized only for DiT models)
    UCACHE,     // UCache backend (initialized only for UNET models)
    CACHEDIT,   // CacheDIT backend: DBCache and/or TaylorSeer (DiT models)
};
// Aggregated state for all supported sampling caches. At most one of the
// mode-based backends (easycache/ucache/cachedit) is active per run, selected
// by `mode`; Spectrum is independent of `mode` and tracked by
// `spectrum_enabled` (see init_sample_cache_runtime).
struct SampleCacheRuntime {
    SampleCacheMode mode = SampleCacheMode::NONE;  // active mode-based backend
    EasyCacheState easycache;                      // valid when mode == EASYCACHE
    UCacheState ucache;                            // valid when mode == UCACHE
    CacheDitConditionState cachedit;               // valid when mode == CACHEDIT
    SpectrumState spectrum;                        // valid when spectrum_enabled
    bool spectrum_enabled = false;
    // Convenience accessors comparing `mode` against the respective enumerator.
    bool easycache_enabled() const;
    bool ucache_enabled() const;
    bool cachedit_enabled() const;
};
// Per-step facade over SampleCacheRuntime: constructed once per sampler step,
// it announces the step to the active backend and routes the per-condition
// before/after hooks. Holds a reference, so it must not outlive the runtime.
struct SampleStepCacheDispatcher {
    SampleCacheRuntime& runtime;  // shared runtime, not owned
    int step;                     // step value as passed by the caller
    float sigma;                  // noise level for this step
    int step_index;               // step - 1, or -1 when step <= 0 (disables all hooks)
    SampleStepCacheDispatcher(SampleCacheRuntime& runtime, int step, float sigma);
    // Returns true when the active backend handled `condition` itself (it is
    // given `output` to fill — presumably from cache; see backend impls) so
    // the model evaluation can be skipped.
    bool before_condition(const void* condition, const sd::Tensor<float>& input, sd::Tensor<float>* output);
    // Lets the active backend record the computed (input, output) pair.
    void after_condition(const void* condition, const sd::Tensor<float>& input, const sd::Tensor<float>& output);
    // Whether the active backend decided to skip the whole current step.
    bool is_step_skipped() const;
};
SampleCacheRuntime init_sample_cache_runtime(SDVersion version,
const sd_cache_params_t* cache_params,
Denoiser* denoiser,
const std::vector<float>& sigmas);
void log_sample_cache_summary(const SampleCacheRuntime& runtime, size_t total_steps);
} // namespace sd_sample
#endif // __SAMPLE_CACHE_H__

View File

@ -6,6 +6,7 @@
#include <vector>
#include "ggml_extend.hpp"
#include "tensor.hpp"
struct SpectrumConfig {
float w = 0.40f;
@ -57,11 +58,8 @@ struct SpectrumState {
return (num_cached + 1) % ws != 0;
}
void update(const ggml_tensor* denoised) {
int64_t ne = ggml_nelements(denoised);
const float* data = (const float*)denoised->data;
H_buf.emplace_back(data, data + ne);
void update(const sd::Tensor<float>& denoised) {
H_buf.emplace_back(denoised.data(), denoised.data() + denoised.numel());
T_buf.push_back(taus(cnt));
while ((int)H_buf.size() > K) {
@ -76,13 +74,13 @@ struct SpectrumState {
cnt++;
}
void predict(ggml_tensor* denoised) {
void predict(sd::Tensor<float>* denoised) {
GGML_ASSERT(denoised != nullptr);
int64_t F = (int64_t)H_buf[0].size();
int K_curr = (int)H_buf.size();
int M1 = config.m + 1;
float tau_at = taus(cnt);
// Design matrix X: K_curr x M1 (Chebyshev basis)
std::vector<float> X(K_curr * M1);
for (int i = 0; i < K_curr; i++) {
X[i * M1] = 1.0f;
@ -92,7 +90,6 @@ struct SpectrumState {
X[i * M1 + j] = 2.0f * T_buf[i] * X[i * M1 + j - 1] - X[i * M1 + j - 2];
}
// x_star: Chebyshev basis at current tau
std::vector<float> x_star(M1);
x_star[0] = 1.0f;
if (M1 > 1)
@ -100,7 +97,6 @@ struct SpectrumState {
for (int j = 2; j < M1; j++)
x_star[j] = 2.0f * tau_at * x_star[j - 1] - x_star[j - 2];
// XtX = X^T X + lambda I
std::vector<float> XtX(M1 * M1, 0.0f);
for (int i = 0; i < M1; i++) {
for (int j = 0; j < M1; j++) {
@ -111,7 +107,6 @@ struct SpectrumState {
}
}
// Cholesky decomposition
std::vector<float> L(M1 * M1, 0.0f);
if (!cholesky_decompose(XtX.data(), L.data(), M1)) {
float trace = 0.0f;
@ -122,18 +117,15 @@ struct SpectrumState {
cholesky_decompose(XtX.data(), L.data(), M1);
}
// Solve XtX v = x_star
std::vector<float> v(M1);
cholesky_solve(L.data(), x_star.data(), v.data(), M1);
// Prediction weights per history entry
std::vector<float> weights(K_curr, 0.0f);
for (int k = 0; k < K_curr; k++)
for (int j = 0; j < M1; j++)
weights[k] += X[k * M1 + j] * v[j];
// Blend Chebyshev and Taylor predictions
float* out = (float*)denoised->data;
float* out = denoised->data();
float w_cheb = config.w;
float w_taylor = 1.0f - w_cheb;
const float* h_last = H_buf.back().data();

File diff suppressed because it is too large Load Diff

2074
src/t5.hpp

File diff suppressed because it is too large Load Diff

View File

@ -562,41 +562,40 @@ struct TinyImageAutoEncoder : public VAE {
taesd.get_param_tensors(tensors, prefix);
}
ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
SD_UNUSED(rng);
return vae_output;
}
ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
return latents;
}
ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
return latents;
}
int get_encoder_output_channels(int input_channels) {
return taesd.z_channels;
}
ggml_cgraph* build_graph(ggml_tensor* z, bool decode_graph) {
ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) {
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
z = to_backend(z);
ggml_tensor* z = make_input(z_tensor);
auto runner_ctx = get_context();
ggml_tensor* out = decode_graph ? taesd.decode(&runner_ctx, z) : taesd.encode(&runner_ctx, z);
ggml_build_forward_expand(gf, out);
return gf;
}
bool _compute(const int n_threads,
ggml_tensor* z,
bool decode_graph,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
sd::Tensor<float> _compute(const int n_threads,
const sd::Tensor<float>& z_tensor,
bool decode_graph) override {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(z, decode_graph);
return build_graph(z_tensor, decode_graph);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), z_tensor.dim());
}
};
@ -625,42 +624,41 @@ struct TinyVideoAutoEncoder : public VAE {
taehv.get_param_tensors(tensors, prefix);
}
ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
SD_UNUSED(rng);
return vae_output;
}
ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
return latents;
}
ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
return latents;
}
int get_encoder_output_channels(int input_channels) {
return taehv.z_channels;
}
ggml_cgraph* build_graph(ggml_tensor* z, bool decode_graph) {
ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) {
ggml_cgraph* gf = ggml_new_graph(compute_ctx);
z = to_backend(z);
ggml_tensor* z = make_input(z_tensor);
auto runner_ctx = get_context();
ggml_tensor* out = decode_graph ? taehv.decode(&runner_ctx, z) : taehv.encode(&runner_ctx, z);
ggml_build_forward_expand(gf, out);
return gf;
}
bool _compute(const int n_threads,
ggml_tensor* z,
bool decode_graph,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) {
sd::Tensor<float> _compute(const int n_threads,
const sd::Tensor<float>& z_tensor,
bool decode_graph) override {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(z, decode_graph);
return build_graph(z_tensor, decode_graph);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), z_tensor.dim());
}
};
#endif // __TAE_HPP__
#endif // __TAE_HPP__

1249
src/tensor.hpp Normal file

File diff suppressed because it is too large Load Diff

127
src/tensor_ggml.hpp Normal file
View File

@ -0,0 +1,127 @@
#ifndef __SD_TENSOR_GGML_HPP__
#define __SD_TENSOR_GGML_HPP__
#include <array>
#include <cstring>
#include <fstream>
#include <stdexcept>
#include <string>
#include <type_traits>
#include "ggml.h"
#include "tensor.hpp"
namespace sd {
// Maps a C++ element type onto the corresponding ggml_type enumerator.
// Only the specializations below are valid; instantiating the primary
// template for any other type is a compile error by design.
template <typename T>
struct GGMLTypeTraits;
template <>
struct GGMLTypeTraits<float> {
static constexpr ggml_type type = GGML_TYPE_F32;
};
template <>
struct GGMLTypeTraits<ggml_fp16_t> {
static constexpr ggml_type type = GGML_TYPE_F16;
};
template <>
struct GGMLTypeTraits<int32_t> {
static constexpr ggml_type type = GGML_TYPE_I32;
};
template <>
struct GGMLTypeTraits<int64_t> {
static constexpr ggml_type type = GGML_TYPE_I64;
};
// Read the logical shape of a ggml tensor (ne[0] .. ne[n_dims-1]) into a vector.
inline std::vector<int64_t> shape_from_ggml(const ggml_tensor* tensor) {
    const int n_dims = ggml_n_dims(tensor);
    std::vector<int64_t> dims(static_cast<size_t>(n_dims));
    for (int axis = 0; axis < n_dims; ++axis) {
        dims[static_cast<size_t>(axis)] = tensor->ne[axis];
    }
    return dims;
}
// Copy a ggml tensor (host- or backend-resident) into a freshly allocated
// sd::Tensor<T>. Returns an empty tensor for nullptr input; aborts if the
// ggml element type does not match T.
template <typename T>
inline Tensor<T> make_sd_tensor_from_ggml(const ggml_tensor* tensor) {
    if (tensor == nullptr) {
        return {};
    }
    if (tensor->type != GGMLTypeTraits<T>::type) {
        GGML_ABORT("ggml tensor type does not match sd::Tensor type");
    }
    // The flat byte copy below assumes dense row-major storage; a permuted or
    // sliced view would be copied with its strides and silently scrambled.
    GGML_ASSERT(ggml_is_contiguous(tensor));
    Tensor<T> result(shape_from_ggml(tensor));
    if (tensor->buffer != nullptr) {
        // Backend (possibly device) memory: read back through the backend API.
        ggml_backend_tensor_get(tensor, result.data(), 0, ggml_nbytes(tensor));
    } else {
        std::memcpy(result.data(), tensor->data, ggml_nbytes(tensor));
    }
    return result;
}
// Mirror an sd::Tensor<T> as a ggml tensor allocated in `ctx`, optionally
// copying the element data. Tensors with more dimensions than ggml supports
// are folded by merging the trailing axes into the last ggml axis.
template <typename T>
inline ggml_tensor* make_ggml_tensor(ggml_context* ctx, const Tensor<T>& tensor, bool copy_data = true) {
    GGML_ASSERT(tensor.dim() > 0 && tensor.dim() <= 5);
    int n_dims = std::min(static_cast<int>(tensor.dim()), GGML_MAX_DIMS);
    std::array<int64_t, GGML_MAX_DIMS> ne;
    // Explicit fill keeps unused axes at 1 even if GGML_MAX_DIMS ever changes
    // (a brace initializer with four 1s would zero-fill any extra slots).
    ne.fill(1);
    for (int i = 0; i < n_dims; ++i) {
        ne[static_cast<size_t>(i)] = tensor.shape()[static_cast<size_t>(i)];
    }
    if (tensor.dim() > n_dims) {
        // 5-D tensor with 4-D ggml: merge dim 4 into the last representable axis.
        // Guarded by dim() > n_dims so the fold never double-counts an axis the
        // loop above already copied.
        ne[static_cast<size_t>(n_dims) - 1] *= tensor.shape()[4];
    }
    ggml_tensor* result = ggml_new_tensor(ctx, GGMLTypeTraits<T>::type, n_dims, ne.data());
    GGML_ASSERT(result != nullptr);  // ctx allocation can fail; fail loudly before memcpy
    if (copy_data && tensor.numel() > 0) {
        std::memcpy(result->data, tensor.data(), static_cast<size_t>(ggml_nbytes(result)));
    }
    return result;
}
// Load a tensor from the simple binary dump format
// (header: n_dims, name length, ggml type; then dims, name, raw data).
// Throws std::runtime_error on I/O failure or a corrupt header, and
// std::invalid_argument when the stored type does not match T.
template <typename T>
inline Tensor<T> load_tensor_from_file_as_tensor(const std::string& file_path) {
    std::ifstream file(file_path, std::ios::binary);
    if (!file.is_open()) {
        throw std::runtime_error("failed to open tensor file: " + file_path);
    }
    int32_t n_dims = 0;
    int32_t length = 0;
    int32_t ttype  = 0;
    file.read(reinterpret_cast<char*>(&n_dims), sizeof(n_dims));
    file.read(reinterpret_cast<char*>(&length), sizeof(length));
    file.read(reinterpret_cast<char*>(&ttype), sizeof(ttype));
    if (!file.good()) {
        throw std::runtime_error("incomplete tensor file header: " + file_path);
    }
    // Validate header fields before they can drive an out-of-bounds shape
    // write (n_dims > 4) or an absurd name allocation (negative length).
    if (n_dims < 1 || n_dims > 4 || length < 0) {
        throw std::runtime_error("invalid tensor file header: " + file_path);
    }
    if (static_cast<ggml_type>(ttype) != GGMLTypeTraits<T>::type) {
        throw std::invalid_argument("tensor file type does not match requested sd::Tensor type");
    }
    std::vector<int64_t> shape(static_cast<size_t>(n_dims), 1);
    for (int i = 0; i < n_dims; ++i) {
        int32_t dim = 1;
        file.read(reinterpret_cast<char*>(&dim), sizeof(dim));
        if (dim < 0) {
            throw std::runtime_error("invalid tensor file dimension: " + file_path);
        }
        shape[static_cast<size_t>(i)] = dim;
    }
    std::string name(static_cast<size_t>(length), '\0');
    file.read(name.data(), length);
    Tensor<T> tensor(shape);
    file.read(reinterpret_cast<char*>(tensor.data()), static_cast<std::streamsize>(tensor.numel() * sizeof(T)));
    if (!file.good()) {
        throw std::runtime_error("incomplete tensor file data: " + file_path);
    }
    return tensor;
}
} // namespace sd
#endif

File diff suppressed because it is too large Load Diff

View File

@ -6,8 +6,10 @@
#include <unordered_map>
#include <vector>
#include "condition_cache_utils.hpp"
#include "denoiser.hpp"
#include "ggml_extend.hpp"
#include "tensor.hpp"
struct UCacheConfig {
bool enabled = false;
@ -29,15 +31,15 @@ struct UCacheCacheEntry {
struct UCacheState {
UCacheConfig config;
Denoiser* denoiser = nullptr;
float start_sigma = std::numeric_limits<float>::max();
float end_sigma = 0.0f;
bool initialized = false;
bool initial_step = true;
bool skip_current_step = false;
bool step_active = false;
const SDCondition* anchor_condition = nullptr;
std::unordered_map<const SDCondition*, UCacheCacheEntry> cache_diffs;
Denoiser* denoiser = nullptr;
float start_sigma = std::numeric_limits<float>::max();
float end_sigma = 0.0f;
bool initialized = false;
bool initial_step = true;
bool skip_current_step = false;
bool step_active = false;
const void* anchor_condition = nullptr;
std::unordered_map<const void*, UCacheCacheEntry> cache_diffs;
std::vector<float> prev_input;
std::vector<float> prev_output;
float output_prev_norm = 0.0f;
@ -233,43 +235,30 @@ struct UCacheState {
return base_threshold * multiplier;
}
bool has_cache(const SDCondition* cond) const {
bool has_cache(const void* cond) const {
auto it = cache_diffs.find(cond);
return it != cache_diffs.end() && !it->second.diff.empty();
}
void update_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
void update_cache(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
UCacheCacheEntry& entry = cache_diffs[cond];
size_t ne = static_cast<size_t>(ggml_nelements(output));
entry.diff.resize(ne);
float* out_data = (float*)output->data;
float* in_data = (float*)input->data;
for (size_t i = 0; i < ne; ++i) {
entry.diff[i] = out_data[i] - in_data[i];
}
sd::store_condition_cache_diff(&entry.diff, input, output);
}
void apply_cache(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
void apply_cache(const void* cond, const sd::Tensor<float>& input, sd::Tensor<float>* output) {
auto it = cache_diffs.find(cond);
if (it == cache_diffs.end() || it->second.diff.empty()) {
return;
}
copy_ggml_tensor(output, input);
float* out_data = (float*)output->data;
const std::vector<float>& diff = it->second.diff;
for (size_t i = 0; i < diff.size(); ++i) {
out_data[i] += diff[i];
}
sd::apply_condition_cache_diff(it->second.diff, input, output);
}
bool before_condition(const SDCondition* cond,
ggml_tensor* input,
ggml_tensor* output,
bool before_condition(const void* cond,
const sd::Tensor<float>& input,
sd::Tensor<float>* output,
float sigma,
int step_index) {
if (!enabled() || step_index < 0) {
if (!enabled() || step_index < 0 || output == nullptr) {
return false;
}
if (step_index != current_step_index) {
@ -302,13 +291,13 @@ struct UCacheState {
return false;
}
size_t ne = static_cast<size_t>(ggml_nelements(input));
size_t ne = static_cast<size_t>(input.numel());
if (prev_input.size() != ne) {
return false;
}
float* input_data = (float*)input->data;
last_input_change = 0.0f;
const float* input_data = input.data();
last_input_change = 0.0f;
for (size_t i = 0; i < ne; ++i) {
last_input_change += std::fabs(input_data[i] - prev_input[i]);
}
@ -354,7 +343,7 @@ struct UCacheState {
return false;
}
void after_condition(const SDCondition* cond, ggml_tensor* input, ggml_tensor* output) {
void after_condition(const void* cond, const sd::Tensor<float>& input, const sd::Tensor<float>& output) {
if (!step_is_active()) {
return;
}
@ -367,16 +356,16 @@ struct UCacheState {
steps_computed_since_active++;
consecutive_skipped_steps = 0;
size_t ne = static_cast<size_t>(ggml_nelements(input));
float* in_data = (float*)input->data;
size_t ne = static_cast<size_t>(input.numel());
const float* in_data = input.data();
prev_input.resize(ne);
for (size_t i = 0; i < ne; ++i) {
prev_input[i] = in_data[i];
}
has_prev_input = true;
float* out_data = (float*)output->data;
float output_change = 0.0f;
const float* out_data = output.data();
float output_change = 0.0f;
if (has_prev_output && prev_output.size() == ne) {
for (size_t i = 0; i < ne; ++i) {
output_change += std::fabs(out_data[i] - prev_output[i]);

View File

@ -609,30 +609,31 @@ struct UNetModelRunner : public GGMLRunner {
unet.get_param_tensors(tensors, prefix);
}
ggml_cgraph* build_graph(ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
ggml_tensor* c_concat = nullptr,
ggml_tensor* y = nullptr,
int num_video_frames = -1,
std::vector<ggml_tensor*> controls = {},
float control_strength = 0.f) {
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor = {},
const sd::Tensor<float>& c_concat_tensor = {},
const sd::Tensor<float>& y_tensor = {},
int num_video_frames = -1,
const std::vector<sd::Tensor<float>>& controls_tensor = {},
float control_strength = 0.f) {
ggml_cgraph* gf = new_graph_custom(UNET_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* c_concat = make_optional_input(c_concat_tensor);
ggml_tensor* y = make_optional_input(y_tensor);
std::vector<ggml_tensor*> controls;
controls.reserve(controls_tensor.size());
for (const auto& control_tensor : controls_tensor) {
controls.push_back(make_input(control_tensor));
}
if (num_video_frames == -1) {
num_video_frames = static_cast<int>(x->ne[3]);
}
x = to_backend(x);
context = to_backend(context);
y = to_backend(y);
timesteps = to_backend(timesteps);
c_concat = to_backend(c_concat);
for (int i = 0; i < controls.size(); i++) {
controls[i] = to_backend(controls[i]);
}
auto runner_ctx = get_context();
ggml_tensor* out = unet.forward(&runner_ctx,
@ -650,17 +651,15 @@ struct UNetModelRunner : public GGMLRunner {
return gf;
}
bool compute(int n_threads,
ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
ggml_tensor* c_concat,
ggml_tensor* y,
int num_video_frames = -1,
std::vector<ggml_tensor*> controls = {},
float control_strength = 0.f,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
sd::Tensor<float> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context = {},
const sd::Tensor<float>& c_concat = {},
const sd::Tensor<float>& y = {},
int num_video_frames = -1,
const std::vector<sd::Tensor<float>>& controls = {},
float control_strength = 0.f) {
// x: [N, in_channels, h, w]
// timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
@ -670,7 +669,7 @@ struct UNetModelRunner : public GGMLRunner {
return build_graph(x, timesteps, context, c_concat, y, num_video_frames, controls, control_strength);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
}
void test() {
@ -679,8 +678,8 @@ struct UNetModelRunner : public GGMLRunner {
params.mem_buffer = nullptr;
params.no_alloc = false;
ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);
{
// CPU, num_video_frames = 1, x{num_video_frames, 8, 8, 8}: Pass
@ -689,27 +688,37 @@ struct UNetModelRunner : public GGMLRunner {
// CUDA, num_video_frames = 3, x{num_video_frames, 8, 8, 8}: nan
int num_video_frames = 3;
auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 8, num_video_frames);
sd::Tensor<float> x({8, 8, 8, num_video_frames});
std::vector<float> timesteps_vec(num_video_frames, 999.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
ggml_set_f32(x, 0.5f);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
x.fill_(0.5f);
// print_ggml_tensor(x);
auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 1024, 1, num_video_frames);
ggml_set_f32(context, 0.5f);
sd::Tensor<float> context({1024, 1, num_video_frames});
context.fill_(0.5f);
// print_ggml_tensor(context);
auto y = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 768, num_video_frames);
ggml_set_f32(y, 0.5f);
sd::Tensor<float> y({768, num_video_frames});
y.fill_(0.5f);
// print_ggml_tensor(y);
ggml_tensor* out = nullptr;
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, nullptr, y, num_video_frames, {}, 0.f, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = compute(8,
x,
timesteps,
context,
{},
y,
num_video_frames,
{},
0.f);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("unet test done in %lldms", t1 - t0);
}
}

View File

@ -2,6 +2,7 @@
#include "ggml_extend.hpp"
#include "model.h"
#include "stable-diffusion.h"
#include "util.h"
struct UpscalerGGML {
ggml_backend_t backend = nullptr; // general backend
@ -64,6 +65,39 @@ struct UpscalerGGML {
return true;
}
sd::Tensor<float> upscale_tensor(const sd::Tensor<float>& input_tensor) {
sd::Tensor<float> upscaled;
if (tile_size <= 0 || (input_tensor.shape()[0] <= tile_size && input_tensor.shape()[1] <= tile_size)) {
upscaled = esrgan_upscaler->compute(n_threads, input_tensor);
} else {
auto on_processing = [&](const sd::Tensor<float>& input_tile) -> sd::Tensor<float> {
auto output_tile = esrgan_upscaler->compute(n_threads, input_tile);
if (output_tile.empty()) {
LOG_ERROR("esrgan compute failed while processing a tile");
return {};
}
return output_tile;
};
upscaled = process_tiles_2d(input_tensor,
static_cast<int>(input_tensor.shape()[0] * esrgan_upscaler->scale),
static_cast<int>(input_tensor.shape()[1] * esrgan_upscaler->scale),
esrgan_upscaler->scale,
tile_size,
tile_size,
0.25f,
false,
false,
on_processing);
}
esrgan_upscaler->free_compute_buffer();
if (upscaled.empty()) {
LOG_ERROR("esrgan compute failed");
return {};
}
return upscaled;
}
sd_image_t upscale(sd_image_t input_image, uint32_t upscale_factor) {
// upscale_factor, unused for RealESRGAN_x4plus_anime_6B.pth
sd_image_t upscaled_image = {0, 0, 0, nullptr};
@ -72,40 +106,17 @@ struct UpscalerGGML {
LOG_INFO("upscaling from (%i x %i) to (%i x %i)",
input_image.width, input_image.height, output_width, output_height);
ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
params.mem_buffer = nullptr;
params.no_alloc = false;
// draft context
ggml_context* upscale_ctx = ggml_init(params);
if (!upscale_ctx) {
LOG_ERROR("ggml_init() failed");
sd::Tensor<float> input_tensor = sd_image_to_tensor(input_image);
sd::Tensor<float> upscaled;
int64_t t0 = ggml_time_ms();
upscaled = upscale_tensor(input_tensor);
if (upscaled.empty()) {
return upscaled_image;
}
// LOG_DEBUG("upscale work buffer size: %.2f MB", params.mem_size / 1024.f / 1024.f);
ggml_tensor* input_image_tensor = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, input_image.width, input_image.height, 3, 1);
sd_image_to_ggml_tensor(input_image, input_image_tensor);
ggml_tensor* upscaled = ggml_new_tensor_4d(upscale_ctx, GGML_TYPE_F32, output_width, output_height, 3, 1);
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
return esrgan_upscaler->compute(n_threads, in, &out);
};
int64_t t0 = ggml_time_ms();
// TODO: circular upscaling?
sd_tiling(input_image_tensor, upscaled, esrgan_upscaler->scale, esrgan_upscaler->tile_size, 0.25f, false, false, on_tiling);
esrgan_upscaler->free_compute_buffer();
ggml_ext_tensor_clamp_inplace(upscaled, 0.f, 1.f);
uint8_t* upscaled_data = ggml_tensor_to_sd_image(upscaled);
ggml_free(upscale_ctx);
int64_t t3 = ggml_time_ms();
sd_image_t upscaled_data = tensor_to_sd_image(upscaled);
int64_t t3 = ggml_time_ms();
LOG_INFO("input_image_tensor upscaled, taking %.2fs", (t3 - t0) / 1000.0f);
upscaled_image = {
(uint32_t)output_width,
(uint32_t)output_height,
3,
upscaled_data,
};
upscaled_image = upscaled_data;
return upscaled_image;
}
};

View File

@ -479,158 +479,96 @@ const char* sd_get_system_info() {
return buffer;
}
sd_image_f32_t sd_image_t_to_sd_image_f32_t(sd_image_t image) {
sd_image_f32_t converted_image;
converted_image.width = image.width;
converted_image.height = image.height;
converted_image.channel = image.channel;
sd_image_t tensor_to_sd_image(const sd::Tensor<float>& tensor, int frame_index) {
const auto& shape = tensor.shape();
GGML_ASSERT(shape.size() == 4 || shape.size() == 5);
int width = static_cast<int>(shape[0]);
int height = static_cast<int>(shape[1]);
int channel = static_cast<int>(shape[shape.size() == 5 ? 3 : 2]);
uint8_t* data = (uint8_t*)malloc(static_cast<size_t>(width * height * channel));
GGML_ASSERT(data != nullptr);
// Allocate memory for float data
converted_image.data = (float*)malloc(image.width * image.height * image.channel * sizeof(float));
for (uint32_t i = 0; i < image.width * image.height * image.channel; i++) {
// Convert uint8_t to float
converted_image.data[i] = (float)image.data[i];
}
return converted_image;
}
// Function to perform double linear interpolation
float interpolate(float v1, float v2, float v3, float v4, float x_ratio, float y_ratio) {
return v1 * (1 - x_ratio) * (1 - y_ratio) + v2 * x_ratio * (1 - y_ratio) + v3 * (1 - x_ratio) * y_ratio + v4 * x_ratio * y_ratio;
}
sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int target_height) {
sd_image_f32_t resized_image;
resized_image.width = target_width;
resized_image.height = target_height;
resized_image.channel = image.channel;
// Allocate memory for resized float data
resized_image.data = (float*)malloc(target_width * target_height * image.channel * sizeof(float));
for (int y = 0; y < target_height; y++) {
for (int x = 0; x < target_width; x++) {
float original_x = (float)x * image.width / target_width;
float original_y = (float)y * image.height / target_height;
uint32_t x1 = (uint32_t)original_x;
uint32_t y1 = (uint32_t)original_y;
uint32_t x2 = std::min(x1 + 1, image.width - 1);
uint32_t y2 = std::min(y1 + 1, image.height - 1);
for (uint32_t k = 0; k < image.channel; k++) {
float v1 = *(image.data + y1 * image.width * image.channel + x1 * image.channel + k);
float v2 = *(image.data + y1 * image.width * image.channel + x2 * image.channel + k);
float v3 = *(image.data + y2 * image.width * image.channel + x1 * image.channel + k);
float v4 = *(image.data + y2 * image.width * image.channel + x2 * image.channel + k);
float x_ratio = original_x - x1;
float y_ratio = original_y - y1;
float value = interpolate(v1, v2, v3, v4, x_ratio, y_ratio);
*(resized_image.data + y * target_width * image.channel + x * image.channel + k) = value;
for (int iw = 0; iw < width; ++iw) {
for (int ih = 0; ih < height; ++ih) {
for (int ic = 0; ic < channel; ++ic) {
float value = shape.size() == 5 ? tensor.index(iw, ih, frame_index, ic, 0)
: tensor.index(iw, ih, ic, frame_index);
value = std::clamp(value, 0.0f, 1.0f);
data[(ih * width + iw) * channel + ic] = static_cast<uint8_t>(std::round(value * 255.0f));
}
}
}
return resized_image;
return {
static_cast<uint32_t>(width),
static_cast<uint32_t>(height),
static_cast<uint32_t>(channel),
data,
};
}
void normalize_sd_image_f32_t(sd_image_f32_t image, float means[3], float stds[3]) {
for (uint32_t y = 0; y < image.height; y++) {
for (uint32_t x = 0; x < image.width; x++) {
for (uint32_t k = 0; k < image.channel; k++) {
int index = (y * image.width + x) * image.channel + k;
image.data[index] = (image.data[index] - means[k]) / stds[k];
sd::Tensor<float> sd_image_to_tensor(sd_image_t image,
int target_width,
int target_height,
bool scale) {
sd::Tensor<float> tensor = sd::zeros<float>({static_cast<int64_t>(image.width),
static_cast<int64_t>(image.height),
static_cast<int64_t>(image.channel),
1});
for (uint32_t iw = 0; iw < image.width; ++iw) {
for (uint32_t ih = 0; ih < image.height; ++ih) {
for (uint32_t ic = 0; ic < image.channel; ++ic) {
tensor.index(iw, ih, ic, 0) = sd_image_get_f32(image, iw, ih, ic, scale);
}
}
}
if (target_width >= 0 && target_height >= 0 &&
(tensor.shape()[0] != target_width || tensor.shape()[1] != target_height)) {
tensor = sd::ops::interpolate(tensor,
{target_width,
target_height,
tensor.shape()[2],
tensor.shape()[3]});
}
return tensor;
}
// Constants for means and std
float means[3] = {0.48145466f, 0.4578275f, 0.40821073f};
float stds[3] = {0.26862954f, 0.26130258f, 0.27577711f};
// Function to clip and preprocess sd_image_f32_t
sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int target_height) {
float width_scale = (float)target_width / image.width;
float height_scale = (float)target_height / image.height;
sd::Tensor<float> clip_preprocess(const sd::Tensor<float>& image, int target_width, int target_height) {
GGML_ASSERT(image.dim() == 4);
GGML_ASSERT(image.shape()[2] == 3);
GGML_ASSERT(image.shape()[3] == 1);
GGML_ASSERT(target_width > 0 && target_height > 0);
float scale = std::fmax(width_scale, height_scale);
float width_scale = static_cast<float>(target_width) / static_cast<float>(image.shape()[0]);
float height_scale = static_cast<float>(target_height) / static_cast<float>(image.shape()[1]);
float scale = std::fmax(width_scale, height_scale);
// Interpolation
int resized_width = (int)(scale * image.width);
int resized_height = (int)(scale * image.height);
float* resized_data = (float*)malloc(resized_width * resized_height * image.channel * sizeof(float));
int64_t resized_width = static_cast<int64_t>(scale * static_cast<float>(image.shape()[0]));
int64_t resized_height = static_cast<int64_t>(scale * static_cast<float>(image.shape()[1]));
for (int y = 0; y < resized_height; y++) {
for (int x = 0; x < resized_width; x++) {
float original_x = (float)x * image.width / resized_width;
float original_y = (float)y * image.height / resized_height;
sd::Tensor<float> resized = sd::ops::interpolate(
image,
{resized_width, resized_height, image.shape()[2], image.shape()[3]});
uint32_t x1 = (uint32_t)original_x;
uint32_t y1 = (uint32_t)original_y;
uint32_t x2 = std::min(x1 + 1, image.width - 1);
uint32_t y2 = std::min(y1 + 1, image.height - 1);
int64_t h_offset = std::max<int64_t>((resized_height - target_height) / 2, 0);
int64_t w_offset = std::max<int64_t>((resized_width - target_width) / 2, 0);
for (uint32_t k = 0; k < image.channel; k++) {
float v1 = *(image.data + y1 * image.width * image.channel + x1 * image.channel + k);
float v2 = *(image.data + y1 * image.width * image.channel + x2 * image.channel + k);
float v3 = *(image.data + y2 * image.width * image.channel + x1 * image.channel + k);
float v4 = *(image.data + y2 * image.width * image.channel + x2 * image.channel + k);
float x_ratio = original_x - x1;
float y_ratio = original_y - y1;
float value = interpolate(v1, v2, v3, v4, x_ratio, y_ratio);
*(resized_data + y * resized_width * image.channel + x * image.channel + k) = value;
sd::Tensor<float> cropped({target_width, target_height, image.shape()[2], image.shape()[3]});
for (int64_t y = 0; y < target_height; ++y) {
for (int64_t x = 0; x < target_width; ++x) {
for (int64_t c = 0; c < image.shape()[2]; ++c) {
cropped.index(x, y, c, 0) = resized.index(x + w_offset, y + h_offset, c, 0);
}
}
}
// Clip and preprocess
int h_offset = std::max((int)(resized_height - target_height) / 2, 0);
int w_offset = std::max((int)(resized_width - target_width) / 2, 0);
sd_image_f32_t result;
result.width = target_width;
result.height = target_height;
result.channel = image.channel;
result.data = (float*)malloc(target_height * target_width * image.channel * sizeof(float));
for (uint32_t k = 0; k < image.channel; k++) {
for (uint32_t i = 0; i < result.height; i++) {
for (uint32_t j = 0; j < result.width; j++) {
int src_y = std::min(static_cast<int>(i + h_offset), resized_height - 1);
int src_x = std::min(static_cast<int>(j + w_offset), resized_width - 1);
*(result.data + i * result.width * image.channel + j * image.channel + k) =
fmin(fmax(*(resized_data + src_y * resized_width * image.channel + src_x * image.channel + k), 0.0f), 255.0f) / 255.0f;
}
}
}
// Free allocated memory
free(resized_data);
// Normalize
for (uint32_t k = 0; k < image.channel; k++) {
for (uint32_t i = 0; i < result.height; i++) {
for (uint32_t j = 0; j < result.width; j++) {
// *(result.data + i * size * image.channel + j * image.channel + k) = 0.5f;
int offset = i * result.width * image.channel + j * image.channel + k;
float value = *(result.data + offset);
value = (value - means[k]) / stds[k];
// value = 0.5f;
*(result.data + offset) = value;
}
}
}
return result;
sd::Tensor<float> normalized = sd::ops::clamp(cropped, 0.0f, 1.0f);
sd::Tensor<float> mean({1, 1, 3, 1}, {means[0], means[1], means[2]});
sd::Tensor<float> std({1, 1, 3, 1}, {stds[0], stds[1], stds[2]});
return (normalized - mean) / std;
}
// Ref: https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/cad87bf4e3e0b0a759afa94e933527c3123d59bc/modules/prompt_parser.py#L345

View File

@ -7,6 +7,7 @@
#include <vector>
#include "stable-diffusion.h"
#include "tensor.hpp"
#define SAFE_STR(s) ((s) ? (s) : "")
#define BOOL_STR(b) ((b) ? "true" : "false")
@ -29,20 +30,14 @@ std::string utf32_to_utf8(const std::u32string& utf32_str);
std::u32string unicode_value_to_utf32(int unicode_value);
// std::string sd_basename(const std::string& path);
typedef struct {
uint32_t width;
uint32_t height;
uint32_t channel;
float* data;
} sd_image_f32_t;
sd_image_t tensor_to_sd_image(const sd::Tensor<float>& tensor, int frame_index = 0);
void normalize_sd_image_f32_t(sd_image_f32_t image, float means[3], float stds[3]);
sd::Tensor<float> sd_image_to_tensor(sd_image_t image,
int target_width = -1,
int target_height = -1,
bool scale = true);
sd_image_f32_t sd_image_t_to_sd_image_f32_t(sd_image_t image);
sd_image_f32_t resize_sd_image_f32_t(sd_image_f32_t image, int target_width, int target_height);
sd_image_f32_t clip_preprocess(sd_image_f32_t image, int target_width, int target_height);
sd::Tensor<float> clip_preprocess(const sd::Tensor<float>& image, int target_width, int target_height);
class MmapWrapper {
public:

View File

@ -2,16 +2,64 @@
#define __VAE_HPP__
#include "common_block.hpp"
#include "tensor_ggml.hpp"
struct VAE : public GGMLRunner {
protected:
SDVersion version;
bool scale_input = true;
virtual bool _compute(const int n_threads,
ggml_tensor* z,
bool decode_graph,
ggml_tensor** output,
ggml_context* output_ctx) = 0;
bool scale_input = true;
virtual sd::Tensor<float> _compute(const int n_threads,
const sd::Tensor<float>& z,
bool decode_graph) = 0;
static inline void scale_tensor_to_minus1_1(sd::Tensor<float>* tensor) {
GGML_ASSERT(tensor != nullptr);
for (int64_t i = 0; i < tensor->numel(); ++i) {
(*tensor)[i] = (*tensor)[i] * 2.0f - 1.0f;
}
}
static inline void scale_tensor_to_0_1(sd::Tensor<float>* tensor) {
GGML_ASSERT(tensor != nullptr);
for (int64_t i = 0; i < tensor->numel(); ++i) {
float value = ((*tensor)[i] + 1.0f) * 0.5f;
(*tensor)[i] = std::max(0.0f, std::min(1.0f, value));
}
}
sd::Tensor<float> tiled_compute(const sd::Tensor<float>& input,
int n_threads,
int output_width,
int output_height,
int scale,
int p_tile_size_x,
int p_tile_size_y,
float tile_overlap_factor,
bool circular_x,
bool circular_y,
bool decode_graph,
const char* error_message,
bool silent = false) {
auto on_processing = [&](const sd::Tensor<float>& input_tile) {
auto output_tile = _compute(n_threads, input_tile, decode_graph);
if (output_tile.empty()) {
LOG_ERROR("%s", error_message);
return sd::Tensor<float>();
}
return output_tile;
};
return ::process_tiles_2d(input,
output_width,
output_height,
scale,
p_tile_size_x,
p_tile_size_y,
tile_overlap_factor,
circular_x,
circular_y,
on_processing,
silent);
}
public:
VAE(SDVersion version, ggml_backend_t backend, bool offload_params_to_cpu)
@ -60,133 +108,109 @@ public:
tile_size_y = get_tile_size(params.tile_size_y, params.rel_size_y, latent_y);
}
ggml_tensor* encode(int n_threads,
ggml_context* work_ctx,
ggml_tensor* x,
sd_tiling_params_t tiling_params,
bool circular_x = false,
bool circular_y = false) {
int64_t t0 = ggml_time_ms();
ggml_tensor* result = nullptr;
const int scale_factor = get_scale_factor();
int64_t W = x->ne[0] / scale_factor;
int64_t H = x->ne[1] / scale_factor;
int channel_dim = sd_version_is_wan(version) ? 3 : 2;
int64_t C = get_encoder_output_channels(static_cast<int>(x->ne[channel_dim]));
int64_t ne2;
int64_t ne3;
if (sd_version_is_wan(version)) {
int64_t T = x->ne[2];
ne2 = (T - 1) / 4 + 1;
ne3 = C;
} else {
ne2 = C;
ne3 = x->ne[3];
}
result = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, W, H, ne2, ne3);
sd::Tensor<float> encode(int n_threads,
const sd::Tensor<float>& x,
sd_tiling_params_t tiling_params,
bool circular_x = false,
bool circular_y = false) {
int64_t t0 = ggml_time_ms();
sd::Tensor<float> input = x;
sd::Tensor<float> output;
if (scale_input) {
scale_to_minus1_1(x);
}
if (sd_version_is_qwen_image(version) || sd_version_is_anima(version)) {
x = ggml_reshape_4d(work_ctx, x, x->ne[0], x->ne[1], 1, x->ne[2] * x->ne[3]);
scale_tensor_to_minus1_1(&input);
}
if (tiling_params.enabled) {
const int scale_factor = get_scale_factor();
int64_t W = input.shape()[0] / scale_factor;
int64_t H = input.shape()[1] / scale_factor;
float tile_overlap;
int tile_size_x, tile_size_y;
// multiply tile size for encode to keep the compute buffer size consistent
get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, tiling_params, W, H, 1.30539f);
LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y);
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
return _compute(n_threads, in, false, &out, work_ctx);
};
sd_tiling_non_square(x, result, scale_factor, tile_size_x, tile_size_y, tile_overlap, circular_x, circular_y, on_tiling);
output = tiled_compute(input,
n_threads,
static_cast<int>(W),
static_cast<int>(H),
scale_factor,
tile_size_x,
tile_size_y,
tile_overlap,
circular_x,
circular_y,
false,
"vae encode compute failed while processing a tile");
} else {
_compute(n_threads, x, false, &result, work_ctx);
output = _compute(n_threads, input, false);
free_compute_buffer();
}
free_compute_buffer();
if (output.empty()) {
LOG_ERROR("vae encode compute failed");
return {};
}
int64_t t1 = ggml_time_ms();
LOG_DEBUG("computing vae encode graph completed, taking %.2fs", (t1 - t0) * 1.0f / 1000);
return result;
return std::move(output);
}
ggml_tensor* decode(int n_threads,
ggml_context* work_ctx,
ggml_tensor* x,
sd_tiling_params_t tiling_params,
bool decode_video = false,
bool circular_x = false,
bool circular_y = false,
ggml_tensor* result = nullptr,
bool silent = false) {
const int scale_factor = get_scale_factor();
int64_t W = x->ne[0] * scale_factor;
int64_t H = x->ne[1] * scale_factor;
int64_t C = 3;
if (result == nullptr) {
if (decode_video) {
int64_t T = x->ne[2];
if (sd_version_is_wan(version)) {
T = ((T - 1) * 4) + 1;
}
result = ggml_new_tensor_4d(work_ctx,
GGML_TYPE_F32,
W,
H,
T,
3);
} else {
result = ggml_new_tensor_4d(work_ctx,
GGML_TYPE_F32,
W,
H,
C,
x->ne[3]);
}
}
int64_t t0 = ggml_time_ms();
if (sd_version_is_qwen_image(version) || sd_version_is_anima(version)) {
x = ggml_reshape_4d(work_ctx, x, x->ne[0], x->ne[1], 1, x->ne[2] * x->ne[3]);
}
sd::Tensor<float> decode(int n_threads,
const sd::Tensor<float>& x,
sd_tiling_params_t tiling_params,
bool decode_video = false,
bool circular_x = false,
bool circular_y = false,
bool silent = false) {
int64_t t0 = ggml_time_ms();
sd::Tensor<float> input = x;
sd::Tensor<float> output;
if (tiling_params.enabled) {
const int scale_factor = get_scale_factor();
int64_t W = input.shape()[0] * scale_factor;
int64_t H = input.shape()[1] * scale_factor;
float tile_overlap;
int tile_size_x, tile_size_y;
get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, tiling_params, x->ne[0], x->ne[1]);
get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, tiling_params, input.shape()[0], input.shape()[1]);
if (!silent) {
LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y);
}
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
return _compute(n_threads, in, true, &out, nullptr);
};
sd_tiling_non_square(x, result, scale_factor, tile_size_x, tile_size_y, tile_overlap, circular_x, circular_y, on_tiling, silent);
output = tiled_compute(
input,
n_threads,
static_cast<int>(W),
static_cast<int>(H),
scale_factor,
tile_size_x,
tile_size_y,
tile_overlap,
circular_x,
circular_y,
true,
"vae decode compute failed while processing a tile",
silent);
} else {
if (!_compute(n_threads, x, true, &result, work_ctx)) {
LOG_ERROR("Failed to decode latetnts");
free_compute_buffer();
return nullptr;
}
output = _compute(n_threads, input, true);
}
free_compute_buffer();
if (output.empty()) {
LOG_ERROR("vae decode compute failed");
return {};
}
if (scale_input) {
scale_to_0_1(result);
scale_tensor_to_0_1(&output);
}
int64_t t1 = ggml_time_ms();
LOG_DEBUG("computing vae decode graph completed, taking %.2fs", (t1 - t0) * 1.0f / 1000);
ggml_ext_tensor_clamp_inplace(result, 0.0f, 1.0f);
return result;
return std::move(output);
}
virtual ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) = 0;
virtual ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) = 0;
virtual ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) = 0;
virtual void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) = 0;
virtual sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) = 0;
virtual sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) = 0;
virtual sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) = 0;
virtual void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) = 0;
virtual void set_conv2d_scale(float scale) { SD_UNUSED(scale); };
};
@ -198,31 +222,25 @@ struct FakeVAE : public VAE {
return input_channels;
}
bool _compute(const int n_threads,
ggml_tensor* z,
bool decode_graph,
ggml_tensor** output,
ggml_context* output_ctx) override {
if (*output == nullptr && output_ctx != nullptr) {
*output = ggml_dup_tensor(output_ctx, z);
}
ggml_ext_tensor_iter(z, [&](ggml_tensor* z, int64_t i0, int64_t i1, int64_t i2, int64_t i3) {
float value = ggml_ext_tensor_get_f32(z, i0, i1, i2, i3);
ggml_ext_tensor_set_f32(*output, value, i0, i1, i2, i3);
});
return true;
sd::Tensor<float> _compute(const int n_threads,
const sd::Tensor<float>& z,
bool decode_graph) override {
SD_UNUSED(n_threads);
SD_UNUSED(decode_graph);
return z;
}
ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
SD_UNUSED(rng);
return vae_output;
}
ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
return latents;
}
ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
return ggml_ext_dup_and_cpy_tensor(work_ctx, latents);
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
return latents;
}
void get_param_tensors(std::map<std::string, ggml_tensor*>& tensors, const std::string prefix) override {}

View File

@ -1131,105 +1131,66 @@ namespace WAN {
ae.get_param_tensors(tensors, prefix);
}
ggml_tensor* vae_output_to_latents(ggml_context* work_ctx, ggml_tensor* vae_output, std::shared_ptr<RNG> rng) {
sd::Tensor<float> vae_output_to_latents(const sd::Tensor<float>& vae_output, std::shared_ptr<RNG> rng) override {
SD_UNUSED(rng);
return vae_output;
}
void get_latents_mean_std_vec(ggml_tensor* latents, int channel_dim, std::vector<float>& latents_mean_vec, std::vector<float>& latents_std_vec) {
GGML_ASSERT(latents->ne[channel_dim] == 16 || latents->ne[channel_dim] == 48);
if (latents->ne[channel_dim] == 16) { // Wan2.1 VAE
latents_mean_vec = {-0.7571f, -0.7089f, -0.9113f, 0.1075f, -0.1745f, 0.9653f, -0.1517f, 1.5508f,
0.4134f, -0.0715f, 0.5517f, -0.3632f, -0.1922f, -0.9497f, 0.2503f, -0.2921f};
latents_std_vec = {2.8184f, 1.4541f, 2.3275f, 2.6558f, 1.2196f, 1.7708f, 2.6052f, 2.0743f,
3.2687f, 2.1526f, 2.8652f, 1.5579f, 1.6382f, 1.1253f, 2.8251f, 1.9160f};
} else if (latents->ne[channel_dim] == 48) { // Wan2.2 VAE
latents_mean_vec = {-0.2289f, -0.0052f, -0.1323f, -0.2339f, -0.2799f, 0.0174f, 0.1838f, 0.1557f,
-0.1382f, 0.0542f, 0.2813f, 0.0891f, 0.1570f, -0.0098f, 0.0375f, -0.1825f,
-0.2246f, -0.1207f, -0.0698f, 0.5109f, 0.2665f, -0.2108f, -0.2158f, 0.2502f,
-0.2055f, -0.0322f, 0.1109f, 0.1567f, -0.0729f, 0.0899f, -0.2799f, -0.1230f,
-0.0313f, -0.1649f, 0.0117f, 0.0723f, -0.2839f, -0.2083f, -0.0520f, 0.3748f,
0.0152f, 0.1957f, 0.1433f, -0.2944f, 0.3573f, -0.0548f, -0.1681f, -0.0667f};
latents_std_vec = {
0.4765f, 1.0364f, 0.4514f, 1.1677f, 0.5313f, 0.4990f, 0.4818f, 0.5013f,
0.8158f, 1.0344f, 0.5894f, 1.0901f, 0.6885f, 0.6165f, 0.8454f, 0.4978f,
0.5759f, 0.3523f, 0.7135f, 0.6804f, 0.5833f, 1.4146f, 0.8986f, 0.5659f,
0.7069f, 0.5338f, 0.4889f, 0.4917f, 0.4069f, 0.4999f, 0.6866f, 0.4093f,
0.5709f, 0.6065f, 0.6415f, 0.4944f, 0.5726f, 1.2042f, 0.5458f, 1.6887f,
0.3971f, 1.0600f, 0.3943f, 0.5537f, 0.5444f, 0.4089f, 0.7468f, 0.7744f};
std::pair<sd::Tensor<float>, sd::Tensor<float>> get_latents_mean_std(const sd::Tensor<float>& latents) {
int channel_dim = latents.dim() == 5 ? 3 : 2;
std::vector<int64_t> stats_shape(static_cast<size_t>(latents.dim()), 1);
if (latents.shape()[channel_dim] == 16) { // Wan2.1 VAE
stats_shape[static_cast<size_t>(channel_dim)] = 16;
auto mean_tensor = sd::Tensor<float>::from_vector({-0.7571f, -0.7089f, -0.9113f, 0.1075f, -0.1745f, 0.9653f, -0.1517f, 1.5508f,
0.4134f, -0.0715f, 0.5517f, -0.3632f, -0.1922f, -0.9497f, 0.2503f, -0.2921f});
mean_tensor.reshape_(stats_shape);
auto std_tensor = sd::Tensor<float>::from_vector({2.8184f, 1.4541f, 2.3275f, 2.6558f, 1.2196f, 1.7708f, 2.6052f, 2.0743f,
3.2687f, 2.1526f, 2.8652f, 1.5579f, 1.6382f, 1.1253f, 2.8251f, 1.9160f});
std_tensor.reshape_(stats_shape);
return {std::move(mean_tensor), std::move(std_tensor)};
}
if (latents.shape()[channel_dim] == 48) { // Wan2.2 VAE
stats_shape[static_cast<size_t>(channel_dim)] = 48;
auto mean_tensor = sd::Tensor<float>::from_vector({-0.2289f, -0.0052f, -0.1323f, -0.2339f, -0.2799f, 0.0174f, 0.1838f, 0.1557f,
-0.1382f, 0.0542f, 0.2813f, 0.0891f, 0.1570f, -0.0098f, 0.0375f, -0.1825f,
-0.2246f, -0.1207f, -0.0698f, 0.5109f, 0.2665f, -0.2108f, -0.2158f, 0.2502f,
-0.2055f, -0.0322f, 0.1109f, 0.1567f, -0.0729f, 0.0899f, -0.2799f, -0.1230f,
-0.0313f, -0.1649f, 0.0117f, 0.0723f, -0.2839f, -0.2083f, -0.0520f, 0.3748f,
0.0152f, 0.1957f, 0.1433f, -0.2944f, 0.3573f, -0.0548f, -0.1681f, -0.0667f});
mean_tensor.reshape_(stats_shape);
auto std_tensor = sd::Tensor<float>::from_vector({0.4765f, 1.0364f, 0.4514f, 1.1677f, 0.5313f, 0.4990f, 0.4818f, 0.5013f,
0.8158f, 1.0344f, 0.5894f, 1.0901f, 0.6885f, 0.6165f, 0.8454f, 0.4978f,
0.5759f, 0.3523f, 0.7135f, 0.6804f, 0.5833f, 1.4146f, 0.8986f, 0.5659f,
0.7069f, 0.5338f, 0.4889f, 0.4917f, 0.4069f, 0.4999f, 0.6866f, 0.4093f,
0.5709f, 0.6065f, 0.6415f, 0.4944f, 0.5726f, 1.2042f, 0.5458f, 1.6887f,
0.3971f, 1.0600f, 0.3943f, 0.5537f, 0.5444f, 0.4089f, 0.7468f, 0.7744f});
std_tensor.reshape_(stats_shape);
return {std::move(mean_tensor), std::move(std_tensor)};
}
GGML_ABORT("unexpected latent channel dimension %lld for version %d",
(long long)latents.shape()[channel_dim],
version);
}
ggml_tensor* diffusion_to_vae_latents(ggml_context* work_ctx, ggml_tensor* latents) {
ggml_tensor* vae_latents = ggml_dup(work_ctx, latents);
int channel_dim = sd_version_is_wan(version) ? 3 : 2;
std::vector<float> latents_mean_vec;
std::vector<float> latents_std_vec;
get_latents_mean_std_vec(latents, channel_dim, latents_mean_vec, latents_std_vec);
float mean;
float std_;
for (int i = 0; i < latents->ne[3]; i++) {
if (channel_dim == 3) {
mean = latents_mean_vec[i];
std_ = latents_std_vec[i];
}
for (int j = 0; j < latents->ne[2]; j++) {
if (channel_dim == 2) {
mean = latents_mean_vec[j];
std_ = latents_std_vec[j];
}
for (int k = 0; k < latents->ne[1]; k++) {
for (int l = 0; l < latents->ne[0]; l++) {
float value = ggml_ext_tensor_get_f32(latents, l, k, j, i);
value = value * std_ / scale_factor + mean;
ggml_ext_tensor_set_f32(vae_latents, value, l, k, j, i);
}
}
}
}
return vae_latents;
sd::Tensor<float> diffusion_to_vae_latents(const sd::Tensor<float>& latents) override {
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents);
return (latents * std_tensor) / scale_factor + mean_tensor;
}
ggml_tensor* vae_to_diffuison_latents(ggml_context* work_ctx, ggml_tensor* latents) {
ggml_tensor* diffusion_latents = ggml_dup(work_ctx, latents);
int channel_dim = sd_version_is_wan(version) ? 3 : 2;
std::vector<float> latents_mean_vec;
std::vector<float> latents_std_vec;
get_latents_mean_std_vec(latents, channel_dim, latents_mean_vec, latents_std_vec);
float mean;
float std_;
for (int i = 0; i < latents->ne[3]; i++) {
if (channel_dim == 3) {
mean = latents_mean_vec[i];
std_ = latents_std_vec[i];
}
for (int j = 0; j < latents->ne[2]; j++) {
if (channel_dim == 2) {
mean = latents_mean_vec[j];
std_ = latents_std_vec[j];
}
for (int k = 0; k < latents->ne[1]; k++) {
for (int l = 0; l < latents->ne[0]; l++) {
float value = ggml_ext_tensor_get_f32(latents, l, k, j, i);
value = (value - mean) * scale_factor / std_;
ggml_ext_tensor_set_f32(diffusion_latents, value, l, k, j, i);
}
}
}
}
return diffusion_latents;
sd::Tensor<float> vae_to_diffusion_latents(const sd::Tensor<float>& latents) override {
auto [mean_tensor, std_tensor] = get_latents_mean_std(latents);
return ((latents - mean_tensor) * scale_factor) / std_tensor;
}
// The WAN autoencoder always emits z_dim latent channels, regardless of the
// requested input channel count.
int get_encoder_output_channels(int input_channels) {
    const int latent_channels = static_cast<int>(ae.z_dim);
    return latent_channels;
}
ggml_cgraph* build_graph(ggml_tensor* z, bool decode_graph) {
ggml_cgraph* gf = new_graph_custom(10240 * z->ne[2]);
z = to_backend(z);
ggml_cgraph* build_graph(const sd::Tensor<float>& z_tensor, bool decode_graph) {
ggml_cgraph* gf = new_graph_custom(10240 * z_tensor.shape()[2]);
ggml_tensor* z = make_input(z_tensor);
auto runner_ctx = get_context();
@ -1240,7 +1201,7 @@ namespace WAN {
return gf;
}
ggml_cgraph* build_graph_partial(ggml_tensor* z, bool decode_graph, int i) {
ggml_cgraph* build_graph_partial(const sd::Tensor<float>& z_tensor, bool decode_graph, int i) {
ggml_cgraph* gf = new_graph_custom(20480);
ae.clear_cache();
@ -1250,7 +1211,7 @@ namespace WAN {
ae._feat_map[feat_idx] = feat_cache;
}
z = to_backend(z);
ggml_tensor* z = make_input(z_tensor);
auto runner_ctx = get_context();
@ -1269,58 +1230,57 @@ namespace WAN {
return gf;
}
bool _compute(const int n_threads,
ggml_tensor* z,
bool decode_graph,
ggml_tensor** output,
ggml_context* output_ctx = nullptr) override {
sd::Tensor<float> _compute(const int n_threads,
const sd::Tensor<float>& z,
bool decode_graph) override {
if (true) {
sd::Tensor<float> input;
if (z.dim() == 4) {
input = z.unsqueeze(2);
}
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(z, decode_graph);
if (input.empty()) {
return build_graph(z, decode_graph);
} else {
return build_graph(input, decode_graph);
}
};
return GGMLRunner::compute(get_graph, n_threads, true, output, output_ctx);
auto result = restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, true),
input.empty() ? z.dim() : input.dim());
if (!result.empty() && z.dim() == 4) {
result.squeeze_(2);
}
return result;
} else { // chunk 1 result is weird
ae.clear_cache();
int64_t t = z->ne[2];
int64_t t = z.shape()[2];
int i = 0;
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph_partial(z, decode_graph, i);
};
ggml_tensor* out = nullptr;
bool res = GGMLRunner::compute(get_graph, n_threads, true, &out, output_ctx);
auto out_opt = GGMLRunner::compute<float>(get_graph, n_threads, true);
if (!out_opt.has_value()) {
return {};
}
sd::Tensor<float> out = std::move(*out_opt);
ae.clear_cache();
if (t == 1) {
*output = out;
return res;
return out;
}
*output = ggml_new_tensor_4d(output_ctx, GGML_TYPE_F32, out->ne[0], out->ne[1], (t - 1) * 4 + 1, out->ne[3]);
auto copy_to_output = [&]() {
for (int64_t i3 = 0; i3 < out->ne[3]; i3++) {
for (int64_t i2 = 0; i2 < out->ne[2]; i2++) {
for (int64_t i1 = 0; i1 < out->ne[1]; i1++) {
for (int64_t i0 = 0; i0 < out->ne[0]; i0++) {
float value = ggml_ext_tensor_get_f32(out, i0, i1, i2, i3);
int64_t offset = (i == 0) ? 0 : (1 + (i - 1) * 4);
ggml_ext_tensor_set_f32(*output, value, i0, i1, offset + i2, i3);
}
}
}
}
};
copy_to_output();
out = ggml_new_tensor_4d(output_ctx, GGML_TYPE_F32, out->ne[0], out->ne[1], 4, out->ne[3]);
sd::Tensor<float> output = std::move(out);
for (i = 1; i < t; i++) {
res = res || GGMLRunner::compute(get_graph, n_threads, true, &out);
auto chunk_opt = GGMLRunner::compute<float>(get_graph, n_threads, true);
if (!chunk_opt.has_value()) {
return {};
}
out = std::move(*chunk_opt);
ae.clear_cache();
copy_to_output();
output = sd::ops::concat(output, out, 2);
}
free_cache_ctx_and_buffer();
return res;
return output;
}
}
@ -1330,25 +1290,25 @@ namespace WAN {
params.mem_buffer = nullptr;
params.no_alloc = false;
ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);
if (true) {
// cpu f32, pass
// cpu f16, pass
// cuda f16, pass
// cuda f32, pass
auto z = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 104, 60, 2, 16);
ggml_set_f32(z, 0.5f);
z = load_tensor_from_file(work_ctx, "wan_vae_z.bin");
print_ggml_tensor(z);
ggml_tensor* out = nullptr;
auto z = sd::load_tensor_from_file_as_tensor<float>("wan_vae_z.bin");
print_sd_tensor(z);
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
_compute(8, z, true, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = _compute(8, z, true);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("decode test done in %ldms", t1 - t0);
}
};
@ -2229,23 +2189,23 @@ namespace WAN {
wan.get_param_tensors(tensors, prefix);
}
ggml_cgraph* build_graph(ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
ggml_tensor* clip_fea = nullptr,
ggml_tensor* c_concat = nullptr,
ggml_tensor* time_dim_concat = nullptr,
ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f) {
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor = {},
const sd::Tensor<float>& clip_fea_tensor = {},
const sd::Tensor<float>& c_concat_tensor = {},
const sd::Tensor<float>& time_dim_concat_tensor = {},
const sd::Tensor<float>& vace_context_tensor = {},
float vace_strength = 1.f) {
ggml_cgraph* gf = new_graph_custom(WAN_GRAPH_SIZE);
x = to_backend(x);
timesteps = to_backend(timesteps);
context = to_backend(context);
clip_fea = to_backend(clip_fea);
c_concat = to_backend(c_concat);
time_dim_concat = to_backend(time_dim_concat);
vace_context = to_backend(vace_context);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
ggml_tensor* context = make_optional_input(context_tensor);
ggml_tensor* clip_fea = make_optional_input(clip_fea_tensor);
ggml_tensor* c_concat = make_optional_input(c_concat_tensor);
ggml_tensor* time_dim_concat = make_optional_input(time_dim_concat_tensor);
ggml_tensor* vace_context = make_optional_input(vace_context_tensor);
pe_vec = Rope::gen_wan_pe(static_cast<int>(x->ne[2]),
static_cast<int>(x->ne[1]),
@ -2285,22 +2245,20 @@ namespace WAN {
return gf;
}
bool compute(int n_threads,
ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
ggml_tensor* clip_fea = nullptr,
ggml_tensor* c_concat = nullptr,
ggml_tensor* time_dim_concat = nullptr,
ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
sd::Tensor<float> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context = {},
const sd::Tensor<float>& clip_fea = {},
const sd::Tensor<float>& c_concat = {},
const sd::Tensor<float>& time_dim_concat = {},
const sd::Tensor<float>& vace_context = {},
float vace_strength = 1.f) {
auto get_graph = [&]() -> ggml_cgraph* {
return build_graph(x, timesteps, context, clip_fea, c_concat, time_dim_concat, vace_context, vace_strength);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
}
void test() {
@ -2309,36 +2267,38 @@ namespace WAN {
params.mem_buffer = nullptr;
params.no_alloc = false;
ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);
{
// cpu f16: pass
// cuda f16: pass
// cpu q8_0: pass
// auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 104, 60, 1, 16);
// auto x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 104, 60, 1, 16);
// ggml_set_f32(x, 0.01f);
auto x = load_tensor_from_file(work_ctx, "wan_dit_x.bin");
print_ggml_tensor(x);
auto x = sd::load_tensor_from_file_as_tensor<float>("wan_dit_x.bin");
print_sd_tensor(x);
std::vector<float> timesteps_vec(3, 1000.f);
timesteps_vec[0] = 0.f;
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
// auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 4096, 512, 1);
// auto context = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 4096, 512, 1);
// ggml_set_f32(context, 0.01f);
auto context = load_tensor_from_file(work_ctx, "wan_dit_context.bin");
print_ggml_tensor(context);
// auto clip_fea = load_tensor_from_file(work_ctx, "wan_dit_clip_fea.bin");
auto context = sd::load_tensor_from_file_as_tensor<float>("wan_dit_context.bin");
print_sd_tensor(context);
// auto clip_fea = load_tensor_from_file(ctx, "wan_dit_clip_fea.bin");
// print_ggml_tensor(clip_fea);
ggml_tensor* out = nullptr;
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, nullptr, nullptr, nullptr, nullptr, 1.f, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = compute(8, x, timesteps, context, {}, {}, {}, {}, 1.f);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("wan test done in %lldms", t1 - t0);
}
}

View File

@ -481,20 +481,21 @@ namespace ZImage {
z_image.get_param_tensors(tensors, prefix);
}
ggml_cgraph* build_graph(ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false) {
ggml_cgraph* build_graph(const sd::Tensor<float>& x_tensor,
const sd::Tensor<float>& timesteps_tensor,
const sd::Tensor<float>& context_tensor,
const std::vector<sd::Tensor<float>>& ref_latents_tensor = {},
bool increase_ref_index = false) {
ggml_cgraph* gf = new_graph_custom(Z_IMAGE_GRAPH_SIZE);
ggml_tensor* x = make_input(x_tensor);
ggml_tensor* timesteps = make_input(timesteps_tensor);
GGML_ASSERT(x->ne[3] == 1);
ggml_cgraph* gf = new_graph_custom(Z_IMAGE_GRAPH_SIZE);
x = to_backend(x);
context = to_backend(context);
timesteps = to_backend(timesteps);
for (int i = 0; i < ref_latents.size(); i++) {
ref_latents[i] = to_backend(ref_latents[i]);
GGML_ASSERT(!context_tensor.empty());
ggml_tensor* context = make_input(context_tensor);
std::vector<ggml_tensor*> ref_latents;
ref_latents.reserve(ref_latents_tensor.size());
for (const auto& ref_latent_tensor : ref_latents_tensor) {
ref_latents.push_back(make_input(ref_latent_tensor));
}
pe_vec = Rope::gen_z_image_pe(static_cast<int>(x->ne[1]),
@ -530,14 +531,12 @@ namespace ZImage {
return gf;
}
bool compute(int n_threads,
ggml_tensor* x,
ggml_tensor* timesteps,
ggml_tensor* context,
std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false,
ggml_tensor** output = nullptr,
ggml_context* output_ctx = nullptr) {
sd::Tensor<float> compute(int n_threads,
const sd::Tensor<float>& x,
const sd::Tensor<float>& timesteps,
const sd::Tensor<float>& context,
const std::vector<sd::Tensor<float>>& ref_latents = {},
bool increase_ref_index = false) {
// x: [N, in_channels, h, w]
// timesteps: [N, ]
// context: [N, max_position, hidden_size]
@ -545,7 +544,7 @@ namespace ZImage {
return build_graph(x, timesteps, context, ref_latents, increase_ref_index);
};
return GGMLRunner::compute(get_graph, n_threads, false, output, output_ctx);
return restore_trailing_singleton_dims(GGMLRunner::compute<float>(get_graph, n_threads, false), x.dim());
}
void test() {
@ -554,30 +553,37 @@ namespace ZImage {
params.mem_buffer = nullptr;
params.no_alloc = false;
ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != nullptr);
ggml_context* ctx = ggml_init(params);
GGML_ASSERT(ctx != nullptr);
{
// auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 16, 1);
// auto x = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 16, 16, 16, 1);
// ggml_set_f32(x, 0.01f);
auto x = load_tensor_from_file(work_ctx, "./z_image_x.bin");
print_ggml_tensor(x);
auto x = sd::load_tensor_from_file_as_tensor<float>("./z_image_x.bin");
print_sd_tensor(x);
std::vector<float> timesteps_vec(1, 0.f);
auto timesteps = vector_to_ggml_tensor(work_ctx, timesteps_vec);
auto timesteps = sd::Tensor<float>::from_vector(timesteps_vec);
// auto context = ggml_new_tensor_3d(work_ctx, GGML_TYPE_F32, 2560, 256, 1);
// auto context = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 2560, 256, 1);
// ggml_set_f32(context, 0.01f);
auto context = load_tensor_from_file(work_ctx, "./z_image_context.bin");
print_ggml_tensor(context);
auto context = sd::load_tensor_from_file_as_tensor<float>("./z_image_context.bin");
print_sd_tensor(context);
ggml_tensor* out = nullptr;
sd::Tensor<float> out;
int64_t t0 = ggml_time_ms();
compute(8, x, timesteps, context, {}, false, &out, work_ctx);
int64_t t1 = ggml_time_ms();
int64_t t0 = ggml_time_ms();
auto out_opt = compute(8,
x,
timesteps,
context,
{},
false);
int64_t t1 = ggml_time_ms();
print_ggml_tensor(out);
GGML_ASSERT(!out_opt.empty());
out = std::move(out_opt);
print_sd_tensor(out);
LOG_DEBUG("z_image test done in %lldms", t1 - t0);
}
}