chore: add .clang-tidy configuration and apply modernize checks (#902)

leejet committed 2025-10-18 23:23:40 +08:00 (committed by GitHub)
commit d05e46ca5e, parent 64a7698347
32 changed files with 766 additions and 747 deletions

===== .clang-tidy (new file, +10) =====

@@ -0,0 +1,10 @@
+Checks: >
+  modernize-make-shared,
+  modernize-use-nullptr,
+  modernize-use-override,
+  modernize-pass-by-value,
+  modernize-return-braced-init-list,
+  modernize-deprecated-headers,
+HeaderFilterRegex: '^$'
+WarningsAsErrors: ''
+FormatStyle: none
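As a reference for readers unfamiliar with these checks, here is a small, self-contained C++ sketch (hypothetical code, not taken from this repository) showing the kind of rewrites each listed modernize check performs:

// Hypothetical example code, not part of this repository.
#include <cstring>  // modernize-deprecated-headers: <cstring> instead of <string.h>
#include <memory>
#include <string>
#include <utility>

struct Block {
    virtual ~Block() = default;
    virtual int forward(int x) { return x + 1; }
};

struct Linear : Block {
    int forward(int x) override { return x * 2; }  // modernize-use-override: mark overriding virtuals
};

struct Named {
    std::string name;
    // modernize-pass-by-value: take by value and std::move instead of passing const std::string&
    explicit Named(std::string n) : name(std::move(n)) {}
};

struct Cond {
    int a = 0;
    int b = 0;
};

Cond make_cond() {
    return {1, 2};  // modernize-return-braced-init-list: no need to repeat the type name
}

int main() {
    std::shared_ptr<Block> block = std::make_shared<Linear>();  // modernize-make-shared: no raw `new`
    const char* label = nullptr;                                // modernize-use-nullptr: instead of NULL
    Named named(label ? label : "clip");
    Cond cond = make_cond();
    return block->forward(cond.a + cond.b) + static_cast<int>(std::strlen(named.name.c_str()));
}

With a compile_commands.json in place, running clang-tidy with this configuration (for example `clang-tidy -p build --fix <file>`) applies such rewrites automatically; the exact invocation depends on the build setup.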

===== next changed file =====

@@ -550,7 +550,7 @@ protected:
     int64_t num_positions;
     bool force_clip_f32;
-    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") {
+    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
         enum ggml_type token_wtype = GGML_TYPE_F32;
         if (!force_clip_f32) {
             token_wtype = get_type(prefix + "token_embedding.weight", tensor_types, GGML_TYPE_F32);
@@ -587,7 +587,7 @@ public:
         GGML_ASSERT(input_ids->ne[0] == position_embed_weight->ne[1]);
         input_ids = ggml_reshape_3d(ctx, input_ids, input_ids->ne[0], 1, input_ids->ne[1]);
-        auto token_embedding = ggml_get_rows(ctx, custom_embed_weight != NULL ? custom_embed_weight : token_embed_weight, input_ids);
+        auto token_embedding = ggml_get_rows(ctx, custom_embed_weight != nullptr ? custom_embed_weight : token_embed_weight, input_ids);
         token_embedding = ggml_reshape_3d(ctx, token_embedding, token_embedding->ne[0], token_embedding->ne[1], token_embedding->ne[3]);
         // token_embedding + position_embedding
@@ -606,7 +606,7 @@ protected:
     int64_t image_size;
     int64_t num_patches;
    int64_t num_positions;
-    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") {
+    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
         enum ggml_type patch_wtype = GGML_TYPE_F16;
         enum ggml_type class_wtype = GGML_TYPE_F32;
         enum ggml_type position_wtype = GGML_TYPE_F32;
@@ -641,10 +641,10 @@ public:
         // concat(patch_embedding, class_embedding) + position_embedding
         struct ggml_tensor* patch_embedding;
         int64_t N = pixel_values->ne[3];
-        patch_embedding = ggml_nn_conv_2d(ctx, pixel_values, patch_embed_weight, NULL, patch_size, patch_size);  // [N, embed_dim, image_size // pacht_size, image_size // pacht_size]
+        patch_embedding = ggml_nn_conv_2d(ctx, pixel_values, patch_embed_weight, nullptr, patch_size, patch_size);  // [N, embed_dim, image_size // pacht_size, image_size // pacht_size]
         patch_embedding = ggml_reshape_3d(ctx, patch_embedding, num_patches, embed_dim, N);  // [N, embed_dim, num_patches]
         patch_embedding = ggml_cont(ctx, ggml_permute(ctx, patch_embedding, 1, 0, 2, 3));  // [N, num_patches, embed_dim]
         patch_embedding = ggml_reshape_4d(ctx, patch_embedding, 1, embed_dim, num_patches, N);  // [N, num_patches, embed_dim, 1]
         struct ggml_tensor* class_embedding = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, embed_dim, N);
         class_embedding = ggml_repeat(ctx, class_embed_weight, class_embedding);  // [N, embed_dim]
@@ -669,7 +669,7 @@ enum CLIPVersion {
 class CLIPTextModel : public GGMLBlock {
 protected:
-    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") {
+    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
         if (version == OPEN_CLIP_VIT_BIGG_14) {
             enum ggml_type wtype = GGML_TYPE_F32;
             params["text_projection"] = ggml_new_tensor_2d(ctx, wtype, projection_dim, hidden_size);
@@ -735,8 +735,8 @@ public:
         if (return_pooled) {
             auto text_projection = params["text_projection"];
             ggml_tensor* pooled = ggml_view_1d(ctx, x, hidden_size, x->nb[1] * max_token_idx);
-            if (text_projection != NULL) {
-                pooled = ggml_nn_linear(ctx, pooled, text_projection, NULL);
+            if (text_projection != nullptr) {
+                pooled = ggml_nn_linear(ctx, pooled, text_projection, nullptr);
             } else {
                 LOG_DEBUG("identity projection");
             }
@@ -814,7 +814,7 @@ protected:
     int64_t out_features;
     bool transpose_weight;
-    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") {
+    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
         enum ggml_type wtype = get_type(prefix + "weight", tensor_types, GGML_TYPE_F32);
         if (transpose_weight) {
             params["weight"] = ggml_new_tensor_2d(ctx, wtype, out_features, in_features);
@@ -831,12 +831,12 @@ public:
           out_features(out_features),
           transpose_weight(transpose_weight) {}
-    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
+    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
         struct ggml_tensor* w = params["weight"];
         if (transpose_weight) {
             w = ggml_cont(ctx, ggml_transpose(ctx, w));
         }
-        return ggml_nn_linear(ctx, x, w, NULL);
+        return ggml_nn_linear(ctx, x, w, nullptr);
     }
 };
@@ -894,7 +894,7 @@ struct CLIPTextModelRunner : public GGMLRunner {
         model.init(params_ctx, tensor_types, prefix);
     }
-    std::string get_desc() {
+    std::string get_desc() override {
         return "clip";
     }
@@ -921,7 +921,7 @@ struct CLIPTextModelRunner : public GGMLRunner {
     struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids,
                                     int num_custom_embeddings = 0,
-                                    void* custom_embeddings_data = NULL,
+                                    void* custom_embeddings_data = nullptr,
                                     size_t max_token_idx = 0,
                                     bool return_pooled = false,
                                     int clip_skip = -1) {
@@ -929,9 +929,9 @@ struct CLIPTextModelRunner : public GGMLRunner {
         input_ids = to_backend(input_ids);
-        struct ggml_tensor* embeddings = NULL;
-        if (num_custom_embeddings > 0 && custom_embeddings_data != NULL) {
+        struct ggml_tensor* embeddings = nullptr;
+        if (num_custom_embeddings > 0 && custom_embeddings_data != nullptr) {
             auto token_embed_weight = model.get_token_embed_weight();
             auto custom_embeddings = ggml_new_tensor_2d(compute_ctx,
                                                         token_embed_weight->type,
@@ -958,7 +958,7 @@ struct CLIPTextModelRunner : public GGMLRunner {
                  bool return_pooled,
                  int clip_skip,
                  ggml_tensor** output,
-                 ggml_context* output_ctx = NULL) {
+                 ggml_context* output_ctx = nullptr) {
         auto get_graph = [&]() -> struct ggml_cgraph* {
             return build_graph(input_ids, num_custom_embeddings, custom_embeddings_data, max_token_idx, return_pooled, clip_skip);
         };

===== next changed file =====

@@ -121,7 +121,7 @@ public:
         }
     }
-    virtual struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* emb = NULL) {
+    virtual struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* emb = nullptr) {
         // For dims==3, we reduce dimension from 5d to 4d by merging h and w, in order not to change ggml
         // [N, c, t, h, w] => [N, c, t, h * w]
         // x: [N, channels, h, w] if dims == 2 else [N, channels, t, h, w]
@@ -131,7 +131,7 @@ public:
         auto out_layers_0 = std::dynamic_pointer_cast<GroupNorm32>(blocks["out_layers.0"]);
         auto out_layers_3 = std::dynamic_pointer_cast<UnaryBlock>(blocks["out_layers.3"]);
-        if (emb == NULL) {
+        if (emb == nullptr) {
             GGML_ASSERT(skip_t_emb);
         }
@@ -182,7 +182,7 @@ protected:
     int64_t dim_in;
     int64_t dim_out;
-    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, std::string prefix = "") {
+    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, std::string prefix = "") override {
         enum ggml_type wtype = get_type(prefix + "proj.weight", tensor_types, GGML_TYPE_F32);
         enum ggml_type bias_wtype = GGML_TYPE_F32;
         params["proj.weight"] = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
@@ -193,7 +193,7 @@ public:
     GEGLU(int64_t dim_in, int64_t dim_out)
         : dim_in(dim_in), dim_out(dim_out) {}
-    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
+    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
         // x: [ne3, ne2, ne1, dim_in]
         // return: [ne3, ne2, ne1, dim_out]
         struct ggml_tensor* w = params["proj.weight"];
@@ -222,7 +222,7 @@ public:
         blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out, bias));
     }
-    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
+    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
         // x: [ne3, ne2, ne1, dim_in]
         // return: [ne3, ne2, ne1, dim_out]
         auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
@@ -325,7 +325,7 @@ public:
         auto k = to_k->forward(ctx, context);  // [N, n_context, inner_dim]
         auto v = to_v->forward(ctx, context);  // [N, n_context, inner_dim]
-        x = ggml_nn_attention_ext(ctx, backend, q, k, v, n_head, NULL, false, false, flash_attn);  // [N, n_token, inner_dim]
+        x = ggml_nn_attention_ext(ctx, backend, q, k, v, n_head, nullptr, false, false, flash_attn);  // [N, n_token, inner_dim]
         x = to_out_0->forward(ctx, x);  // [N, n_token, query_dim]
         return x;
@@ -483,7 +483,7 @@ public:
 class AlphaBlender : public GGMLBlock {
 protected:
-    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, std::string prefix = "") {
+    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, std::string prefix = "") override {
         // Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
         enum ggml_type wtype = GGML_TYPE_F32;
         params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);

===== next changed file =====

@@ -6,9 +6,9 @@
 #include "t5.hpp"
 struct SDCondition {
-    struct ggml_tensor* c_crossattn = NULL;  // aka context
-    struct ggml_tensor* c_vector = NULL;     // aka y
-    struct ggml_tensor* c_concat = NULL;
+    struct ggml_tensor* c_crossattn = nullptr;  // aka context
+    struct ggml_tensor* c_vector = nullptr;     // aka y
+    struct ggml_tensor* c_concat = nullptr;
     SDCondition() = default;
     SDCondition(struct ggml_tensor* c_crossattn, struct ggml_tensor* c_vector, struct ggml_tensor* c_concat)
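The `return {hidden_states, vec, nullptr};` rewrites further down in this file rely on this three-pointer constructor. A minimal, self-contained sketch of the pattern, using hypothetical Tensor/Cond stand-ins rather than the real ggml_tensor/SDCondition types:

// Hypothetical stand-ins for ggml_tensor / SDCondition, for illustration only.
#include <cstdio>

struct Tensor {
    float value = 0.0f;
};

struct Cond {
    Tensor* c_crossattn = nullptr;
    Tensor* c_vector = nullptr;
    Tensor* c_concat = nullptr;
    Cond() = default;
    Cond(Tensor* c_crossattn, Tensor* c_vector, Tensor* c_concat)
        : c_crossattn(c_crossattn), c_vector(c_vector), c_concat(c_concat) {}
};

// modernize-return-braced-init-list: the braced list selects the matching
// constructor, so the return type does not have to be spelled out again.
Cond make_cond(Tensor* hidden_states, Tensor* vec) {
    return {hidden_states, vec, nullptr};
}

int main() {
    Tensor hidden_states;
    Tensor vec;
    Cond cond = make_cond(&hidden_states, &vec);
    std::printf("c_concat is null: %d\n", cond.c_concat == nullptr);
    return 0;
}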
@@ -79,28 +79,28 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
         }
     }
-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
+    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
         text_model->get_param_tensors(tensors, "cond_stage_model.transformer.text_model");
         if (sd_version_is_sdxl(version)) {
             text_model2->get_param_tensors(tensors, "cond_stage_model.1.transformer.text_model");
         }
     }
-    void alloc_params_buffer() {
+    void alloc_params_buffer() override {
         text_model->alloc_params_buffer();
         if (sd_version_is_sdxl(version)) {
             text_model2->alloc_params_buffer();
         }
     }
-    void free_params_buffer() {
+    void free_params_buffer() override {
         text_model->free_params_buffer();
         if (sd_version_is_sdxl(version)) {
             text_model2->free_params_buffer();
         }
     }
-    size_t get_params_buffer_size() {
+    size_t get_params_buffer_size() override {
         size_t buffer_size = text_model->get_params_buffer_size();
         if (sd_version_is_sdxl(version)) {
             buffer_size += text_model2->get_params_buffer_size();
@@ -121,11 +121,11 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
         }
         struct ggml_init_params params;
         params.mem_size = 100 * 1024 * 1024;  // max for custom embeddings 100 MB
-        params.mem_buffer = NULL;
+        params.mem_buffer = nullptr;
         params.no_alloc = false;
         struct ggml_context* embd_ctx = ggml_init(params);
-        struct ggml_tensor* embd = NULL;
-        struct ggml_tensor* embd2 = NULL;
+        struct ggml_tensor* embd = nullptr;
+        struct ggml_tensor* embd2 = nullptr;
         auto on_load = [&](const TensorStorage& tensor_storage, ggml_tensor** dst_tensor) {
             if (tensor_storage.ne[0] != text_model->model.hidden_size) {
                 if (text_model2) {
@@ -404,11 +404,11 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
                                              int adm_in_channels = -1,
                                              bool zero_out_masked = false) {
         int64_t t0 = ggml_time_ms();
-        struct ggml_tensor* hidden_states = NULL;         // [N, n_token, hidden_size]
-        struct ggml_tensor* chunk_hidden_states = NULL;   // [n_token, hidden_size] or [n_token, hidden_size + hidden_size2]
-        struct ggml_tensor* chunk_hidden_states1 = NULL;  // [n_token, hidden_size]
-        struct ggml_tensor* chunk_hidden_states2 = NULL;  // [n_token, hidden_size2]
-        struct ggml_tensor* pooled = NULL;
+        struct ggml_tensor* hidden_states = nullptr;         // [N, n_token, hidden_size]
+        struct ggml_tensor* chunk_hidden_states = nullptr;   // [n_token, hidden_size] or [n_token, hidden_size + hidden_size2]
+        struct ggml_tensor* chunk_hidden_states1 = nullptr;  // [n_token, hidden_size]
+        struct ggml_tensor* chunk_hidden_states2 = nullptr;  // [n_token, hidden_size2]
+        struct ggml_tensor* pooled = nullptr;
         std::vector<float> hidden_states_vec;
         if (clip_skip <= 0) {
@@ -424,7 +424,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
                                             weights.begin() + (chunk_idx + 1) * chunk_len);
             auto input_ids = vector_to_ggml_tensor_i32(work_ctx, chunk_tokens);
-            struct ggml_tensor* input_ids2 = NULL;
+            struct ggml_tensor* input_ids2 = nullptr;
             size_t max_token_idx = 0;
             if (sd_version_is_sdxl(version)) {
                 auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), tokenizer.EOS_TOKEN_ID);
@@ -512,7 +512,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
                                          chunk_hidden_states->ne[0],
                                          ggml_nelements(hidden_states) / chunk_hidden_states->ne[0]);
-        ggml_tensor* vec = NULL;
+        ggml_tensor* vec = nullptr;
         if (sd_version_is_sdxl(version)) {
             int out_dim = 256;
             vec = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, adm_in_channels);
@@ -549,13 +549,13 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
             GGML_ASSERT(offset == ggml_nbytes(vec));
         }
         // print_ggml_tensor(result);
-        return SDCondition(hidden_states, vec, NULL);
+        return {hidden_states, vec, nullptr};
     }
     std::tuple<SDCondition, std::vector<bool>>
         get_learned_condition_with_trigger(ggml_context* work_ctx,
                                            int n_threads,
-                                           const ConditionerParams& conditioner_params) {
+                                           const ConditionerParams& conditioner_params) override {
         auto image_tokens = convert_token_to_id(trigger_word);
         // if(image_tokens.size() == 1){
         //     printf(" image token id is: %d \n", image_tokens[0]);
@@ -589,7 +589,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
     }
     std::string remove_trigger_from_prompt(ggml_context* work_ctx,
-                                           const std::string& prompt) {
+                                           const std::string& prompt) override {
         auto image_tokens = convert_token_to_id(trigger_word);
         GGML_ASSERT(image_tokens.size() == 1);
         auto tokens_and_weights = tokenize(prompt, false);
@@ -602,7 +602,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
     SDCondition get_learned_condition(ggml_context* work_ctx,
                                       int n_threads,
-                                      const ConditionerParams& conditioner_params) {
+                                      const ConditionerParams& conditioner_params) override {
         auto tokens_and_weights = tokenize(conditioner_params.text, true);
         std::vector<int>& tokens = tokens_and_weights.first;
         std::vector<float>& weights = tokens_and_weights.second;
@@ -628,7 +628,7 @@ struct FrozenCLIPVisionEmbedder : public GGMLRunner {
         vision_model.init(params_ctx, tensor_types, "cond_stage_model.transformer");
     }
-    std::string get_desc() {
+    std::string get_desc() override {
         return "clip_vision";
     }
@@ -678,25 +678,25 @@ struct SD3CLIPEmbedder : public Conditioner {
         t5 = std::make_shared<T5Runner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.t5xxl.transformer");
     }
-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
+    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
         clip_l->get_param_tensors(tensors, "text_encoders.clip_l.transformer.text_model");
         clip_g->get_param_tensors(tensors, "text_encoders.clip_g.transformer.text_model");
         t5->get_param_tensors(tensors, "text_encoders.t5xxl.transformer");
     }
-    void alloc_params_buffer() {
+    void alloc_params_buffer() override {
         clip_l->alloc_params_buffer();
         clip_g->alloc_params_buffer();
         t5->alloc_params_buffer();
     }
-    void free_params_buffer() {
+    void free_params_buffer() override {
         clip_l->free_params_buffer();
         clip_g->free_params_buffer();
         t5->free_params_buffer();
     }
-    size_t get_params_buffer_size() {
+    size_t get_params_buffer_size() override {
         size_t buffer_size = clip_l->get_params_buffer_size();
         buffer_size += clip_g->get_params_buffer_size();
         buffer_size += t5->get_params_buffer_size();
@@ -747,7 +747,7 @@ struct SD3CLIPEmbedder : public Conditioner {
         clip_l_tokenizer.pad_tokens(clip_l_tokens, clip_l_weights, max_length, padding);
         clip_g_tokenizer.pad_tokens(clip_g_tokens, clip_g_weights, max_length, padding);
-        t5_tokenizer.pad_tokens(t5_tokens, t5_weights, NULL, max_length, padding);
+        t5_tokenizer.pad_tokens(t5_tokens, t5_weights, nullptr, max_length, padding);
         // for (int i = 0; i < clip_l_tokens.size(); i++) {
         //     std::cout << clip_l_tokens[i] << ":" << clip_l_weights[i] << ", ";
@@ -784,14 +784,14 @@ struct SD3CLIPEmbedder : public Conditioner {
         }
         int64_t t0 = ggml_time_ms();
-        struct ggml_tensor* hidden_states = NULL;           // [N, n_token*2, 4096]
-        struct ggml_tensor* chunk_hidden_states = NULL;     // [n_token*2, 4096]
-        struct ggml_tensor* chunk_hidden_states_l = NULL;   // [n_token, hidden_size_l]
-        struct ggml_tensor* chunk_hidden_states_g = NULL;   // [n_token, hidden_size_g]
-        struct ggml_tensor* chunk_hidden_states_t5 = NULL;  // [n_token, hidden_size_t5]
-        struct ggml_tensor* pooled = NULL;
-        struct ggml_tensor* pooled_l = NULL;  // [768,]
-        struct ggml_tensor* pooled_g = NULL;  // [1280,]
+        struct ggml_tensor* hidden_states = nullptr;           // [N, n_token*2, 4096]
+        struct ggml_tensor* chunk_hidden_states = nullptr;     // [n_token*2, 4096]
+        struct ggml_tensor* chunk_hidden_states_l = nullptr;   // [n_token, hidden_size_l]
+        struct ggml_tensor* chunk_hidden_states_g = nullptr;   // [n_token, hidden_size_g]
+        struct ggml_tensor* chunk_hidden_states_t5 = nullptr;  // [n_token, hidden_size_t5]
+        struct ggml_tensor* pooled = nullptr;
+        struct ggml_tensor* pooled_l = nullptr;  // [768,]
+        struct ggml_tensor* pooled_g = nullptr;  // [1280,]
         std::vector<float> hidden_states_vec;
         size_t chunk_len = 77;
@@ -810,7 +810,7 @@ struct SD3CLIPEmbedder : public Conditioner {
             clip_l->compute(n_threads,
                             input_ids,
                             0,
-                            NULL,
+                            nullptr,
                             max_token_idx,
                             false,
                             clip_skip,
@@ -838,7 +838,7 @@ struct SD3CLIPEmbedder : public Conditioner {
                 clip_l->compute(n_threads,
                                 input_ids,
                                 0,
-                                NULL,
+                                nullptr,
                                 max_token_idx,
                                 true,
                                 clip_skip,
@@ -860,7 +860,7 @@ struct SD3CLIPEmbedder : public Conditioner {
             clip_g->compute(n_threads,
                             input_ids,
                             0,
-                            NULL,
+                            nullptr,
                             max_token_idx,
                             false,
                             clip_skip,
@@ -889,7 +889,7 @@ struct SD3CLIPEmbedder : public Conditioner {
                 clip_g->compute(n_threads,
                                 input_ids,
                                 0,
-                                NULL,
+                                nullptr,
                                 max_token_idx,
                                 true,
                                 clip_skip,
@@ -909,7 +909,7 @@ struct SD3CLIPEmbedder : public Conditioner {
             t5->compute(n_threads,
                         input_ids,
-                        NULL,
+                        nullptr,
                         &chunk_hidden_states_t5,
                         work_ctx);
             {
@@ -974,12 +974,12 @@ struct SD3CLIPEmbedder : public Conditioner {
                                          hidden_states,
                                          chunk_hidden_states->ne[0],
                                          ggml_nelements(hidden_states) / chunk_hidden_states->ne[0]);
-        return SDCondition(hidden_states, pooled, NULL);
+        return {hidden_states, pooled, nullptr};
     }
     SDCondition get_learned_condition(ggml_context* work_ctx,
                                       int n_threads,
-                                      const ConditionerParams& conditioner_params) {
+                                      const ConditionerParams& conditioner_params) override {
         auto tokens_and_weights = tokenize(conditioner_params.text, 77, true);
         return get_learned_condition_common(work_ctx,
                                             n_threads,
@@ -1003,22 +1003,22 @@ struct FluxCLIPEmbedder : public Conditioner {
         t5 = std::make_shared<T5Runner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.t5xxl.transformer");
     }
-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
+    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
         clip_l->get_param_tensors(tensors, "text_encoders.clip_l.transformer.text_model");
         t5->get_param_tensors(tensors, "text_encoders.t5xxl.transformer");
     }
-    void alloc_params_buffer() {
+    void alloc_params_buffer() override {
         clip_l->alloc_params_buffer();
         t5->alloc_params_buffer();
     }
-    void free_params_buffer() {
+    void free_params_buffer() override {
         clip_l->free_params_buffer();
         t5->free_params_buffer();
     }
-    size_t get_params_buffer_size() {
+    size_t get_params_buffer_size() override {
         size_t buffer_size = clip_l->get_params_buffer_size();
         buffer_size += t5->get_params_buffer_size();
         return buffer_size;
@@ -1061,7 +1061,7 @@ struct FluxCLIPEmbedder : public Conditioner {
         }
         clip_l_tokenizer.pad_tokens(clip_l_tokens, clip_l_weights, 77, padding);
-        t5_tokenizer.pad_tokens(t5_tokens, t5_weights, NULL, max_length, padding);
+        t5_tokenizer.pad_tokens(t5_tokens, t5_weights, nullptr, max_length, padding);
         // for (int i = 0; i < clip_l_tokens.size(); i++) {
         //     std::cout << clip_l_tokens[i] << ":" << clip_l_weights[i] << ", ";
@@ -1091,9 +1091,9 @@ struct FluxCLIPEmbedder : public Conditioner {
         }
         int64_t t0 = ggml_time_ms();
-        struct ggml_tensor* hidden_states = NULL;        // [N, n_token, 4096]
-        struct ggml_tensor* chunk_hidden_states = NULL;  // [n_token, 4096]
-        struct ggml_tensor* pooled = NULL;               // [768,]
+        struct ggml_tensor* hidden_states = nullptr;        // [N, n_token, 4096]
+        struct ggml_tensor* chunk_hidden_states = nullptr;  // [n_token, 4096]
+        struct ggml_tensor* pooled = nullptr;               // [768,]
         std::vector<float> hidden_states_vec;
         size_t chunk_count = t5_tokens.size() / chunk_len;
@@ -1115,7 +1115,7 @@ struct FluxCLIPEmbedder : public Conditioner {
             clip_l->compute(n_threads,
                             input_ids,
                             0,
-                            NULL,
+                            nullptr,
                             max_token_idx,
                             true,
                             clip_skip,
@@ -1134,7 +1134,7 @@ struct FluxCLIPEmbedder : public Conditioner {
             t5->compute(n_threads,
                         input_ids,
-                        NULL,
+                        nullptr,
                         &chunk_hidden_states,
                         work_ctx);
             {
@@ -1173,12 +1173,12 @@ struct FluxCLIPEmbedder : public Conditioner {
                                          hidden_states,
                                          chunk_hidden_states->ne[0],
                                          ggml_nelements(hidden_states) / chunk_hidden_states->ne[0]);
-        return SDCondition(hidden_states, pooled, NULL);
+        return {hidden_states, pooled, nullptr};
     }
     SDCondition get_learned_condition(ggml_context* work_ctx,
                                       int n_threads,
-                                      const ConditionerParams& conditioner_params) {
+                                      const ConditionerParams& conditioner_params) override {
         auto tokens_and_weights = tokenize(conditioner_params.text, chunk_len, true);
         return get_learned_condition_common(work_ctx,
                                             n_threads,
@@ -1206,19 +1206,19 @@ struct T5CLIPEmbedder : public Conditioner {
         t5 = std::make_shared<T5Runner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.t5xxl.transformer", is_umt5);
     }
-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
+    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
         t5->get_param_tensors(tensors, "text_encoders.t5xxl.transformer");
     }
-    void alloc_params_buffer() {
+    void alloc_params_buffer() override {
         t5->alloc_params_buffer();
     }
-    void free_params_buffer() {
+    void free_params_buffer() override {
         t5->free_params_buffer();
     }
-    size_t get_params_buffer_size() {
+    size_t get_params_buffer_size() override {
         size_t buffer_size = 0;
         buffer_size += t5->get_params_buffer_size();
@@ -1287,9 +1287,9 @@ struct T5CLIPEmbedder : public Conditioner {
         auto& t5_attn_mask_vec = std::get<2>(token_and_weights);
         int64_t t0 = ggml_time_ms();
-        struct ggml_tensor* hidden_states = NULL;        // [N, n_token, 4096]
-        struct ggml_tensor* chunk_hidden_states = NULL;  // [n_token, 4096]
-        struct ggml_tensor* pooled = NULL;
+        struct ggml_tensor* hidden_states = nullptr;        // [N, n_token, 4096]
+        struct ggml_tensor* chunk_hidden_states = nullptr;  // [n_token, 4096]
+        struct ggml_tensor* pooled = nullptr;
         struct ggml_tensor* t5_attn_mask = vector_to_ggml_tensor(work_ctx, t5_attn_mask_vec);  // [n_token]
         std::vector<float> hidden_states_vec;
@@ -1306,7 +1306,7 @@ struct T5CLIPEmbedder : public Conditioner {
                                                  t5_attn_mask_vec.begin() + (chunk_idx + 1) * chunk_len);
             auto input_ids = vector_to_ggml_tensor_i32(work_ctx, chunk_tokens);
-            auto t5_attn_mask_chunk = use_mask ? vector_to_ggml_tensor(work_ctx, chunk_mask) : NULL;
+            auto t5_attn_mask_chunk = use_mask ? vector_to_ggml_tensor(work_ctx, chunk_mask) : nullptr;
             t5->compute(n_threads,
                         input_ids,
@@ -1358,12 +1358,12 @@ struct T5CLIPEmbedder : public Conditioner {
             modify_mask_to_attend_padding(t5_attn_mask, ggml_nelements(t5_attn_mask), mask_pad);
-        return SDCondition(hidden_states, t5_attn_mask, NULL);
+        return {hidden_states, t5_attn_mask, nullptr};
     }
     SDCondition get_learned_condition(ggml_context* work_ctx,
                                       int n_threads,
-                                      const ConditionerParams& conditioner_params) {
+                                      const ConditionerParams& conditioner_params) override {
         auto tokens_and_weights = tokenize(conditioner_params.text, chunk_len, true);
         return get_learned_condition_common(work_ctx,
                                             n_threads,
@@ -1389,19 +1389,19 @@ struct Qwen2_5_VLCLIPEmbedder : public Conditioner {
                                                 enable_vision);
     }
-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
+    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
         qwenvl->get_param_tensors(tensors, "text_encoders.qwen2vl");
     }
-    void alloc_params_buffer() {
+    void alloc_params_buffer() override {
         qwenvl->alloc_params_buffer();
    }
-    void free_params_buffer() {
+    void free_params_buffer() override {
         qwenvl->free_params_buffer();
     }
-    size_t get_params_buffer_size() {
+    size_t get_params_buffer_size() override {
         size_t buffer_size = 0;
         buffer_size += qwenvl->get_params_buffer_size();
         return buffer_size;
@@ -1454,7 +1454,7 @@ struct Qwen2_5_VLCLIPEmbedder : public Conditioner {
     SDCondition get_learned_condition(ggml_context* work_ctx,
                                       int n_threads,
-                                      const ConditionerParams& conditioner_params) {
+                                      const ConditionerParams& conditioner_params) override {
         std::string prompt;
         std::vector<std::pair<int, ggml_tensor*>> image_embeds;
         size_t system_prompt_length = 0;
@@ -1530,7 +1530,7 @@ struct Qwen2_5_VLCLIPEmbedder : public Conditioner {
         auto& weights = std::get<1>(tokens_and_weights);
         int64_t t0 = ggml_time_ms();
-        struct ggml_tensor* hidden_states = NULL;  // [N, n_token, 3584]
+        struct ggml_tensor* hidden_states = nullptr;  // [N, n_token, 3584]
         auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
@@ -1570,7 +1570,7 @@ struct Qwen2_5_VLCLIPEmbedder : public Conditioner {
         int64_t t1 = ggml_time_ms();
         LOG_DEBUG("computing condition graph completed, taking %" PRId64 " ms", t1 - t0);
-        return SDCondition(new_hidden_states, nullptr, nullptr);
+        return {new_hidden_states, nullptr, nullptr};
     }
 };

===== next changed file =====

@@ -206,18 +206,18 @@ public:
                                 struct ggml_tensor* guided_hint,
                                 struct ggml_tensor* timesteps,
                                 struct ggml_tensor* context,
-                                struct ggml_tensor* y = NULL) {
+                                struct ggml_tensor* y = nullptr) {
         // x: [N, in_channels, h, w] or [N, in_channels/2, h, w]
         // timesteps: [N,]
         // context: [N, max_position, hidden_size] or [1, max_position, hidden_size]. for example, [N, 77, 768]
         // y: [N, adm_in_channels] or [1, adm_in_channels]
-        if (context != NULL) {
+        if (context != nullptr) {
             if (context->ne[2] != x->ne[3]) {
                 context = ggml_repeat(ctx, context, ggml_new_tensor_3d(ctx, GGML_TYPE_F32, context->ne[0], context->ne[1], x->ne[3]));
             }
         }
-        if (y != NULL) {
+        if (y != nullptr) {
             if (y->ne[1] != x->ne[3]) {
                 y = ggml_repeat(ctx, y, ggml_new_tensor_2d(ctx, GGML_TYPE_F32, y->ne[0], x->ne[3]));
             }
@@ -237,7 +237,7 @@ public:
         emb = time_embed_2->forward(ctx, emb);  // [N, time_embed_dim]
         // SDXL/SVD
-        if (y != NULL) {
+        if (y != nullptr) {
             auto label_embed_0 = std::dynamic_pointer_cast<Linear>(blocks["label_emb.0.0"]);
             auto label_embed_2 = std::dynamic_pointer_cast<Linear>(blocks["label_emb.0.2"]);
@@ -250,7 +250,7 @@ public:
         std::vector<struct ggml_tensor*> outs;
-        if (guided_hint == NULL) {
+        if (guided_hint == nullptr) {
             guided_hint = input_hint_block_forward(ctx, hint, emb, context);
         }
         outs.push_back(guided_hint);
@@ -312,10 +312,10 @@ struct ControlNet : public GGMLRunner {
     SDVersion version = VERSION_SD1;
     ControlNetBlock control_net;
-    ggml_backend_buffer_t control_buffer = NULL;  // keep control output tensors in backend memory
-    ggml_context* control_ctx = NULL;
+    ggml_backend_buffer_t control_buffer = nullptr;  // keep control output tensors in backend memory
+    ggml_context* control_ctx = nullptr;
     std::vector<struct ggml_tensor*> controls;  // (12 input block outputs, 1 middle block output) SD 1.5
-    struct ggml_tensor* guided_hint = NULL;  // guided_hint cache, for faster inference
+    struct ggml_tensor* guided_hint = nullptr;  // guided_hint cache, for faster inference
     bool guided_hint_cached = false;
     ControlNet(ggml_backend_t backend,
@@ -337,14 +337,14 @@ struct ControlNet : public GGMLRunner {
         }
     }
-    ~ControlNet() {
+    ~ControlNet() override {
         free_control_ctx();
     }
     void alloc_control_ctx(std::vector<struct ggml_tensor*> outs) {
         struct ggml_init_params params;
         params.mem_size = static_cast<size_t>(outs.size() * ggml_tensor_overhead()) + 1024 * 1024;
-        params.mem_buffer = NULL;
+        params.mem_buffer = nullptr;
         params.no_alloc = true;
         control_ctx = ggml_init(params);
@@ -366,20 +366,20 @@ struct ControlNet : public GGMLRunner {
     }
     void free_control_ctx() {
-        if (control_buffer != NULL) {
+        if (control_buffer != nullptr) {
             ggml_backend_buffer_free(control_buffer);
-            control_buffer = NULL;
+            control_buffer = nullptr;
         }
-        if (control_ctx != NULL) {
+        if (control_ctx != nullptr) {
             ggml_free(control_ctx);
-            control_ctx = NULL;
+            control_ctx = nullptr;
         }
-        guided_hint = NULL;
+        guided_hint = nullptr;
         guided_hint_cached = false;
         controls.clear();
     }
-    std::string get_desc() {
+    std::string get_desc() override {
         return "control_net";
     }
@@ -391,12 +391,12 @@ struct ControlNet : public GGMLRunner {
                                     struct ggml_tensor* hint,
                                     struct ggml_tensor* timesteps,
                                     struct ggml_tensor* context,
-                                    struct ggml_tensor* y = NULL) {
+                                    struct ggml_tensor* y = nullptr) {
         struct ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, CONTROL_NET_GRAPH_SIZE, false);
         x = to_backend(x);
         if (guided_hint_cached) {
-            hint = NULL;
+            hint = nullptr;
         } else {
             hint = to_backend(hint);
         }
@@ -408,12 +408,12 @@ struct ControlNet : public GGMLRunner {
                                            runtime_backend,
                                            x,
                                            hint,
-                                           guided_hint_cached ? guided_hint : NULL,
+                                           guided_hint_cached ? guided_hint : nullptr,
                                            timesteps,
                                            context,
                                            y);
-        if (control_ctx == NULL) {
+        if (control_ctx == nullptr) {
             alloc_control_ctx(outs);
         }
@@ -431,8 +431,8 @@ struct ControlNet : public GGMLRunner {
                  struct ggml_tensor* timesteps,
                  struct ggml_tensor* context,
                  struct ggml_tensor* y,
-                 struct ggml_tensor** output = NULL,
-                 struct ggml_context* output_ctx = NULL) {
+                 struct ggml_tensor** output = nullptr,
+                 struct ggml_context* output_ctx = nullptr) {
         // x: [N, in_channels, h, w]
         // timesteps: [N, ]
         // context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]

===== next changed file =====

@@ -19,7 +19,7 @@ struct SigmaSchedule {
 };
 struct DiscreteSchedule : SigmaSchedule {
-    std::vector<float> get_sigmas(uint32_t n, float sigma_min, float sigma_max, t_to_sigma_t t_to_sigma) {
+    std::vector<float> get_sigmas(uint32_t n, float sigma_min, float sigma_max, t_to_sigma_t t_to_sigma) override {
         std::vector<float> result;
         int t_max = TIMESTEPS - 1;
@@ -43,7 +43,7 @@ struct DiscreteSchedule : SigmaSchedule {
 };
 struct ExponentialSchedule : SigmaSchedule {
-    std::vector<float> get_sigmas(uint32_t n, float sigma_min, float sigma_max, t_to_sigma_t t_to_sigma) {
+    std::vector<float> get_sigmas(uint32_t n, float sigma_min, float sigma_max, t_to_sigma_t t_to_sigma) override {
         std::vector<float> sigmas;
         // Calculate step size
@@ -150,7 +150,7 @@ std::vector<float> log_linear_interpolation(std::vector<float> sigma_in,
   https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
 */
 struct AYSSchedule : SigmaSchedule {
-    std::vector<float> get_sigmas(uint32_t n, float sigma_min, float sigma_max, t_to_sigma_t t_to_sigma) {
+    std::vector<float> get_sigmas(uint32_t n, float sigma_min, float sigma_max, t_to_sigma_t t_to_sigma) override {
         const std::vector<float> noise_levels[] = {
             /* SD1.5 */
             {14.6146412293f, 6.4745760956f, 3.8636745985f, 2.6946151520f,
@@ -204,7 +204,7 @@ struct AYSSchedule : SigmaSchedule {
  * GITS Scheduler: https://github.com/zju-pi/diff-sampler/tree/main/gits-main
  */
 struct GITSSchedule : SigmaSchedule {
-    std::vector<float> get_sigmas(uint32_t n, float sigma_min, float sigma_max, t_to_sigma_t t_to_sigma) {
+    std::vector<float> get_sigmas(uint32_t n, float sigma_min, float sigma_max, t_to_sigma_t t_to_sigma) override {
         if (sigma_max <= 0.0f) {
             return std::vector<float>{};
         }
@@ -252,7 +252,7 @@ struct SGMUniformSchedule : SigmaSchedule {
 };
 struct KarrasSchedule : SigmaSchedule {
-    std::vector<float> get_sigmas(uint32_t n, float sigma_min, float sigma_max, t_to_sigma_t t_to_sigma) {
+    std::vector<float> get_sigmas(uint32_t n, float sigma_min, float sigma_max, t_to_sigma_t t_to_sigma) override {
         // These *COULD* be function arguments here,
         // but does anybody ever bother to touch them?
         float rho = 7.f;
@@ -350,15 +350,15 @@ struct CompVisDenoiser : public Denoiser {
     float sigma_data = 1.0f;
-    float sigma_min() {
+    float sigma_min() override {
         return sigmas[0];
     }
-    float sigma_max() {
+    float sigma_max() override {
         return sigmas[TIMESTEPS - 1];
     }
-    float sigma_to_t(float sigma) {
+    float sigma_to_t(float sigma) override {
         float log_sigma = std::log(sigma);
         std::vector<float> dists;
         dists.reserve(TIMESTEPS);
@@ -384,7 +384,7 @@ struct CompVisDenoiser : public Denoiser {
         return t;
     }
-    float t_to_sigma(float t) {
+    float t_to_sigma(float t) override {
         int low_idx = static_cast<int>(std::floor(t));
         int high_idx = static_cast<int>(std::ceil(t));
         float w = t - static_cast<float>(low_idx);
@@ -392,7 +392,7 @@ struct CompVisDenoiser : public Denoiser {
         return std::exp(log_sigma);
     }
-    std::vector<float> get_scalings(float sigma) {
+    std::vector<float> get_scalings(float sigma) override {
         float c_skip = 1.0f;
         float c_out = -sigma;
         float c_in = 1.0f / std::sqrt(sigma * sigma + sigma_data * sigma_data);
@@ -400,19 +400,19 @@ struct CompVisDenoiser : public Denoiser {
     }
     // this function will modify noise/latent
-    ggml_tensor* noise_scaling(float sigma, ggml_tensor* noise, ggml_tensor* latent) {
+    ggml_tensor* noise_scaling(float sigma, ggml_tensor* noise, ggml_tensor* latent) override {
         ggml_tensor_scale(noise, sigma);
         ggml_tensor_add(latent, noise);
         return latent;
     }
-    ggml_tensor* inverse_noise_scaling(float sigma, ggml_tensor* latent) {
+    ggml_tensor* inverse_noise_scaling(float sigma, ggml_tensor* latent) override {
         return latent;
     }
 };
 struct CompVisVDenoiser : public CompVisDenoiser {
-    std::vector<float> get_scalings(float sigma) {
+    std::vector<float> get_scalings(float sigma) override {
         float c_skip = sigma_data * sigma_data / (sigma * sigma + sigma_data * sigma_data);
         float c_out = -sigma * sigma_data / std::sqrt(sigma * sigma + sigma_data * sigma_data);
         float c_in = 1.0f / std::sqrt(sigma * sigma + sigma_data * sigma_data);
@@ -429,19 +429,19 @@ struct EDMVDenoiser : public CompVisVDenoiser {
         scheduler = std::make_shared<ExponentialSchedule>();
     }
-    float t_to_sigma(float t) {
+    float t_to_sigma(float t) override {
         return std::exp(t * 4 / (float)TIMESTEPS);
     }
-    float sigma_to_t(float s) {
+    float sigma_to_t(float s) override {
         return 0.25 * std::log(s);
     }
-    float sigma_min() {
+    float sigma_min() override {
         return min_sigma;
     }
-    float sigma_max() {
+    float sigma_max() override {
         return max_sigma;
     }
 };
@@ -470,24 +470,24 @@ struct DiscreteFlowDenoiser : public Denoiser {
         }
     }
-    float sigma_min() {
+    float sigma_min() override {
         return sigmas[0];
     }
-    float sigma_max() {
+    float sigma_max() override {
         return sigmas[TIMESTEPS - 1];
     }
-    float sigma_to_t(float sigma) {
+    float sigma_to_t(float sigma) override {
         return sigma * 1000.f;
     }
-    float t_to_sigma(float t) {
+    float t_to_sigma(float t) override {
         t = t + 1;
         return time_snr_shift(shift, t / 1000.f);
     }
-    std::vector<float> get_scalings(float sigma) {
+    std::vector<float> get_scalings(float sigma) override {
         float c_skip = 1.0f;
         float c_out = -sigma;
         float c_in = 1.0f;
@@ -495,14 +495,14 @@ struct DiscreteFlowDenoiser : public Denoiser {
     }
     // this function will modify noise/latent
-    ggml_tensor* noise_scaling(float sigma, ggml_tensor* noise, ggml_tensor* latent) {
+    ggml_tensor* noise_scaling(float sigma, ggml_tensor* noise, ggml_tensor* latent) override {
         ggml_tensor_scale(noise, sigma);
         ggml_tensor_scale(latent, 1.0f - sigma);
         ggml_tensor_add(latent, noise);
         return latent;
     }
-    ggml_tensor* inverse_noise_scaling(float sigma, ggml_tensor* latent) {
+    ggml_tensor* inverse_noise_scaling(float sigma, ggml_tensor* latent) override {
         ggml_tensor_scale(latent, 1.0f / (1.0f - sigma));
         return latent;
     }
@@ -529,24 +529,24 @@ struct FluxFlowDenoiser : public Denoiser {
         }
     }
-    float sigma_min() {
+    float sigma_min() override {
         return sigmas[0];
     }
-    float sigma_max() {
+    float sigma_max() override {
         return sigmas[TIMESTEPS - 1];
     }
-    float sigma_to_t(float sigma) {
+    float sigma_to_t(float sigma) override {
         return sigma;
     }
-    float t_to_sigma(float t) {
+    float t_to_sigma(float t) override {
         t = t + 1;
         return flux_time_shift(shift, 1.0f, t / TIMESTEPS);
     }
-    std::vector<float> get_scalings(float sigma) {
+    std::vector<float> get_scalings(float sigma) override {
         float c_skip = 1.0f;
         float c_out = -sigma;
         float c_in = 1.0f;
@@ -554,14 +554,14 @@ struct FluxFlowDenoiser : public Denoiser {
     }
     // this function will modify noise/latent
-    ggml_tensor* noise_scaling(float sigma, ggml_tensor* noise, ggml_tensor* latent) {
+    ggml_tensor* noise_scaling(float sigma, ggml_tensor* noise, ggml_tensor* latent) override {
         ggml_tensor_scale(noise, sigma);
         ggml_tensor_scale(latent, 1.0f - sigma);
         ggml_tensor_add(latent, noise);
         return latent;
     }
-    ggml_tensor* inverse_noise_scaling(float sigma, ggml_tensor* latent) {
+    ggml_tensor* inverse_noise_scaling(float sigma, ggml_tensor* latent) override {
         ggml_tensor_scale(latent, 1.0f / (1.0f - sigma));
         return latent;
     }

===== next changed file =====

@@ -8,18 +8,18 @@
 #include "wan.hpp"
 struct DiffusionParams {
-    struct ggml_tensor* x = NULL;
-    struct ggml_tensor* timesteps = NULL;
-    struct ggml_tensor* context = NULL;
-    struct ggml_tensor* c_concat = NULL;
-    struct ggml_tensor* y = NULL;
-    struct ggml_tensor* guidance = NULL;
+    struct ggml_tensor* x = nullptr;
+    struct ggml_tensor* timesteps = nullptr;
+    struct ggml_tensor* context = nullptr;
+    struct ggml_tensor* c_concat = nullptr;
+    struct ggml_tensor* y = nullptr;
+    struct ggml_tensor* guidance = nullptr;
     std::vector<ggml_tensor*> ref_latents = {};
     bool increase_ref_index = false;
     int num_video_frames = -1;
     std::vector<struct ggml_tensor*> controls = {};
     float control_strength = 0.f;
-    struct ggml_tensor* vace_context = NULL;
+    struct ggml_tensor* vace_context = nullptr;
     float vace_strength = 1.f;
     std::vector<int> skip_layers = {};
 };
@@ -28,8 +28,8 @@ struct DiffusionModel {
     virtual std::string get_desc() = 0;
     virtual void compute(int n_threads,
                          DiffusionParams diffusion_params,
-                         struct ggml_tensor** output = NULL,
-                         struct ggml_context* output_ctx = NULL) = 0;
+                         struct ggml_tensor** output = nullptr,
+                         struct ggml_context* output_ctx = nullptr) = 0;
     virtual void alloc_params_buffer() = 0;
     virtual void free_params_buffer() = 0;
     virtual void free_compute_buffer() = 0;
@@ -49,38 +49,38 @@ struct UNetModel : public DiffusionModel {
         : unet(backend, offload_params_to_cpu, tensor_types, "model.diffusion_model", version, flash_attn) {
     }
-    std::string get_desc() {
+    std::string get_desc() override {
         return unet.get_desc();
     }
-    void alloc_params_buffer() {
+    void alloc_params_buffer() override {
         unet.alloc_params_buffer();
     }
-    void free_params_buffer() {
+    void free_params_buffer() override {
         unet.free_params_buffer();
     }
-    void free_compute_buffer() {
+    void free_compute_buffer() override {
         unet.free_compute_buffer();
     }
-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
+    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
         unet.get_param_tensors(tensors, "model.diffusion_model");
     }
-    size_t get_params_buffer_size() {
+    size_t get_params_buffer_size() override {
         return unet.get_params_buffer_size();
     }
-    int64_t get_adm_in_channels() {
+    int64_t get_adm_in_channels() override {
         return unet.unet.adm_in_channels;
     }
     void compute(int n_threads,
                  DiffusionParams diffusion_params,
-                 struct ggml_tensor** output = NULL,
-                 struct ggml_context* output_ctx = NULL) {
+                 struct ggml_tensor** output = nullptr,
+                 struct ggml_context* output_ctx = nullptr) override {
         return unet.compute(n_threads,
                             diffusion_params.x,
                             diffusion_params.timesteps,
@@ -103,38 +103,38 @@ struct MMDiTModel : public DiffusionModel {
         : mmdit(backend, offload_params_to_cpu, flash_attn, tensor_types, "model.diffusion_model") {
     }
-    std::string get_desc() {
+    std::string get_desc() override {
         return mmdit.get_desc();
     }
-    void alloc_params_buffer() {
+    void alloc_params_buffer() override {
         mmdit.alloc_params_buffer();
     }
-    void free_params_buffer() {
+    void free_params_buffer() override {
         mmdit.free_params_buffer();
     }
-    void free_compute_buffer() {
+    void free_compute_buffer() override {
         mmdit.free_compute_buffer();
     }
-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
+    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
         mmdit.get_param_tensors(tensors, "model.diffusion_model");
     }
-    size_t get_params_buffer_size() {
+    size_t get_params_buffer_size() override {
         return mmdit.get_params_buffer_size();
    }
-    int64_t get_adm_in_channels() {
+    int64_t get_adm_in_channels() override {
         return 768 + 1280;
     }
     void compute(int n_threads,
                  DiffusionParams diffusion_params,
-                 struct ggml_tensor** output = NULL,
-                 struct ggml_context* output_ctx = NULL) {
+                 struct ggml_tensor** output = nullptr,
+                 struct ggml_context* output_ctx = nullptr) override {
         return mmdit.compute(n_threads,
                              diffusion_params.x,
                              diffusion_params.timesteps,
@@ -158,38 +158,38 @@ struct FluxModel : public DiffusionModel {
         : flux(backend, offload_params_to_cpu, tensor_types, "model.diffusion_model", version, flash_attn, use_mask) {
     }
-    std::string get_desc() {
+    std::string get_desc() override {
         return flux.get_desc();
     }
-    void alloc_params_buffer() {
+    void alloc_params_buffer() override {
         flux.alloc_params_buffer();
     }
-    void free_params_buffer() {
+    void free_params_buffer() override {
         flux.free_params_buffer();
     }
-    void free_compute_buffer() {
+    void free_compute_buffer() override {
         flux.free_compute_buffer();
     }
-    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
+    void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
         flux.get_param_tensors(tensors, "model.diffusion_model");
     }
-    size_t get_params_buffer_size() {
+    size_t get_params_buffer_size() override {
         return flux.get_params_buffer_size();
     }
-    int64_t get_adm_in_channels() {
+    int64_t get_adm_in_channels() override {
         return 768;
     }
     void compute(int n_threads,
                  DiffusionParams diffusion_params,
-                 struct ggml_tensor** output = NULL,
+                 struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = NULL) { struct ggml_context* output_ctx = nullptr) override {
return flux.compute(n_threads, return flux.compute(n_threads,
diffusion_params.x, diffusion_params.x,
diffusion_params.timesteps, diffusion_params.timesteps,
@ -218,45 +218,45 @@ struct WanModel : public DiffusionModel {
: prefix(prefix), wan(backend, offload_params_to_cpu, tensor_types, prefix, version, flash_attn) { : prefix(prefix), wan(backend, offload_params_to_cpu, tensor_types, prefix, version, flash_attn) {
} }
std::string get_desc() { std::string get_desc() override {
return wan.get_desc(); return wan.get_desc();
} }
void alloc_params_buffer() { void alloc_params_buffer() override {
wan.alloc_params_buffer(); wan.alloc_params_buffer();
} }
void free_params_buffer() { void free_params_buffer() override {
wan.free_params_buffer(); wan.free_params_buffer();
} }
void free_compute_buffer() { void free_compute_buffer() override {
wan.free_compute_buffer(); wan.free_compute_buffer();
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) { void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
wan.get_param_tensors(tensors, prefix); wan.get_param_tensors(tensors, prefix);
} }
size_t get_params_buffer_size() { size_t get_params_buffer_size() override {
return wan.get_params_buffer_size(); return wan.get_params_buffer_size();
} }
int64_t get_adm_in_channels() { int64_t get_adm_in_channels() override {
return 768; return 768;
} }
void compute(int n_threads, void compute(int n_threads,
DiffusionParams diffusion_params, DiffusionParams diffusion_params,
struct ggml_tensor** output = NULL, struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = NULL) { struct ggml_context* output_ctx = nullptr) override {
return wan.compute(n_threads, return wan.compute(n_threads,
diffusion_params.x, diffusion_params.x,
diffusion_params.timesteps, diffusion_params.timesteps,
diffusion_params.context, diffusion_params.context,
diffusion_params.y, diffusion_params.y,
diffusion_params.c_concat, diffusion_params.c_concat,
NULL, nullptr,
diffusion_params.vace_context, diffusion_params.vace_context,
diffusion_params.vace_strength, diffusion_params.vace_strength,
output, output,
@ -277,38 +277,38 @@ struct QwenImageModel : public DiffusionModel {
: prefix(prefix), qwen_image(backend, offload_params_to_cpu, tensor_types, prefix, version, flash_attn) { : prefix(prefix), qwen_image(backend, offload_params_to_cpu, tensor_types, prefix, version, flash_attn) {
} }
std::string get_desc() { std::string get_desc() override {
return qwen_image.get_desc(); return qwen_image.get_desc();
} }
void alloc_params_buffer() { void alloc_params_buffer() override {
qwen_image.alloc_params_buffer(); qwen_image.alloc_params_buffer();
} }
void free_params_buffer() { void free_params_buffer() override {
qwen_image.free_params_buffer(); qwen_image.free_params_buffer();
} }
void free_compute_buffer() { void free_compute_buffer() override {
qwen_image.free_compute_buffer(); qwen_image.free_compute_buffer();
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) { void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
qwen_image.get_param_tensors(tensors, prefix); qwen_image.get_param_tensors(tensors, prefix);
} }
size_t get_params_buffer_size() { size_t get_params_buffer_size() override {
return qwen_image.get_params_buffer_size(); return qwen_image.get_params_buffer_size();
} }
int64_t get_adm_in_channels() { int64_t get_adm_in_channels() override {
return 768; return 768;
} }
void compute(int n_threads, void compute(int n_threads,
DiffusionParams diffusion_params, DiffusionParams diffusion_params,
struct ggml_tensor** output = NULL, struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = NULL) { struct ggml_context* output_ctx = nullptr) override {
return qwen_image.compute(n_threads, return qwen_image.compute(n_threads,
diffusion_params.x, diffusion_params.x,
diffusion_params.timesteps, diffusion_params.timesteps,


@ -174,7 +174,7 @@ struct ESRGAN : public GGMLRunner {
} }
} }
std::string get_desc() { std::string get_desc() override {
return "esrgan"; return "esrgan";
} }
@ -367,7 +367,7 @@ struct ESRGAN : public GGMLRunner {
void compute(const int n_threads, void compute(const int n_threads,
struct ggml_tensor* x, struct ggml_tensor* x,
ggml_tensor** output, ggml_tensor** output,
ggml_context* output_ctx = NULL) { ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(x); return build_graph(x);
}; };


@ -1,10 +1,10 @@
#ifndef __AVI_WRITER_H__ #ifndef __AVI_WRITER_H__
#define __AVI_WRITER_H__ #define __AVI_WRITER_H__
#include <stdint.h> #include <cstdint>
#include <stdio.h> #include <cstdio>
#include <stdlib.h> #include <cstdlib>
#include <string.h> #include <cstring>
#include "stable-diffusion.h" #include "stable-diffusion.h"
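
The header swap above is modernize-deprecated-headers: the C compatibility headers <stdint.h>, <stdio.h>, and friends become their <c*> counterparts, which declare the same names in namespace std (and, in practice, usually in the global namespace as well), so existing calls keep compiling. A minimal standalone sketch:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
    std::uint32_t riff = 0;
    std::memcpy(&riff, "RIFF", sizeof(riff));  // same functions, now reachable via std::
    std::printf("0x%08x\n", (unsigned)riff);
    return 0;
}
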
@ -130,7 +130,7 @@ int create_mjpg_avi_from_sd_images(const char* filename, sd_image_t* images, int
write_u32_le(f, 0); // Colors important write_u32_le(f, 0); // Colors important
// 'movi' LIST (video frames) // 'movi' LIST (video frames)
long movi_list_pos = ftell(f); // long movi_list_pos = ftell(f);
fwrite("LIST", 4, 1, f); fwrite("LIST", 4, 1, f);
long movi_size_pos = ftell(f); long movi_size_pos = ftell(f);
write_u32_le(f, 0); // Placeholder for movi size write_u32_le(f, 0); // Placeholder for movi size
@ -149,7 +149,7 @@ int create_mjpg_avi_from_sd_images(const char* filename, sd_image_t* images, int
} jpeg_data; } jpeg_data;
for (int i = 0; i < num_images; i++) { for (int i = 0; i < num_images; i++) {
jpeg_data.buf = NULL; jpeg_data.buf = nullptr;
jpeg_data.size = 0; jpeg_data.size = 0;
// Callback function to collect JPEG data into memory // Callback function to collect JPEG data into memory


@ -808,7 +808,7 @@ void parse_args(int argc, const char** argv, SDParams& params) {
return -1; return -1;
} }
const char* mode = argv[index]; const char* mode = argv[index];
if (mode != NULL) { if (mode != nullptr) {
int mode_found = -1; int mode_found = -1;
for (int i = 0; i < MODE_COUNT; i++) { for (int i = 0; i < MODE_COUNT; i++) {
if (!strcmp(mode, modes_str[i])) { if (!strcmp(mode, modes_str[i])) {
@ -1199,7 +1199,7 @@ void parse_args(int argc, const char** argv, SDParams& params) {
} }
if (params.seed < 0) { if (params.seed < 0) {
srand((int)time(NULL)); srand((int)time(nullptr));
params.seed = rand(); params.seed = rand();
} }
@ -1314,9 +1314,9 @@ void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) {
uint8_t* load_image(const char* image_path, int& width, int& height, int expected_width = 0, int expected_height = 0, int expected_channel = 3) { uint8_t* load_image(const char* image_path, int& width, int& height, int expected_width = 0, int expected_height = 0, int expected_channel = 3) {
int c = 0; int c = 0;
uint8_t* image_buffer = (uint8_t*)stbi_load(image_path, &width, &height, &c, expected_channel); uint8_t* image_buffer = (uint8_t*)stbi_load(image_path, &width, &height, &c, expected_channel);
if (image_buffer == NULL) { if (image_buffer == nullptr) {
fprintf(stderr, "load image from '%s' failed\n", image_path); fprintf(stderr, "load image from '%s' failed\n", image_path);
return NULL; return nullptr;
} }
if (c < expected_channel) { if (c < expected_channel) {
fprintf(stderr, fprintf(stderr,
@ -1326,17 +1326,17 @@ uint8_t* load_image(const char* image_path, int& width, int& height, int expecte
c, c,
image_path); image_path);
free(image_buffer); free(image_buffer);
return NULL; return nullptr;
} }
if (width <= 0) { if (width <= 0) {
fprintf(stderr, "error: the width of image must be greater than 0, image_path = %s\n", image_path); fprintf(stderr, "error: the width of image must be greater than 0, image_path = %s\n", image_path);
free(image_buffer); free(image_buffer);
return NULL; return nullptr;
} }
if (height <= 0) { if (height <= 0) {
fprintf(stderr, "error: the height of image must be greater than 0, image_path = %s\n", image_path); fprintf(stderr, "error: the height of image must be greater than 0, image_path = %s\n", image_path);
free(image_buffer); free(image_buffer);
return NULL; return nullptr;
} }
// Resize input image ... // Resize input image ...
@ -1358,10 +1358,10 @@ uint8_t* load_image(const char* image_path, int& width, int& height, int expecte
if (crop_x != 0 || crop_y != 0) { if (crop_x != 0 || crop_y != 0) {
printf("crop input image from %dx%d to %dx%d, image_path = %s\n", width, height, crop_w, crop_h, image_path); printf("crop input image from %dx%d to %dx%d, image_path = %s\n", width, height, crop_w, crop_h, image_path);
uint8_t* cropped_image_buffer = (uint8_t*)malloc(crop_w * crop_h * expected_channel); uint8_t* cropped_image_buffer = (uint8_t*)malloc(crop_w * crop_h * expected_channel);
if (cropped_image_buffer == NULL) { if (cropped_image_buffer == nullptr) {
fprintf(stderr, "error: allocate memory for crop\n"); fprintf(stderr, "error: allocate memory for crop\n");
free(image_buffer); free(image_buffer);
return NULL; return nullptr;
} }
for (int row = 0; row < crop_h; row++) { for (int row = 0; row < crop_h; row++) {
uint8_t* src = image_buffer + ((crop_y + row) * width + crop_x) * expected_channel; uint8_t* src = image_buffer + ((crop_y + row) * width + crop_x) * expected_channel;
@ -1380,10 +1380,10 @@ uint8_t* load_image(const char* image_path, int& width, int& height, int expecte
int resized_width = expected_width; int resized_width = expected_width;
uint8_t* resized_image_buffer = (uint8_t*)malloc(resized_height * resized_width * expected_channel); uint8_t* resized_image_buffer = (uint8_t*)malloc(resized_height * resized_width * expected_channel);
if (resized_image_buffer == NULL) { if (resized_image_buffer == nullptr) {
fprintf(stderr, "error: allocate memory for resize input image\n"); fprintf(stderr, "error: allocate memory for resize input image\n");
free(image_buffer); free(image_buffer);
return NULL; return nullptr;
} }
stbir_resize(image_buffer, width, height, 0, stbir_resize(image_buffer, width, height, 0,
resized_image_buffer, resized_width, resized_height, 0, STBIR_TYPE_UINT8, resized_image_buffer, resized_width, resized_height, 0, STBIR_TYPE_UINT8,
@ -1434,7 +1434,7 @@ bool load_images_from_dir(const std::string dir,
int width = 0; int width = 0;
int height = 0; int height = 0;
uint8_t* image_buffer = load_image(path.c_str(), width, height, expected_width, expected_height); uint8_t* image_buffer = load_image(path.c_str(), width, height, expected_width, expected_height);
if (image_buffer == NULL) { if (image_buffer == nullptr) {
fprintf(stderr, "load image from '%s' failed\n", path.c_str()); fprintf(stderr, "load image from '%s' failed\n", path.c_str());
return false; return false;
} }
@ -1486,10 +1486,10 @@ int main(int argc, const char* argv[]) {
} }
bool vae_decode_only = true; bool vae_decode_only = true;
sd_image_t init_image = {(uint32_t)params.width, (uint32_t)params.height, 3, NULL}; sd_image_t init_image = {(uint32_t)params.width, (uint32_t)params.height, 3, nullptr};
sd_image_t end_image = {(uint32_t)params.width, (uint32_t)params.height, 3, NULL}; sd_image_t end_image = {(uint32_t)params.width, (uint32_t)params.height, 3, nullptr};
sd_image_t control_image = {(uint32_t)params.width, (uint32_t)params.height, 3, NULL}; sd_image_t control_image = {(uint32_t)params.width, (uint32_t)params.height, 3, nullptr};
sd_image_t mask_image = {(uint32_t)params.width, (uint32_t)params.height, 1, NULL}; sd_image_t mask_image = {(uint32_t)params.width, (uint32_t)params.height, 1, nullptr};
std::vector<sd_image_t> ref_images; std::vector<sd_image_t> ref_images;
std::vector<sd_image_t> pmid_images; std::vector<sd_image_t> pmid_images;
std::vector<sd_image_t> control_frames; std::vector<sd_image_t> control_frames;
@ -1501,17 +1501,17 @@ int main(int argc, const char* argv[]) {
free(mask_image.data); free(mask_image.data);
for (auto image : ref_images) { for (auto image : ref_images) {
free(image.data); free(image.data);
image.data = NULL; image.data = nullptr;
} }
ref_images.clear(); ref_images.clear();
for (auto image : pmid_images) { for (auto image : pmid_images) {
free(image.data); free(image.data);
image.data = NULL; image.data = nullptr;
} }
pmid_images.clear(); pmid_images.clear();
for (auto image : control_frames) { for (auto image : control_frames) {
free(image.data); free(image.data);
image.data = NULL; image.data = nullptr;
} }
control_frames.clear(); control_frames.clear();
}; };
@ -1522,7 +1522,7 @@ int main(int argc, const char* argv[]) {
int width = 0; int width = 0;
int height = 0; int height = 0;
init_image.data = load_image(params.init_image_path.c_str(), width, height, params.width, params.height); init_image.data = load_image(params.init_image_path.c_str(), width, height, params.width, params.height);
if (init_image.data == NULL) { if (init_image.data == nullptr) {
fprintf(stderr, "load image from '%s' failed\n", params.init_image_path.c_str()); fprintf(stderr, "load image from '%s' failed\n", params.init_image_path.c_str());
release_all_resources(); release_all_resources();
return 1; return 1;
@ -1535,7 +1535,7 @@ int main(int argc, const char* argv[]) {
int width = 0; int width = 0;
int height = 0; int height = 0;
end_image.data = load_image(params.end_image_path.c_str(), width, height, params.width, params.height); end_image.data = load_image(params.end_image_path.c_str(), width, height, params.width, params.height);
if (end_image.data == NULL) { if (end_image.data == nullptr) {
fprintf(stderr, "load image from '%s' failed\n", params.end_image_path.c_str()); fprintf(stderr, "load image from '%s' failed\n", params.end_image_path.c_str());
release_all_resources(); release_all_resources();
return 1; return 1;
@ -1547,7 +1547,7 @@ int main(int argc, const char* argv[]) {
int width = 0; int width = 0;
int height = 0; int height = 0;
mask_image.data = load_image(params.mask_image_path.c_str(), width, height, params.width, params.height, 1); mask_image.data = load_image(params.mask_image_path.c_str(), width, height, params.width, params.height, 1);
if (mask_image.data == NULL) { if (mask_image.data == nullptr) {
fprintf(stderr, "load image from '%s' failed\n", params.mask_image_path.c_str()); fprintf(stderr, "load image from '%s' failed\n", params.mask_image_path.c_str());
release_all_resources(); release_all_resources();
return 1; return 1;
@ -1555,7 +1555,7 @@ int main(int argc, const char* argv[]) {
} else { } else {
mask_image.data = (uint8_t*)malloc(params.width * params.height); mask_image.data = (uint8_t*)malloc(params.width * params.height);
memset(mask_image.data, 255, params.width * params.height); memset(mask_image.data, 255, params.width * params.height);
if (mask_image.data == NULL) { if (mask_image.data == nullptr) {
fprintf(stderr, "malloc mask image failed\n"); fprintf(stderr, "malloc mask image failed\n");
release_all_resources(); release_all_resources();
return 1; return 1;
@ -1566,7 +1566,7 @@ int main(int argc, const char* argv[]) {
int width = 0; int width = 0;
int height = 0; int height = 0;
control_image.data = load_image(params.control_image_path.c_str(), width, height, params.width, params.height); control_image.data = load_image(params.control_image_path.c_str(), width, height, params.width, params.height);
if (control_image.data == NULL) { if (control_image.data == nullptr) {
fprintf(stderr, "load image from '%s' failed\n", params.control_image_path.c_str()); fprintf(stderr, "load image from '%s' failed\n", params.control_image_path.c_str());
release_all_resources(); release_all_resources();
return 1; return 1;
@ -1587,7 +1587,7 @@ int main(int argc, const char* argv[]) {
int width = 0; int width = 0;
int height = 0; int height = 0;
uint8_t* image_buffer = load_image(path.c_str(), width, height); uint8_t* image_buffer = load_image(path.c_str(), width, height);
if (image_buffer == NULL) { if (image_buffer == nullptr) {
fprintf(stderr, "load image from '%s' failed\n", path.c_str()); fprintf(stderr, "load image from '%s' failed\n", path.c_str());
release_all_resources(); release_all_resources();
return 1; return 1;
@ -1669,18 +1669,18 @@ int main(int argc, const char* argv[]) {
if (params.mode == UPSCALE) { if (params.mode == UPSCALE) {
num_results = 1; num_results = 1;
results = (sd_image_t*)calloc(num_results, sizeof(sd_image_t)); results = (sd_image_t*)calloc(num_results, sizeof(sd_image_t));
if (results == NULL) { if (results == nullptr) {
printf("failed to allocate results array\n"); printf("failed to allocate results array\n");
release_all_resources(); release_all_resources();
return 1; return 1;
} }
results[0] = init_image; results[0] = init_image;
init_image.data = NULL; init_image.data = nullptr;
} else { } else {
sd_ctx_t* sd_ctx = new_sd_ctx(&sd_ctx_params); sd_ctx_t* sd_ctx = new_sd_ctx(&sd_ctx_params);
if (sd_ctx == NULL) { if (sd_ctx == nullptr) {
printf("new_sd_ctx_t failed\n"); printf("new_sd_ctx_t failed\n");
release_all_resources(); release_all_resources();
return 1; return 1;
@ -1743,7 +1743,7 @@ int main(int argc, const char* argv[]) {
results = generate_video(sd_ctx, &vid_gen_params, &num_results); results = generate_video(sd_ctx, &vid_gen_params, &num_results);
} }
if (results == NULL) { if (results == nullptr) {
printf("generate failed\n"); printf("generate failed\n");
free_sd_ctx(sd_ctx); free_sd_ctx(sd_ctx);
return 1; return 1;
@ -1759,17 +1759,17 @@ int main(int argc, const char* argv[]) {
params.diffusion_conv_direct, params.diffusion_conv_direct,
params.n_threads); params.n_threads);
if (upscaler_ctx == NULL) { if (upscaler_ctx == nullptr) {
printf("new_upscaler_ctx failed\n"); printf("new_upscaler_ctx failed\n");
} else { } else {
for (int i = 0; i < num_results; i++) { for (int i = 0; i < num_results; i++) {
if (results[i].data == NULL) { if (results[i].data == nullptr) {
continue; continue;
} }
sd_image_t current_image = results[i]; sd_image_t current_image = results[i];
for (int u = 0; u < params.upscale_repeats; ++u) { for (int u = 0; u < params.upscale_repeats; ++u) {
sd_image_t upscaled_image = upscale(upscaler_ctx, current_image, upscale_factor); sd_image_t upscaled_image = upscale(upscaler_ctx, current_image, upscale_factor);
if (upscaled_image.data == NULL) { if (upscaled_image.data == nullptr) {
printf("upscale failed\n"); printf("upscale failed\n");
break; break;
} }
@ -1827,7 +1827,7 @@ int main(int argc, const char* argv[]) {
file_ext = ".png"; file_ext = ".png";
} }
for (int i = 0; i < num_results; i++) { for (int i = 0; i < num_results; i++) {
if (results[i].data == NULL) { if (results[i].data == nullptr) {
continue; continue;
} }
std::string final_image_path = i > 0 ? base_path + "_" + std::to_string(i + 1) + file_ext : base_path + file_ext; std::string final_image_path = i > 0 ? base_path + "_" + std::to_string(i + 1) + file_ext : base_path + file_ext;
@ -1845,7 +1845,7 @@ int main(int argc, const char* argv[]) {
for (int i = 0; i < num_results; i++) { for (int i = 0; i < num_results; i++) {
free(results[i].data); free(results[i].data);
results[i].data = NULL; results[i].data = nullptr;
} }
free(results); free(results);


@ -1,6 +1,7 @@
#ifndef __FLUX_HPP__ #ifndef __FLUX_HPP__
#define __FLUX_HPP__ #define __FLUX_HPP__
#include <memory>
#include <vector> #include <vector>
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
@ -18,7 +19,7 @@ namespace Flux {
blocks["out_layer"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_dim, hidden_dim, true)); blocks["out_layer"] = std::shared_ptr<GGMLBlock>(new Linear(hidden_dim, hidden_dim, true));
} }
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
// x: [..., in_dim] // x: [..., in_dim]
// return: [..., hidden_dim] // return: [..., hidden_dim]
auto in_layer = std::dynamic_pointer_cast<Linear>(blocks["in_layer"]); auto in_layer = std::dynamic_pointer_cast<Linear>(blocks["in_layer"]);
@ -36,7 +37,7 @@ namespace Flux {
int64_t hidden_size; int64_t hidden_size;
float eps; float eps;
void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") { void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
ggml_type wtype = GGML_TYPE_F32; ggml_type wtype = GGML_TYPE_F32;
params["scale"] = ggml_new_tensor_1d(ctx, wtype, hidden_size); params["scale"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
} }
@ -47,7 +48,7 @@ namespace Flux {
: hidden_size(hidden_size), : hidden_size(hidden_size),
eps(eps) {} eps(eps) {}
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
struct ggml_tensor* w = params["scale"]; struct ggml_tensor* w = params["scale"];
x = ggml_rms_norm(ctx, x, eps); x = ggml_rms_norm(ctx, x, eps);
x = ggml_mul(ctx, x, w); x = ggml_mul(ctx, x, w);
@ -136,11 +137,11 @@ namespace Flux {
}; };
struct ModulationOut { struct ModulationOut {
ggml_tensor* shift = NULL; ggml_tensor* shift = nullptr;
ggml_tensor* scale = NULL; ggml_tensor* scale = nullptr;
ggml_tensor* gate = NULL; ggml_tensor* gate = nullptr;
ModulationOut(ggml_tensor* shift = NULL, ggml_tensor* scale = NULL, ggml_tensor* gate = NULL) ModulationOut(ggml_tensor* shift = nullptr, ggml_tensor* scale = nullptr, ggml_tensor* gate = nullptr)
: shift(shift), scale(scale), gate(gate) {} : shift(shift), scale(scale), gate(gate) {}
ModulationOut(struct ggml_context* ctx, ggml_tensor* vec, int64_t offset) { ModulationOut(struct ggml_context* ctx, ggml_tensor* vec, int64_t offset) {
@ -259,7 +260,7 @@ namespace Flux {
struct ggml_tensor* txt, struct ggml_tensor* txt,
struct ggml_tensor* vec, struct ggml_tensor* vec,
struct ggml_tensor* pe, struct ggml_tensor* pe,
struct ggml_tensor* mask = NULL) { struct ggml_tensor* mask = nullptr) {
// img: [N, n_img_token, hidden_size] // img: [N, n_img_token, hidden_size]
// txt: [N, n_txt_token, hidden_size] // txt: [N, n_txt_token, hidden_size]
// pe: [n_img_token + n_txt_token, d_head/2, 2, 2] // pe: [n_img_token + n_txt_token, d_head/2, 2, 2]
@ -398,7 +399,7 @@ namespace Flux {
ModulationOut get_distil_mod(struct ggml_context* ctx, struct ggml_tensor* vec) { ModulationOut get_distil_mod(struct ggml_context* ctx, struct ggml_tensor* vec) {
int64_t offset = 3 * idx; int64_t offset = 3 * idx;
return ModulationOut(ctx, vec, offset); return {ctx, vec, offset};
} }
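
The `return {ctx, vec, offset};` form above is modernize-return-braced-init-list: when the function's return type is already spelled out, repeating it at the return statement is redundant. A standalone sketch with a hypothetical Span type (not from this codebase):

#include <cstdio>

struct Span {
    int offset;
    int length;
    Span(int offset, int length) : offset(offset), length(length) {}
};

static Span make_span(int offset) {
    // Equivalent to `return Span(offset, 3);` without repeating the type name.
    return {offset, 3};
}

int main() {
    Span s = make_span(42);
    std::printf("%d %d\n", s.offset, s.length);
    return 0;
}
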
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* forward(struct ggml_context* ctx,
@ -406,7 +407,7 @@ namespace Flux {
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* vec, struct ggml_tensor* vec,
struct ggml_tensor* pe, struct ggml_tensor* pe,
struct ggml_tensor* mask = NULL) { struct ggml_tensor* mask = nullptr) {
// x: [N, n_token, hidden_size] // x: [N, n_token, hidden_size]
// pe: [n_token, d_head/2, 2, 2] // pe: [n_token, d_head/2, 2, 2]
// return: [N, n_token, hidden_size] // return: [N, n_token, hidden_size]
@ -485,7 +486,7 @@ namespace Flux {
auto shift = ggml_view_2d(ctx, vec, vec->ne[0], vec->ne[1], vec->nb[1], stride * (offset + 0)); // [N, dim] auto shift = ggml_view_2d(ctx, vec, vec->ne[0], vec->ne[1], vec->nb[1], stride * (offset + 0)); // [N, dim]
auto scale = ggml_view_2d(ctx, vec, vec->ne[0], vec->ne[1], vec->nb[1], stride * (offset + 1)); // [N, dim] auto scale = ggml_view_2d(ctx, vec, vec->ne[0], vec->ne[1], vec->nb[1], stride * (offset + 1)); // [N, dim]
// No gate // No gate
return ModulationOut(shift, scale, NULL); return {shift, scale, nullptr};
} }
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* forward(struct ggml_context* ctx,
@ -664,7 +665,7 @@ namespace Flux {
struct ggml_tensor* y, struct ggml_tensor* y,
struct ggml_tensor* guidance, struct ggml_tensor* guidance,
struct ggml_tensor* pe, struct ggml_tensor* pe,
struct ggml_tensor* mod_index_arange = NULL, struct ggml_tensor* mod_index_arange = nullptr,
std::vector<int> skip_layers = {}) { std::vector<int> skip_layers = {}) {
auto img_in = std::dynamic_pointer_cast<Linear>(blocks["img_in"]); auto img_in = std::dynamic_pointer_cast<Linear>(blocks["img_in"]);
auto txt_in = std::dynamic_pointer_cast<Linear>(blocks["txt_in"]); auto txt_in = std::dynamic_pointer_cast<Linear>(blocks["txt_in"]);
@ -672,7 +673,7 @@ namespace Flux {
img = img_in->forward(ctx, img); img = img_in->forward(ctx, img);
struct ggml_tensor* vec; struct ggml_tensor* vec;
struct ggml_tensor* txt_img_mask = NULL; struct ggml_tensor* txt_img_mask = nullptr;
if (params.is_chroma) { if (params.is_chroma) {
int64_t mod_index_length = 344; int64_t mod_index_length = 344;
auto approx = std::dynamic_pointer_cast<ChromaApproximator>(blocks["distilled_guidance_layer"]); auto approx = std::dynamic_pointer_cast<ChromaApproximator>(blocks["distilled_guidance_layer"]);
@ -681,7 +682,7 @@ namespace Flux {
// auto mod_index_arange = ggml_arange(ctx, 0, (float)mod_index_length, 1); // auto mod_index_arange = ggml_arange(ctx, 0, (float)mod_index_length, 1);
// ggml_arange not working on a lot of backends, precomputing it on CPU instead // ggml_arange not working on a lot of backends, precomputing it on CPU instead
GGML_ASSERT(arange != NULL); GGML_ASSERT(arange != nullptr);
auto modulation_index = ggml_nn_timestep_embedding(ctx, mod_index_arange, 32, 10000, 1000.f); // [1, 344, 32] auto modulation_index = ggml_nn_timestep_embedding(ctx, mod_index_arange, 32, 10000, 1000.f); // [1, 344, 32]
// Batch broadcast (will it ever be useful) // Batch broadcast (will it ever be useful)
@ -695,7 +696,7 @@ namespace Flux {
vec = ggml_cont(ctx, ggml_permute(ctx, vec, 0, 2, 1, 3)); // [344, N, 64] vec = ggml_cont(ctx, ggml_permute(ctx, vec, 0, 2, 1, 3)); // [344, N, 64]
vec = approx->forward(ctx, vec); // [344, N, hidden_size] vec = approx->forward(ctx, vec); // [344, N, hidden_size]
if (y != NULL) { if (y != nullptr) {
txt_img_mask = ggml_pad(ctx, y, img->ne[1], 0, 0, 0); txt_img_mask = ggml_pad(ctx, y, img->ne[1], 0, 0, 0);
} }
} else { } else {
@ -703,7 +704,7 @@ namespace Flux {
auto vector_in = std::dynamic_pointer_cast<MLPEmbedder>(blocks["vector_in"]); auto vector_in = std::dynamic_pointer_cast<MLPEmbedder>(blocks["vector_in"]);
vec = time_in->forward(ctx, ggml_nn_timestep_embedding(ctx, timesteps, 256, 10000, 1000.f)); vec = time_in->forward(ctx, ggml_nn_timestep_embedding(ctx, timesteps, 256, 10000, 1000.f));
if (params.guidance_embed) { if (params.guidance_embed) {
GGML_ASSERT(guidance != NULL); GGML_ASSERT(guidance != nullptr);
auto guidance_in = std::dynamic_pointer_cast<MLPEmbedder>(blocks["guidance_in"]); auto guidance_in = std::dynamic_pointer_cast<MLPEmbedder>(blocks["guidance_in"]);
// bf16 and fp16 result is different // bf16 and fp16 result is different
auto g_in = ggml_nn_timestep_embedding(ctx, guidance, 256, 10000, 1000.f); auto g_in = ggml_nn_timestep_embedding(ctx, guidance, 256, 10000, 1000.f);
@ -775,14 +776,14 @@ namespace Flux {
struct ggml_tensor* y, struct ggml_tensor* y,
struct ggml_tensor* guidance, struct ggml_tensor* guidance,
struct ggml_tensor* pe, struct ggml_tensor* pe,
struct ggml_tensor* mod_index_arange = NULL, struct ggml_tensor* mod_index_arange = nullptr,
std::vector<ggml_tensor*> ref_latents = {}, std::vector<ggml_tensor*> ref_latents = {},
std::vector<int> skip_layers = {}) { std::vector<int> skip_layers = {}) {
// Forward pass of DiT. // Forward pass of DiT.
// x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images) // x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
// timestep: (N,) tensor of diffusion timesteps // timestep: (N,) tensor of diffusion timesteps
// context: (N, L, D) // context: (N, L, D)
// c_concat: NULL, or for (N,C+M, H, W) for Fill // c_concat: nullptr, or for (N,C+M, H, W) for Fill
// y: (N, adm_in_channels) tensor of class labels // y: (N, adm_in_channels) tensor of class labels
// guidance: (N,) // guidance: (N,)
// pe: (L, d_head/2, 2, 2) // pe: (L, d_head/2, 2, 2)
@ -801,7 +802,7 @@ namespace Flux {
uint64_t img_tokens = img->ne[1]; uint64_t img_tokens = img->ne[1];
if (params.version == VERSION_FLUX_FILL) { if (params.version == VERSION_FLUX_FILL) {
GGML_ASSERT(c_concat != NULL); GGML_ASSERT(c_concat != nullptr);
ggml_tensor* masked = ggml_view_4d(ctx, c_concat, c_concat->ne[0], c_concat->ne[1], C, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], 0); ggml_tensor* masked = ggml_view_4d(ctx, c_concat, c_concat->ne[0], c_concat->ne[1], C, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], 0);
ggml_tensor* mask = ggml_view_4d(ctx, c_concat, c_concat->ne[0], c_concat->ne[1], 8 * 8, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * C); ggml_tensor* mask = ggml_view_4d(ctx, c_concat, c_concat->ne[0], c_concat->ne[1], 8 * 8, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * C);
@ -810,7 +811,7 @@ namespace Flux {
img = ggml_concat(ctx, img, ggml_concat(ctx, masked, mask, 0), 0); img = ggml_concat(ctx, img, ggml_concat(ctx, masked, mask, 0), 0);
} else if (params.version == VERSION_FLEX_2) { } else if (params.version == VERSION_FLEX_2) {
GGML_ASSERT(c_concat != NULL); GGML_ASSERT(c_concat != nullptr);
ggml_tensor* masked = ggml_view_4d(ctx, c_concat, c_concat->ne[0], c_concat->ne[1], C, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], 0); ggml_tensor* masked = ggml_view_4d(ctx, c_concat, c_concat->ne[0], c_concat->ne[1], C, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], 0);
ggml_tensor* mask = ggml_view_4d(ctx, c_concat, c_concat->ne[0], c_concat->ne[1], 1, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * C); ggml_tensor* mask = ggml_view_4d(ctx, c_concat, c_concat->ne[0], c_concat->ne[1], 1, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * C);
ggml_tensor* control = ggml_view_4d(ctx, c_concat, c_concat->ne[0], c_concat->ne[1], C, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * (C + 1)); ggml_tensor* control = ggml_view_4d(ctx, c_concat, c_concat->ne[0], c_concat->ne[1], C, 1, c_concat->nb[1], c_concat->nb[2], c_concat->nb[3], c_concat->nb[2] * (C + 1));
@ -825,7 +826,7 @@ namespace Flux {
img = ggml_concat(ctx, img, ggml_concat(ctx, ggml_concat(ctx, masked, mask, 0), control, 0), 0); img = ggml_concat(ctx, img, ggml_concat(ctx, ggml_concat(ctx, masked, mask, 0), control, 0), 0);
} else if (params.version == VERSION_FLUX_CONTROLS) { } else if (params.version == VERSION_FLUX_CONTROLS) {
GGML_ASSERT(c_concat != NULL); GGML_ASSERT(c_concat != nullptr);
ggml_tensor* control = ggml_pad(ctx, c_concat, pad_w, pad_h, 0, 0); ggml_tensor* control = ggml_pad(ctx, c_concat, pad_w, pad_h, 0, 0);
control = patchify(ctx, control, patch_size); control = patchify(ctx, control, patch_size);
@ -924,7 +925,7 @@ namespace Flux {
flux.init(params_ctx, tensor_types, prefix); flux.init(params_ctx, tensor_types, prefix);
} }
std::string get_desc() { std::string get_desc() override {
return "flux"; return "flux";
} }
@ -944,18 +945,18 @@ namespace Flux {
GGML_ASSERT(x->ne[3] == 1); GGML_ASSERT(x->ne[3] == 1);
struct ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, FLUX_GRAPH_SIZE, false); struct ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, FLUX_GRAPH_SIZE, false);
struct ggml_tensor* mod_index_arange = NULL; struct ggml_tensor* mod_index_arange = nullptr;
x = to_backend(x); x = to_backend(x);
context = to_backend(context); context = to_backend(context);
if (c_concat != NULL) { if (c_concat != nullptr) {
c_concat = to_backend(c_concat); c_concat = to_backend(c_concat);
} }
if (flux_params.is_chroma) { if (flux_params.is_chroma) {
guidance = ggml_set_f32(guidance, 0); guidance = ggml_set_f32(guidance, 0);
if (!use_mask) { if (!use_mask) {
y = NULL; y = nullptr;
} }
// ggml_arange is not working on some backends, precompute it // ggml_arange is not working on some backends, precompute it
@ -987,7 +988,7 @@ namespace Flux {
auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, flux_params.axes_dim_sum / 2, pos_len); auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, flux_params.axes_dim_sum / 2, pos_len);
// pe->data = pe_vec.data(); // pe->data = pe_vec.data();
// print_ggml_tensor(pe); // print_ggml_tensor(pe);
// pe->data = NULL; // pe->data = nullptr;
set_backend_tensor_data(pe, pe_vec.data()); set_backend_tensor_data(pe, pe_vec.data());
struct ggml_tensor* out = flux.forward(compute_ctx, struct ggml_tensor* out = flux.forward(compute_ctx,
@ -1017,8 +1018,8 @@ namespace Flux {
struct ggml_tensor* guidance, struct ggml_tensor* guidance,
std::vector<ggml_tensor*> ref_latents = {}, std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false, bool increase_ref_index = false,
struct ggml_tensor** output = NULL, struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = NULL, struct ggml_context* output_ctx = nullptr,
std::vector<int> skip_layers = std::vector<int>()) { std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
@ -1035,11 +1036,11 @@ namespace Flux {
void test() { void test() {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(20 * 1024 * 1024); // 20 MB params.mem_size = static_cast<size_t>(20 * 1024 * 1024); // 20 MB
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != NULL); GGML_ASSERT(work_ctx != nullptr);
{ {
// cpu f16: // cpu f16:
@ -1063,10 +1064,10 @@ namespace Flux {
ggml_set_f32(y, 0.01f); ggml_set_f32(y, 0.01f);
// print_ggml_tensor(y); // print_ggml_tensor(y);
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int t0 = ggml_time_ms(); int t0 = ggml_time_ms();
compute(8, x, timesteps, context, NULL, y, guidance, {}, false, &out, work_ctx); compute(8, x, timesteps, context, nullptr, y, guidance, {}, false, &out, work_ctx);
int t1 = ggml_time_ms(); int t1 = ggml_time_ms();
print_ggml_tensor(out); print_ggml_tensor(out);
@ -1078,7 +1079,7 @@ namespace Flux {
// ggml_backend_t backend = ggml_backend_cuda_init(0); // ggml_backend_t backend = ggml_backend_cuda_init(0);
ggml_backend_t backend = ggml_backend_cpu_init(); ggml_backend_t backend = ggml_backend_cpu_init();
ggml_type model_data_type = GGML_TYPE_Q8_0; ggml_type model_data_type = GGML_TYPE_Q8_0;
std::shared_ptr<FluxRunner> flux = std::shared_ptr<FluxRunner>(new FluxRunner(backend, false)); std::shared_ptr<FluxRunner> flux = std::make_shared<FluxRunner>(backend, false);
{ {
LOG_INFO("loading from '%s'", file_path.c_str()); LOG_INFO("loading from '%s'", file_path.c_str());
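
The FluxRunner construction above is modernize-make-shared: std::make_shared performs a single allocation for the object and its control block, and the raw `new` expression disappears. A standalone sketch with a hypothetical Runner type:

#include <cstdio>
#include <memory>

struct Runner {
    explicit Runner(bool offload) : offload(offload) {}
    bool offload;
};

int main() {
    // Before: std::shared_ptr<Runner> r = std::shared_ptr<Runner>(new Runner(false));
    auto r = std::make_shared<Runner>(false);  // one allocation, exception-safe
    std::printf("offload=%d use_count=%ld\n", r->offload, r.use_count());
    return 0;
}
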


@ -1,5 +1,8 @@
for f in *.cpp *.h *.hpp examples/cli/*.cpp examples/cli/*.h; do for f in *.cpp *.h *.hpp examples/cli/*.cpp examples/cli/*.h; do
[[ "$f" == vocab* ]] && continue [[ "$f" == vocab* ]] && continue
echo "formatting '$f'" echo "formatting '$f'"
# if [ "$f" != "stable-diffusion.h" ]; then
# clang-tidy -fix -p build_linux/ "$f"
# fi
clang-format -style=file -i "$f" clang-format -style=file -i "$f"
done done


@ -105,7 +105,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_mul_n_mode(struct ggml_context* ctx,
return result; return result;
} }
__STATIC_INLINE__ struct ggml_tensor* ggml_merge_lora(ggml_context* ctx, struct ggml_tensor* lora_down, struct ggml_tensor* lora_up, struct ggml_tensor* lora_mid = NULL) { __STATIC_INLINE__ struct ggml_tensor* ggml_merge_lora(ggml_context* ctx, struct ggml_tensor* lora_down, struct ggml_tensor* lora_up, struct ggml_tensor* lora_mid = nullptr) {
struct ggml_tensor* updown; struct ggml_tensor* updown;
// flat lora tensors to multiply it // flat lora tensors to multiply it
int64_t lora_up_rows = lora_up->ne[ggml_n_dims(lora_up) - 1]; int64_t lora_up_rows = lora_up->ne[ggml_n_dims(lora_up) - 1];
@ -118,7 +118,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_merge_lora(ggml_context* ctx, struct
// ggml_mul_mat requires tensor b transposed // ggml_mul_mat requires tensor b transposed
lora_down = ggml_cont(ctx, ggml_transpose(ctx, lora_down)); lora_down = ggml_cont(ctx, ggml_transpose(ctx, lora_down));
if (lora_mid == NULL) { if (lora_mid == nullptr) {
updown = ggml_mul_mat(ctx, lora_up, lora_down); updown = ggml_mul_mat(ctx, lora_up, lora_down);
updown = ggml_cont(ctx, ggml_transpose(ctx, updown)); updown = ggml_cont(ctx, ggml_transpose(ctx, updown));
} else { } else {
@ -165,7 +165,7 @@ __STATIC_INLINE__ void ggml_tensor_set_f32(struct ggml_tensor* tensor, float val
} }
__STATIC_INLINE__ float ggml_tensor_get_f32(const ggml_tensor* tensor, int l, int k = 0, int j = 0, int i = 0) { __STATIC_INLINE__ float ggml_tensor_get_f32(const ggml_tensor* tensor, int l, int k = 0, int j = 0, int i = 0) {
if (tensor->buffer != NULL) { if (tensor->buffer != nullptr) {
float value; float value;
ggml_backend_tensor_get(tensor, &value, i * tensor->nb[3] + j * tensor->nb[2] + k * tensor->nb[1] + l * tensor->nb[0], sizeof(float)); ggml_backend_tensor_get(tensor, &value, i * tensor->nb[3] + j * tensor->nb[2] + k * tensor->nb[1] + l * tensor->nb[0], sizeof(float));
return value; return value;
@ -175,7 +175,7 @@ __STATIC_INLINE__ float ggml_tensor_get_f32(const ggml_tensor* tensor, int l, in
} }
__STATIC_INLINE__ int ggml_tensor_get_i32(const ggml_tensor* tensor, int l, int k = 0, int j = 0, int i = 0) { __STATIC_INLINE__ int ggml_tensor_get_i32(const ggml_tensor* tensor, int l, int k = 0, int j = 0, int i = 0) {
if (tensor->buffer != NULL) { if (tensor->buffer != nullptr) {
float value; float value;
ggml_backend_tensor_get(tensor, &value, i * tensor->nb[3] + j * tensor->nb[2] + k * tensor->nb[1] + l * tensor->nb[0], sizeof(int)); ggml_backend_tensor_get(tensor, &value, i * tensor->nb[3] + j * tensor->nb[2] + k * tensor->nb[1] + l * tensor->nb[0], sizeof(int));
return value; return value;
@ -292,7 +292,7 @@ __STATIC_INLINE__ ggml_tensor* load_tensor_from_file(ggml_context* ctx, const st
std::ifstream file(file_path, std::ios::binary); std::ifstream file(file_path, std::ios::binary);
if (!file.is_open()) { if (!file.is_open()) {
LOG_ERROR("failed to open '%s'", file_path.c_str()); LOG_ERROR("failed to open '%s'", file_path.c_str());
return NULL; return nullptr;
} }
int32_t n_dims; int32_t n_dims;
int32_t length; int32_t length;
@ -306,7 +306,7 @@ __STATIC_INLINE__ ggml_tensor* load_tensor_from_file(ggml_context* ctx, const st
if (file.eof()) { if (file.eof()) {
LOG_ERROR("incomplete file '%s'", file_path.c_str()); LOG_ERROR("incomplete file '%s'", file_path.c_str());
return NULL; return nullptr;
} }
int32_t nelements = 1; int32_t nelements = 1;
@ -354,7 +354,7 @@ __STATIC_INLINE__ void copy_ggml_tensor(struct ggml_tensor* dst, struct ggml_ten
} }
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = 10 * 1024 * 1024; // for padding params.mem_size = 10 * 1024 * 1024; // for padding
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* ctx = ggml_init(params); struct ggml_context* ctx = ggml_init(params);
if (!ctx) { if (!ctx) {
@ -860,7 +860,7 @@ __STATIC_INLINE__ void sd_tiling_non_square(ggml_tensor* input,
params.mem_size += input_tile_size_x * input_tile_size_y * input->ne[2] * input->ne[3] * sizeof(float); // input chunk params.mem_size += input_tile_size_x * input_tile_size_y * input->ne[2] * input->ne[3] * sizeof(float); // input chunk
params.mem_size += output_tile_size_x * output_tile_size_y * output->ne[2] * output->ne[3] * sizeof(float); // output chunk params.mem_size += output_tile_size_x * output_tile_size_y * output->ne[2] * output->ne[3] * sizeof(float); // output chunk
params.mem_size += 3 * ggml_tensor_overhead(); params.mem_size += 3 * ggml_tensor_overhead();
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
LOG_DEBUG("tile work buffer size: %.2f MB", params.mem_size / 1024.f / 1024.f); LOG_DEBUG("tile work buffer size: %.2f MB", params.mem_size / 1024.f / 1024.f);
@ -961,7 +961,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_linear(struct ggml_context* ctx,
if (scale != 1.f) { if (scale != 1.f) {
x = ggml_scale(ctx, x, 1.f / scale); x = ggml_scale(ctx, x, 1.f / scale);
} }
if (b != NULL) { if (b != nullptr) {
x = ggml_add_inplace(ctx, x, b); x = ggml_add_inplace(ctx, x, b);
} }
return x; return x;
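
The `b != nullptr` branch above treats a null pointer as "no bias supplied", an optional-argument idiom used throughout these helpers. A standalone sketch of the same idiom with hypothetical types (not the real ggml_nn_linear signature):

#include <cstdio>
#include <vector>

static std::vector<float> linear(const std::vector<float>& x,
                                 float w,
                                 const std::vector<float>* b = nullptr) {
    std::vector<float> y(x.size());
    for (size_t i = 0; i < x.size(); i++) {
        y[i] = x[i] * w;
        if (b != nullptr) {  // bias is optional, mirroring the b != nullptr check above
            y[i] += (*b)[i];
        }
    }
    return y;
}

int main() {
    std::vector<float> x = {1.f, 2.f, 3.f};
    std::vector<float> bias = {0.5f, 0.5f, 0.5f};
    auto y0 = linear(x, 2.f);         // no bias
    auto y1 = linear(x, 2.f, &bias);  // with bias
    std::printf("%f %f\n", y0[0], y1[0]);
    return 0;
}
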
@ -994,7 +994,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_conv_2d(struct ggml_context* ctx,
if (scale != 1.f) { if (scale != 1.f) {
x = ggml_scale(ctx, x, 1.f / scale); x = ggml_scale(ctx, x, 1.f / scale);
} }
if (b != NULL) { if (b != nullptr) {
b = ggml_reshape_4d(ctx, b, 1, 1, b->ne[0], 1); b = ggml_reshape_4d(ctx, b, 1, 1, b->ne[0], 1);
x = ggml_add_inplace(ctx, x, b); x = ggml_add_inplace(ctx, x, b);
} }
@ -1023,7 +1023,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_conv_3d(struct ggml_context* ctx,
int64_t N = x->ne[3] / IC; int64_t N = x->ne[3] / IC;
x = ggml_conv_3d(ctx, w, x, IC, s0, s1, s2, p0, p1, p2, d0, d1, d2); x = ggml_conv_3d(ctx, w, x, IC, s0, s1, s2, p0, p1, p2, d0, d1, d2);
if (b != NULL) { if (b != nullptr) {
b = ggml_reshape_4d(ctx, b, 1, 1, 1, b->ne[0]); // [OC, 1, 1, 1] b = ggml_reshape_4d(ctx, b, 1, 1, 1, b->ne[0]); // [OC, 1, 1, 1]
x = ggml_add_inplace(ctx, x, b); x = ggml_add_inplace(ctx, x, b);
} }
@ -1042,7 +1042,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_conv_3d_nx1x1(struct ggml_context*
int p2 = 1, int p2 = 1,
int d2 = 1) { int d2 = 1) {
x = ggml_conv_2d(ctx, w, x, 1, s2, 0, p2, 1, d2); // [N, OC, T, OH * OW] x = ggml_conv_2d(ctx, w, x, 1, s2, 0, p2, 1, d2); // [N, OC, T, OH * OW]
if (b != NULL) { if (b != nullptr) {
b = ggml_reshape_4d(ctx, b, 1, 1, b->ne[0], 1); b = ggml_reshape_4d(ctx, b, 1, 1, b->ne[0], 1);
x = ggml_add(ctx, x, b); x = ggml_add(ctx, x, b);
} }
@ -1146,7 +1146,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_attention_ext(struct ggml_context*
struct ggml_tensor* k, struct ggml_tensor* k,
struct ggml_tensor* v, struct ggml_tensor* v,
int64_t n_head, int64_t n_head,
struct ggml_tensor* mask = NULL, struct ggml_tensor* mask = nullptr,
bool diag_mask_inf = false, bool diag_mask_inf = false,
bool skip_reshape = false, bool skip_reshape = false,
bool flash_attn = false, // avoid overflow bool flash_attn = false, // avoid overflow
@ -1293,9 +1293,9 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_layer_norm(struct ggml_context* ct
struct ggml_tensor* b, struct ggml_tensor* b,
float eps = EPS) { float eps = EPS) {
x = ggml_norm(ctx, x, eps); x = ggml_norm(ctx, x, eps);
if (w != NULL) { if (w != nullptr) {
x = ggml_mul_inplace(ctx, x, w); x = ggml_mul_inplace(ctx, x, w);
if (b != NULL) { if (b != nullptr) {
x = ggml_add_inplace(ctx, x, b); x = ggml_add_inplace(ctx, x, b);
} }
} }
@ -1307,14 +1307,14 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_group_norm(struct ggml_context* ct
struct ggml_tensor* w, struct ggml_tensor* w,
struct ggml_tensor* b, struct ggml_tensor* b,
int num_groups = 32) { int num_groups = 32) {
if (ggml_n_dims(x) >= 3 && w != NULL && b != NULL) { if (ggml_n_dims(x) >= 3 && w != nullptr && b != nullptr) {
w = ggml_reshape_4d(ctx, w, 1, 1, w->ne[0], 1); w = ggml_reshape_4d(ctx, w, 1, 1, w->ne[0], 1);
b = ggml_reshape_4d(ctx, b, 1, 1, b->ne[0], 1); b = ggml_reshape_4d(ctx, b, 1, 1, b->ne[0], 1);
} }
const float eps = 1e-6f; // default eps parameter const float eps = 1e-6f; // default eps parameter
x = ggml_group_norm(ctx, x, num_groups, eps); x = ggml_group_norm(ctx, x, num_groups, eps);
if (w != NULL && b != NULL) { if (w != nullptr && b != nullptr) {
x = ggml_mul_inplace(ctx, x, w); x = ggml_mul_inplace(ctx, x, w);
// b = ggml_repeat(ctx, b, x); // b = ggml_repeat(ctx, b, x);
x = ggml_add_inplace(ctx, x, b); x = ggml_add_inplace(ctx, x, b);
@ -1422,7 +1422,7 @@ __STATIC_INLINE__ struct ggml_tensor* new_timestep_embedding(struct ggml_context
// embedding: [N, dim] // embedding: [N, dim]
std::vector<float> embedding_vec = timestep_embedding(timesteps, dim, max_period); std::vector<float> embedding_vec = timestep_embedding(timesteps, dim, max_period);
struct ggml_tensor* embedding = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, dim, timesteps.size()); struct ggml_tensor* embedding = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, dim, timesteps.size());
if (embedding->data != NULL) { if (embedding->data != nullptr) {
memcpy(((char*)embedding->data), ((char*)embedding_vec.data()), ggml_nbytes(embedding)); memcpy(((char*)embedding->data), ((char*)embedding_vec.data()), ggml_nbytes(embedding));
} else { } else {
ggml_backend_tensor_set(embedding, embedding_vec.data(), 0, ggml_nbytes(embedding)); ggml_backend_tensor_set(embedding, embedding_vec.data(), 0, ggml_nbytes(embedding));
@ -1458,23 +1458,23 @@ struct GGMLRunner {
protected: protected:
typedef std::function<struct ggml_cgraph*()> get_graph_cb_t; typedef std::function<struct ggml_cgraph*()> get_graph_cb_t;
ggml_backend_t params_backend = NULL; ggml_backend_t params_backend = nullptr;
ggml_backend_t runtime_backend = NULL; ggml_backend_t runtime_backend = nullptr;
struct ggml_context* params_ctx = NULL; struct ggml_context* params_ctx = nullptr;
ggml_backend_buffer_t params_buffer = NULL; ggml_backend_buffer_t params_buffer = nullptr;
struct ggml_context* offload_ctx = NULL; struct ggml_context* offload_ctx = nullptr;
ggml_backend_buffer_t runtime_params_buffer = NULL; ggml_backend_buffer_t runtime_params_buffer = nullptr;
bool params_on_runtime_backend = false; bool params_on_runtime_backend = false;
struct ggml_context* cache_ctx = NULL; struct ggml_context* cache_ctx = nullptr;
ggml_backend_buffer_t cache_buffer = NULL; ggml_backend_buffer_t cache_buffer = nullptr;
struct ggml_context* compute_ctx = NULL; struct ggml_context* compute_ctx = nullptr;
struct ggml_gallocr* compute_allocr = NULL; struct ggml_gallocr* compute_allocr = nullptr;
std::vector<float> one_vec = {1.f}; std::vector<float> one_vec = {1.f};
ggml_tensor* one_tensor = NULL; ggml_tensor* one_tensor = nullptr;
std::map<struct ggml_tensor*, const void*> backend_tensor_data_map; std::map<struct ggml_tensor*, const void*> backend_tensor_data_map;
std::map<std::string, struct ggml_tensor*> cache_tensor_map; // name -> tensor std::map<std::string, struct ggml_tensor*> cache_tensor_map; // name -> tensor
@ -1483,59 +1483,59 @@ protected:
void alloc_params_ctx() { void alloc_params_ctx() {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(MAX_PARAMS_TENSOR_NUM * ggml_tensor_overhead()); params.mem_size = static_cast<size_t>(MAX_PARAMS_TENSOR_NUM * ggml_tensor_overhead());
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = true; params.no_alloc = true;
params_ctx = ggml_init(params); params_ctx = ggml_init(params);
GGML_ASSERT(params_ctx != NULL); GGML_ASSERT(params_ctx != nullptr);
if (params_backend != runtime_backend) { if (params_backend != runtime_backend) {
offload_ctx = ggml_init(params); offload_ctx = ggml_init(params);
GGML_ASSERT(offload_ctx != NULL); GGML_ASSERT(offload_ctx != nullptr);
} }
} }
void free_params_ctx() { void free_params_ctx() {
if (params_ctx != NULL) { if (params_ctx != nullptr) {
ggml_free(params_ctx); ggml_free(params_ctx);
params_ctx = NULL; params_ctx = nullptr;
} }
if (offload_ctx != NULL) { if (offload_ctx != nullptr) {
ggml_free(offload_ctx); ggml_free(offload_ctx);
offload_ctx = NULL; offload_ctx = nullptr;
} }
} }
void alloc_cache_ctx() { void alloc_cache_ctx() {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(MAX_PARAMS_TENSOR_NUM * ggml_tensor_overhead()); params.mem_size = static_cast<size_t>(MAX_PARAMS_TENSOR_NUM * ggml_tensor_overhead());
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = true; params.no_alloc = true;
cache_ctx = ggml_init(params); cache_ctx = ggml_init(params);
GGML_ASSERT(cache_ctx != NULL); GGML_ASSERT(cache_ctx != nullptr);
} }
void free_cache_ctx() { void free_cache_ctx() {
if (cache_ctx != NULL) { if (cache_ctx != nullptr) {
ggml_free(cache_ctx); ggml_free(cache_ctx);
cache_ctx = NULL; cache_ctx = nullptr;
} }
} }
void alloc_compute_ctx() { void alloc_compute_ctx() {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(ggml_tensor_overhead() * MAX_GRAPH_SIZE + ggml_graph_overhead()); params.mem_size = static_cast<size_t>(ggml_tensor_overhead() * MAX_GRAPH_SIZE + ggml_graph_overhead());
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = true; params.no_alloc = true;
compute_ctx = ggml_init(params); compute_ctx = ggml_init(params);
GGML_ASSERT(compute_ctx != NULL); GGML_ASSERT(compute_ctx != nullptr);
} }
void free_compute_ctx() { void free_compute_ctx() {
if (compute_ctx != NULL) { if (compute_ctx != nullptr) {
ggml_free(compute_ctx); ggml_free(compute_ctx);
compute_ctx = NULL; compute_ctx = nullptr;
} }
} }
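
All of the alloc_*_ctx()/free_*_ctx() pairs above follow the same ggml pattern: a metadata-only context (no_alloc = true, mem_buffer = nullptr) whose tensor data is later placed in a separate backend buffer. A minimal sketch of that pattern, assuming the ggml headers are available:

#include <cstddef>
#include "ggml.h"

static struct ggml_context* make_meta_ctx(size_t max_tensors) {
    struct ggml_init_params params;
    params.mem_size   = max_tensors * ggml_tensor_overhead();  // room for tensor metadata only
    params.mem_buffer = nullptr;                               // let ggml allocate the pool itself
    params.no_alloc   = true;                                  // tensor data lives in a backend buffer, not here
    return ggml_init(params);
}

// Usage mirrors alloc_params_ctx()/free_params_ctx() above:
//   struct ggml_context* ctx = make_meta_ctx(1024);
//   GGML_ASSERT(ctx != nullptr);
//   ... create tensors, then allocate a backend buffer for them ...
//   ggml_free(ctx);
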
@ -1559,7 +1559,7 @@ protected:
} }
bool alloc_compute_buffer(get_graph_cb_t get_graph) { bool alloc_compute_buffer(get_graph_cb_t get_graph) {
if (compute_allocr != NULL) { if (compute_allocr != nullptr) {
return true; return true;
} }
reset_compute_ctx(); reset_compute_ctx();
@ -1584,9 +1584,9 @@ protected:
} }
void free_cache_buffer() { void free_cache_buffer() {
if (cache_buffer != NULL) { if (cache_buffer != nullptr) {
ggml_backend_buffer_free(cache_buffer); ggml_backend_buffer_free(cache_buffer);
cache_buffer = NULL; cache_buffer = nullptr;
} }
} }
@ -1596,7 +1596,7 @@ protected:
} }
free_cache_ctx_and_buffer(); free_cache_ctx_and_buffer();
alloc_cache_ctx(); alloc_cache_ctx();
GGML_ASSERT(cache_buffer == NULL); GGML_ASSERT(cache_buffer == nullptr);
std::map<ggml_tensor*, ggml_tensor*> runtime_tensor_to_cache_tensor; std::map<ggml_tensor*, ggml_tensor*> runtime_tensor_to_cache_tensor;
for (auto kv : cache_tensor_map) { for (auto kv : cache_tensor_map) {
auto cache_tensor = ggml_dup_tensor(cache_ctx, kv.second); auto cache_tensor = ggml_dup_tensor(cache_ctx, kv.second);
@ -1605,7 +1605,7 @@ protected:
} }
size_t num_tensors = ggml_tensor_num(cache_ctx); size_t num_tensors = ggml_tensor_num(cache_ctx);
cache_buffer = ggml_backend_alloc_ctx_tensors(cache_ctx, runtime_backend); cache_buffer = ggml_backend_alloc_ctx_tensors(cache_ctx, runtime_backend);
GGML_ASSERT(cache_buffer != NULL); GGML_ASSERT(cache_buffer != nullptr);
for (auto kv : runtime_tensor_to_cache_tensor) { for (auto kv : runtime_tensor_to_cache_tensor) {
ggml_backend_tensor_copy(kv.first, kv.second); ggml_backend_tensor_copy(kv.first, kv.second);
} }
@ -1637,12 +1637,12 @@ protected:
if (params_on_runtime_backend) { if (params_on_runtime_backend) {
return true; return true;
} }
GGML_ASSERT(runtime_params_buffer == NULL); GGML_ASSERT(runtime_params_buffer == nullptr);
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
size_t num_tensors = ggml_tensor_num(offload_ctx); size_t num_tensors = ggml_tensor_num(offload_ctx);
if (num_tensors == 0) { if (num_tensors == 0) {
for (ggml_tensor* t = ggml_get_first_tensor(params_ctx); t != NULL; t = ggml_get_next_tensor(params_ctx, t)) { for (ggml_tensor* t = ggml_get_first_tensor(params_ctx); t != nullptr; t = ggml_get_next_tensor(params_ctx, t)) {
GGML_ASSERT(t->view_src == NULL); GGML_ASSERT(t->view_src == nullptr);
ggml_dup_tensor(offload_ctx, t); ggml_dup_tensor(offload_ctx, t);
} }
} }
@ -1651,7 +1651,7 @@ protected:
runtime_params_buffer = ggml_backend_alloc_ctx_tensors(offload_ctx, runtime_backend); runtime_params_buffer = ggml_backend_alloc_ctx_tensors(offload_ctx, runtime_backend);
if (runtime_params_buffer == NULL) { if (runtime_params_buffer == nullptr) {
LOG_ERROR("%s alloc runtime params backend buffer failed, num_tensors = %i", LOG_ERROR("%s alloc runtime params backend buffer failed, num_tensors = %i",
get_desc().c_str(), get_desc().c_str(),
num_tensors); num_tensors);
@ -1661,7 +1661,7 @@ protected:
ggml_tensor* t = ggml_get_first_tensor(params_ctx); ggml_tensor* t = ggml_get_first_tensor(params_ctx);
ggml_tensor* offload_t = ggml_get_first_tensor(offload_ctx); ggml_tensor* offload_t = ggml_get_first_tensor(offload_ctx);
while (t != NULL && offload_t != NULL) { while (t != nullptr && offload_t != nullptr) {
ggml_backend_tensor_copy(t, offload_t); ggml_backend_tensor_copy(t, offload_t);
std::swap(t->buffer, offload_t->buffer); std::swap(t->buffer, offload_t->buffer);
std::swap(t->data, offload_t->data); std::swap(t->data, offload_t->data);
@ -1693,21 +1693,21 @@ protected:
ggml_tensor* t = ggml_get_first_tensor(params_ctx); ggml_tensor* t = ggml_get_first_tensor(params_ctx);
ggml_tensor* offload_t = ggml_get_first_tensor(offload_ctx); ggml_tensor* offload_t = ggml_get_first_tensor(offload_ctx);
while (t != NULL && offload_t != NULL) { while (t != nullptr && offload_t != nullptr) {
t->buffer = offload_t->buffer; t->buffer = offload_t->buffer;
t->data = offload_t->data; t->data = offload_t->data;
t->extra = offload_t->extra; t->extra = offload_t->extra;
offload_t->buffer = NULL; offload_t->buffer = nullptr;
offload_t->data = NULL; offload_t->data = nullptr;
offload_t->extra = NULL; offload_t->extra = nullptr;
t = ggml_get_next_tensor(params_ctx, t); t = ggml_get_next_tensor(params_ctx, t);
offload_t = ggml_get_next_tensor(offload_ctx, offload_t); offload_t = ggml_get_next_tensor(offload_ctx, offload_t);
} }
if (runtime_params_buffer != NULL) { if (runtime_params_buffer != nullptr) {
ggml_backend_buffer_free(runtime_params_buffer); ggml_backend_buffer_free(runtime_params_buffer);
runtime_params_buffer = NULL; runtime_params_buffer = nullptr;
} }
params_on_runtime_backend = false; params_on_runtime_backend = false;
} }
@ -1744,7 +1744,7 @@ public:
bool alloc_params_buffer() { bool alloc_params_buffer() {
size_t num_tensors = ggml_tensor_num(params_ctx); size_t num_tensors = ggml_tensor_num(params_ctx);
params_buffer = ggml_backend_alloc_ctx_tensors(params_ctx, params_backend); params_buffer = ggml_backend_alloc_ctx_tensors(params_ctx, params_backend);
if (params_buffer == NULL) { if (params_buffer == nullptr) {
LOG_ERROR("%s alloc params backend buffer failed, num_tensors = %i", LOG_ERROR("%s alloc params backend buffer failed, num_tensors = %i",
get_desc().c_str(), get_desc().c_str(),
num_tensors); num_tensors);
@ -1760,14 +1760,14 @@ public:
} }
void free_params_buffer() { void free_params_buffer() {
if (params_buffer != NULL) { if (params_buffer != nullptr) {
ggml_backend_buffer_free(params_buffer); ggml_backend_buffer_free(params_buffer);
params_buffer = NULL; params_buffer = nullptr;
} }
} }
size_t get_params_buffer_size() { size_t get_params_buffer_size() {
if (params_buffer != NULL) { if (params_buffer != nullptr) {
return ggml_backend_buffer_get_size(params_buffer); return ggml_backend_buffer_get_size(params_buffer);
} }
return 0; return 0;
@ -1779,9 +1779,9 @@ public:
} }
void free_compute_buffer() { void free_compute_buffer() {
if (compute_allocr != NULL) { if (compute_allocr != nullptr) {
ggml_gallocr_free(compute_allocr); ggml_gallocr_free(compute_allocr);
compute_allocr = NULL; compute_allocr = nullptr;
} }
offload_params_to_params_backend(); offload_params_to_params_backend();
} }
@ -1792,12 +1792,12 @@ public:
} }
struct ggml_tensor* to_backend(struct ggml_tensor* tensor) { struct ggml_tensor* to_backend(struct ggml_tensor* tensor) {
GGML_ASSERT(compute_ctx != NULL); GGML_ASSERT(compute_ctx != nullptr);
if (tensor == NULL) { if (tensor == nullptr) {
return NULL; return nullptr;
} }
// it's performing a compute, check if backend isn't cpu // it's performing a compute, check if backend isn't cpu
if (!ggml_backend_is_cpu(runtime_backend) && (tensor->buffer == NULL || ggml_backend_buffer_is_host(tensor->buffer))) { if (!ggml_backend_is_cpu(runtime_backend) && (tensor->buffer == nullptr || ggml_backend_buffer_is_host(tensor->buffer))) {
// pass input tensors to gpu memory // pass input tensors to gpu memory
auto backend_tensor = ggml_dup_tensor(compute_ctx, tensor); auto backend_tensor = ggml_dup_tensor(compute_ctx, tensor);
@ -1813,8 +1813,8 @@ public:
} }
struct ggml_tensor* get_cache_tensor_by_name(const std::string& name) { struct ggml_tensor* get_cache_tensor_by_name(const std::string& name) {
if (cache_ctx == NULL) { if (cache_ctx == nullptr) {
return NULL; return nullptr;
} }
return ggml_get_tensor(cache_ctx, name.c_str()); return ggml_get_tensor(cache_ctx, name.c_str());
} }
@ -1822,8 +1822,8 @@ public:
void compute(get_graph_cb_t get_graph, void compute(get_graph_cb_t get_graph,
int n_threads, int n_threads,
bool free_compute_buffer_immediately = true, bool free_compute_buffer_immediately = true,
struct ggml_tensor** output = NULL, struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = NULL) { struct ggml_context* output_ctx = nullptr) {
if (!offload_params_to_runtime_backend()) { if (!offload_params_to_runtime_backend()) {
LOG_ERROR("%s offload params to runtime backend failed", get_desc().c_str()); LOG_ERROR("%s offload params to runtime backend failed", get_desc().c_str());
return; return;
@ -1842,12 +1842,12 @@ public:
ggml_graph_print(gf); ggml_graph_print(gf);
#endif #endif
copy_cache_tensors_to_cache_buffer(); copy_cache_tensors_to_cache_buffer();
if (output != NULL) { if (output != nullptr) {
auto result = ggml_get_tensor(compute_ctx, final_result_name.c_str()); auto result = ggml_get_tensor(compute_ctx, final_result_name.c_str());
if (*output == NULL && output_ctx != NULL) { if (*output == nullptr && output_ctx != nullptr) {
*output = ggml_dup_tensor(output_ctx, result); *output = ggml_dup_tensor(output_ctx, result);
} }
if (*output != NULL) { if (*output != nullptr) {
ggml_backend_tensor_get_and_sync(runtime_backend, result, (*output)->data, 0, ggml_nbytes(*output)); ggml_backend_tensor_get_and_sync(runtime_backend, result, (*output)->data, 0, ggml_nbytes(*output));
} }
} }
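The nullptr defaults on output and output_ctx above match the caller-side pattern used throughout this commit (see the MMDiTRunner::test() hunk further down): start from a null result pointer and let compute() allocate and fill the copy. A condensed sketch, with the runner instance assumed:

// "mmdit" stands in for any GGMLRunner-derived object, as in the test() hunk below.
ggml_tensor* out = nullptr;                                  // filled in by compute()
mmdit.compute(8, x, timesteps, context, y, &out, work_ctx);
if (out != nullptr) {
    // the graph result has been copied into work_ctx
}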
@ -1994,7 +1994,7 @@ public:
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
struct ggml_tensor* w = params["weight"]; struct ggml_tensor* w = params["weight"];
struct ggml_tensor* b = NULL; struct ggml_tensor* b = nullptr;
if (bias) { if (bias) {
b = params["bias"]; b = params["bias"];
} }
@ -2098,7 +2098,7 @@ public:
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
struct ggml_tensor* w = params["weight"]; struct ggml_tensor* w = params["weight"];
struct ggml_tensor* b = NULL; struct ggml_tensor* b = nullptr;
if (bias) { if (bias) {
b = params["bias"]; b = params["bias"];
} }
@ -2156,7 +2156,7 @@ public:
// result: [N, OC, OD, OH*OW] // result: [N, OC, OD, OH*OW]
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
struct ggml_tensor* w = params["weight"]; struct ggml_tensor* w = params["weight"];
struct ggml_tensor* b = NULL; struct ggml_tensor* b = nullptr;
if (bias) { if (bias) {
b = params["bias"]; b = params["bias"];
} }
@ -2205,7 +2205,7 @@ public:
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
struct ggml_tensor* w = params["weight"]; struct ggml_tensor* w = params["weight"];
struct ggml_tensor* b = NULL; struct ggml_tensor* b = nullptr;
if (bias) { if (bias) {
b = params["bias"]; b = params["bias"];
} }
@ -2245,8 +2245,8 @@ public:
bias(bias) {} bias(bias) {}
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
struct ggml_tensor* w = NULL; struct ggml_tensor* w = nullptr;
struct ggml_tensor* b = NULL; struct ggml_tensor* b = nullptr;
if (elementwise_affine) { if (elementwise_affine) {
w = params["weight"]; w = params["weight"];
@ -2285,8 +2285,8 @@ public:
affine(affine) {} affine(affine) {}
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
struct ggml_tensor* w = NULL; struct ggml_tensor* w = nullptr;
struct ggml_tensor* b = NULL; struct ggml_tensor* b = nullptr;
if (affine) { if (affine) {
w = params["weight"]; w = params["weight"];
b = params["bias"]; b = params["bias"];
@ -2369,7 +2369,7 @@ public:
struct ggml_tensor* k = k_proj->forward(ctx, x); struct ggml_tensor* k = k_proj->forward(ctx, x);
struct ggml_tensor* v = v_proj->forward(ctx, x); struct ggml_tensor* v = v_proj->forward(ctx, x);
x = ggml_nn_attention_ext(ctx, backend, q, k, v, n_head, NULL, mask); // [N, n_token, embed_dim] x = ggml_nn_attention_ext(ctx, backend, q, k, v, n_head, nullptr, mask); // [N, n_token, embed_dim]
x = out_proj->forward(ctx, x); // [N, n_token, embed_dim] x = out_proj->forward(ctx, x); // [N, n_token, embed_dim]
return x; return x;
View File
@ -100,7 +100,7 @@ struct LoraModel : public GGMLRunner {
bool load_failed = false; bool load_failed = false;
bool applied = false; bool applied = false;
std::vector<int> zero_index_vec = {0}; std::vector<int> zero_index_vec = {0};
ggml_tensor* zero_index = NULL; ggml_tensor* zero_index = nullptr;
enum lora_t type = REGULAR; enum lora_t type = REGULAR;
LoraModel(ggml_backend_t backend, LoraModel(ggml_backend_t backend,
@ -112,7 +112,7 @@ struct LoraModel : public GGMLRunner {
} }
} }
std::string get_desc() { std::string get_desc() override {
return "lora"; return "lora";
} }
@ -287,7 +287,7 @@ struct LoraModel : public GGMLRunner {
if (is_qkvm_split) { if (is_qkvm_split) {
key = key.substr(sizeof("SPLIT_L|") - 1); key = key.substr(sizeof("SPLIT_L|") - 1);
} }
struct ggml_tensor* updown = NULL; struct ggml_tensor* updown = nullptr;
float scale_value = 1.0f; float scale_value = 1.0f;
std::string full_key = lora_pre[type] + key; std::string full_key = lora_pre[type] + key;
if (is_bias) { if (is_bias) {
@ -314,13 +314,13 @@ struct LoraModel : public GGMLRunner {
} }
std::string alpha_name = ""; std::string alpha_name = "";
ggml_tensor* hada_1_mid = NULL; // tau for tucker decomposition ggml_tensor* hada_1_mid = nullptr; // tau for tucker decomposition
ggml_tensor* hada_1_up = NULL; ggml_tensor* hada_1_up = nullptr;
ggml_tensor* hada_1_down = NULL; ggml_tensor* hada_1_down = nullptr;
ggml_tensor* hada_2_mid = NULL; // tau for tucker decomposition ggml_tensor* hada_2_mid = nullptr; // tau for tucker decomposition
ggml_tensor* hada_2_up = NULL; ggml_tensor* hada_2_up = nullptr;
ggml_tensor* hada_2_down = NULL; ggml_tensor* hada_2_down = nullptr;
std::string hada_1_mid_name = ""; std::string hada_1_mid_name = "";
std::string hada_1_down_name = ""; std::string hada_1_down_name = "";
@ -368,7 +368,7 @@ struct LoraModel : public GGMLRunner {
applied_lora_tensors.insert(hada_2_up_name); applied_lora_tensors.insert(hada_2_up_name);
applied_lora_tensors.insert(alpha_name); applied_lora_tensors.insert(alpha_name);
if (hada_1_up == NULL || hada_1_down == NULL || hada_2_up == NULL || hada_2_down == NULL) { if (hada_1_up == nullptr || hada_1_down == nullptr || hada_2_up == nullptr || hada_2_down == nullptr) {
continue; continue;
} }
@ -394,8 +394,8 @@ struct LoraModel : public GGMLRunner {
std::string alpha_name = full_key + ".alpha"; std::string alpha_name = full_key + ".alpha";
ggml_tensor* lokr_w1 = NULL; ggml_tensor* lokr_w1 = nullptr;
ggml_tensor* lokr_w2 = NULL; ggml_tensor* lokr_w2 = nullptr;
std::string lokr_w1_name = ""; std::string lokr_w1_name = "";
std::string lokr_w2_name = ""; std::string lokr_w2_name = "";
@ -407,8 +407,8 @@ struct LoraModel : public GGMLRunner {
lokr_w1 = to_f32(compute_ctx, lora_tensors[lokr_w1_name]); lokr_w1 = to_f32(compute_ctx, lora_tensors[lokr_w1_name]);
applied_lora_tensors.insert(lokr_w1_name); applied_lora_tensors.insert(lokr_w1_name);
} else { } else {
ggml_tensor* down = NULL; ggml_tensor* down = nullptr;
ggml_tensor* up = NULL; ggml_tensor* up = nullptr;
std::string down_name = lokr_w1_name + "_b"; std::string down_name = lokr_w1_name + "_b";
std::string up_name = lokr_w1_name + "_a"; std::string up_name = lokr_w1_name + "_a";
if (lora_tensors.find(down_name) != lora_tensors.end()) { if (lora_tensors.find(down_name) != lora_tensors.end()) {
@ -432,8 +432,8 @@ struct LoraModel : public GGMLRunner {
lokr_w2 = to_f32(compute_ctx, lora_tensors[lokr_w2_name]); lokr_w2 = to_f32(compute_ctx, lora_tensors[lokr_w2_name]);
applied_lora_tensors.insert(lokr_w2_name); applied_lora_tensors.insert(lokr_w2_name);
} else { } else {
ggml_tensor* down = NULL; ggml_tensor* down = nullptr;
ggml_tensor* up = NULL; ggml_tensor* up = nullptr;
std::string down_name = lokr_w2_name + "_b"; std::string down_name = lokr_w2_name + "_b";
std::string up_name = lokr_w2_name + "_a"; std::string up_name = lokr_w2_name + "_a";
if (lora_tensors.find(down_name) != lora_tensors.end()) { if (lora_tensors.find(down_name) != lora_tensors.end()) {
@ -460,9 +460,9 @@ struct LoraModel : public GGMLRunner {
} else { } else {
// LoRA mode // LoRA mode
ggml_tensor* lora_mid = NULL; // tau for tucker decomposition ggml_tensor* lora_mid = nullptr; // tau for tucker decomposition
ggml_tensor* lora_up = NULL; ggml_tensor* lora_up = nullptr;
ggml_tensor* lora_down = NULL; ggml_tensor* lora_down = nullptr;
std::string alpha_name = ""; std::string alpha_name = "";
std::string scale_name = ""; std::string scale_name = "";
@ -497,12 +497,12 @@ struct LoraModel : public GGMLRunner {
auto split_k_alpha_name = full_key + "k" + suffix + ".alpha"; auto split_k_alpha_name = full_key + "k" + suffix + ".alpha";
auto split_v_alpha_name = full_key + "v" + suffix + ".alpha"; auto split_v_alpha_name = full_key + "v" + suffix + ".alpha";
ggml_tensor* lora_q_down = NULL; ggml_tensor* lora_q_down = nullptr;
ggml_tensor* lora_q_up = NULL; ggml_tensor* lora_q_up = nullptr;
ggml_tensor* lora_k_down = NULL; ggml_tensor* lora_k_down = nullptr;
ggml_tensor* lora_k_up = NULL; ggml_tensor* lora_k_up = nullptr;
ggml_tensor* lora_v_down = NULL; ggml_tensor* lora_v_down = nullptr;
ggml_tensor* lora_v_up = NULL; ggml_tensor* lora_v_up = nullptr;
lora_q_down = to_f32(compute_ctx, lora_tensors[split_q_d_name]); lora_q_down = to_f32(compute_ctx, lora_tensors[split_q_d_name]);
@ -633,15 +633,15 @@ struct LoraModel : public GGMLRunner {
auto split_v_alpha_name = full_key + "attn.to_v" + ".alpha"; auto split_v_alpha_name = full_key + "attn.to_v" + ".alpha";
auto split_m_alpha_name = full_key + "proj_mlp" + ".alpha"; auto split_m_alpha_name = full_key + "proj_mlp" + ".alpha";
ggml_tensor* lora_q_down = NULL; ggml_tensor* lora_q_down = nullptr;
ggml_tensor* lora_q_up = NULL; ggml_tensor* lora_q_up = nullptr;
ggml_tensor* lora_k_down = NULL; ggml_tensor* lora_k_down = nullptr;
ggml_tensor* lora_k_up = NULL; ggml_tensor* lora_k_up = nullptr;
ggml_tensor* lora_v_down = NULL; ggml_tensor* lora_v_down = nullptr;
ggml_tensor* lora_v_up = NULL; ggml_tensor* lora_v_up = nullptr;
ggml_tensor* lora_m_down = NULL; ggml_tensor* lora_m_down = nullptr;
ggml_tensor* lora_m_up = NULL; ggml_tensor* lora_m_up = nullptr;
lora_q_up = to_f32(compute_ctx, lora_tensors[split_q_u_name]); lora_q_up = to_f32(compute_ctx, lora_tensors[split_q_u_name]);
@ -809,7 +809,7 @@ struct LoraModel : public GGMLRunner {
} }
} }
if (lora_up == NULL || lora_down == NULL) { if (lora_up == nullptr || lora_down == nullptr) {
continue; continue;
} }
// calc_scale // calc_scale
View File
@ -13,10 +13,10 @@ namespace LTXV {
public: public:
CausalConv3d(int64_t in_channels, CausalConv3d(int64_t in_channels,
int64_t out_channels, int64_t out_channels,
int kernel_size = 3, int kernel_size = 3,
std::tuple<int> stride = {1, 1, 1}, std::tuple<int, int, int> stride = {1, 1, 1},
int dilation = 1, int dilation = 1,
bool bias = true) { bool bias = true) {
time_kernel_size = kernel_size / 2; time_kernel_size = kernel_size / 2;
blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv3d(in_channels, blocks["conv"] = std::shared_ptr<GGMLBlock>(new Conv3d(in_channels,
out_channels, out_channels,
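Beyond the mechanical modernize rewrites, the stride parameter change above is a genuine type fix: a std::tuple<int> holds a single int, so a three-value default only makes sense once the tuple carries three ints. A minimal illustration, independent of this repository (dimension order is illustrative):

#include <tuple>

// std::tuple<int> stride = {1, 1, 1};           // ill-formed: the tuple has one element
std::tuple<int, int, int> stride = {1, 1, 1};    // ok: one entry per dimension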
View File
@ -1,6 +1,8 @@
#ifndef __MMDIT_HPP__ #ifndef __MMDIT_HPP__
#define __MMDIT_HPP__ #define __MMDIT_HPP__
#include <memory>
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
#include "model.h" #include "model.h"
@ -208,8 +210,8 @@ public:
ggml_backend_t backend, ggml_backend_t backend,
struct ggml_tensor* x) { struct ggml_tensor* x) {
auto qkv = pre_attention(ctx, x); auto qkv = pre_attention(ctx, x);
x = ggml_nn_attention_ext(ctx, backend, qkv[0], qkv[1], qkv[2], num_heads, NULL, false, false, true); // [N, n_token, dim] x = ggml_nn_attention_ext(ctx, backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, false, true); // [N, n_token, dim]
x = post_attention(ctx, x); // [N, n_token, dim] x = post_attention(ctx, x); // [N, n_token, dim]
return x; return x;
} }
}; };
@ -347,7 +349,7 @@ public:
auto attn_in = modulate(ctx, norm1->forward(ctx, x), shift_msa, scale_msa); auto attn_in = modulate(ctx, norm1->forward(ctx, x), shift_msa, scale_msa);
auto qkv = attn->pre_attention(ctx, attn_in); auto qkv = attn->pre_attention(ctx, attn_in);
return {qkv, {NULL, NULL, NULL, NULL, NULL}}; return {qkv, {nullptr, nullptr, nullptr, nullptr, nullptr}};
} }
} }
@ -439,8 +441,8 @@ public:
auto qkv2 = std::get<1>(qkv_intermediates); auto qkv2 = std::get<1>(qkv_intermediates);
auto intermediates = std::get<2>(qkv_intermediates); auto intermediates = std::get<2>(qkv_intermediates);
auto attn_out = ggml_nn_attention_ext(ctx, backend, qkv[0], qkv[1], qkv[2], num_heads, NULL, false, false, flash_attn); // [N, n_token, dim] auto attn_out = ggml_nn_attention_ext(ctx, backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, false, flash_attn); // [N, n_token, dim]
auto attn2_out = ggml_nn_attention_ext(ctx, backend, qkv2[0], qkv2[1], qkv2[2], num_heads, NULL, false, false, flash_attn); // [N, n_token, dim] auto attn2_out = ggml_nn_attention_ext(ctx, backend, qkv2[0], qkv2[1], qkv2[2], num_heads, nullptr, false, false, flash_attn); // [N, n_token, dim]
x = post_attention_x(ctx, x = post_attention_x(ctx,
attn_out, attn_out,
attn2_out, attn2_out,
@ -456,7 +458,7 @@ public:
auto qkv = qkv_intermediates.first; auto qkv = qkv_intermediates.first;
auto intermediates = qkv_intermediates.second; auto intermediates = qkv_intermediates.second;
auto attn_out = ggml_nn_attention_ext(ctx, backend, qkv[0], qkv[1], qkv[2], num_heads, NULL, false, false, flash_attn); // [N, n_token, dim] auto attn_out = ggml_nn_attention_ext(ctx, backend, qkv[0], qkv[1], qkv[2], num_heads, nullptr, false, false, flash_attn); // [N, n_token, dim]
x = post_attention(ctx, x = post_attention(ctx,
attn_out, attn_out,
intermediates[0], intermediates[0],
@ -502,8 +504,8 @@ block_mixing(struct ggml_context* ctx,
qkv.push_back(ggml_concat(ctx, context_qkv[i], x_qkv[i], 1)); qkv.push_back(ggml_concat(ctx, context_qkv[i], x_qkv[i], 1));
} }
auto attn = ggml_nn_attention_ext(ctx, backend, qkv[0], qkv[1], qkv[2], x_block->num_heads, NULL, false, false, flash_attn); // [N, n_context + n_token, hidden_size] auto attn = ggml_nn_attention_ext(ctx, backend, qkv[0], qkv[1], qkv[2], x_block->num_heads, nullptr, false, false, flash_attn); // [N, n_context + n_token, hidden_size]
attn = ggml_cont(ctx, ggml_permute(ctx, attn, 0, 2, 1, 3)); // [n_context + n_token, N, hidden_size] attn = ggml_cont(ctx, ggml_permute(ctx, attn, 0, 2, 1, 3)); // [n_context + n_token, N, hidden_size]
auto context_attn = ggml_view_3d(ctx, auto context_attn = ggml_view_3d(ctx,
attn, attn,
attn->ne[0], attn->ne[0],
@ -532,7 +534,7 @@ block_mixing(struct ggml_context* ctx,
context_intermediates[3], context_intermediates[3],
context_intermediates[4]); context_intermediates[4]);
} else { } else {
context = NULL; context = nullptr;
} }
if (x_block->self_attn) { if (x_block->self_attn) {
@ -645,7 +647,7 @@ protected:
std::string qk_norm; std::string qk_norm;
bool flash_attn = false; bool flash_attn = false;
void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, std::string prefix = "") { void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, std::string prefix = "") override {
enum ggml_type wtype = GGML_TYPE_F32; enum ggml_type wtype = GGML_TYPE_F32;
params["pos_embed"] = ggml_new_tensor_3d(ctx, wtype, hidden_size, num_patchs, 1); params["pos_embed"] = ggml_new_tensor_3d(ctx, wtype, hidden_size, num_patchs, 1);
} }
@ -823,8 +825,8 @@ public:
ggml_backend_t backend, ggml_backend_t backend,
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* t, struct ggml_tensor* t,
struct ggml_tensor* y = NULL, struct ggml_tensor* y = nullptr,
struct ggml_tensor* context = NULL, struct ggml_tensor* context = nullptr,
std::vector<int> skip_layers = std::vector<int>()) { std::vector<int> skip_layers = std::vector<int>()) {
// Forward pass of DiT. // Forward pass of DiT.
// x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images) // x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
@ -843,14 +845,14 @@ public:
x = ggml_add(ctx, patch_embed, pos_embed); // [N, H*W, hidden_size] x = ggml_add(ctx, patch_embed, pos_embed); // [N, H*W, hidden_size]
auto c = t_embedder->forward(ctx, t); // [N, hidden_size] auto c = t_embedder->forward(ctx, t); // [N, hidden_size]
if (y != NULL && adm_in_channels != -1) { if (y != nullptr && adm_in_channels != -1) {
auto y_embedder = std::dynamic_pointer_cast<VectorEmbedder>(blocks["y_embedder"]); auto y_embedder = std::dynamic_pointer_cast<VectorEmbedder>(blocks["y_embedder"]);
y = y_embedder->forward(ctx, y); // [N, hidden_size] y = y_embedder->forward(ctx, y); // [N, hidden_size]
c = ggml_add(ctx, c, y); c = ggml_add(ctx, c, y);
} }
if (context != NULL) { if (context != nullptr) {
auto context_embedder = std::dynamic_pointer_cast<Linear>(blocks["context_embedder"]); auto context_embedder = std::dynamic_pointer_cast<Linear>(blocks["context_embedder"]);
context = context_embedder->forward(ctx, context); // [N, L, D] aka [N, L, 1536] context = context_embedder->forward(ctx, context); // [N, L, D] aka [N, L, 1536]
@ -875,7 +877,7 @@ struct MMDiTRunner : public GGMLRunner {
mmdit.init(params_ctx, tensor_types, prefix); mmdit.init(params_ctx, tensor_types, prefix);
} }
std::string get_desc() { std::string get_desc() override {
return "mmdit"; return "mmdit";
} }
@ -913,8 +915,8 @@ struct MMDiTRunner : public GGMLRunner {
struct ggml_tensor* timesteps, struct ggml_tensor* timesteps,
struct ggml_tensor* context, struct ggml_tensor* context,
struct ggml_tensor* y, struct ggml_tensor* y,
struct ggml_tensor** output = NULL, struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = NULL, struct ggml_context* output_ctx = nullptr,
std::vector<int> skip_layers = std::vector<int>()) { std::vector<int> skip_layers = std::vector<int>()) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
@ -930,11 +932,11 @@ struct MMDiTRunner : public GGMLRunner {
void test() { void test() {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != NULL); GGML_ASSERT(work_ctx != nullptr);
{ {
// cpu f16: pass // cpu f16: pass
@ -955,7 +957,7 @@ struct MMDiTRunner : public GGMLRunner {
ggml_set_f32(y, 0.01f); ggml_set_f32(y, 0.01f);
// print_ggml_tensor(y); // print_ggml_tensor(y);
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int t0 = ggml_time_ms(); int t0 = ggml_time_ms();
compute(8, x, timesteps, context, y, &out, work_ctx); compute(8, x, timesteps, context, y, &out, work_ctx);
@ -970,7 +972,7 @@ struct MMDiTRunner : public GGMLRunner {
// ggml_backend_t backend = ggml_backend_cuda_init(0); // ggml_backend_t backend = ggml_backend_cuda_init(0);
ggml_backend_t backend = ggml_backend_cpu_init(); ggml_backend_t backend = ggml_backend_cpu_init();
ggml_type model_data_type = GGML_TYPE_F16; ggml_type model_data_type = GGML_TYPE_F16;
std::shared_ptr<MMDiTRunner> mmdit = std::shared_ptr<MMDiTRunner>(new MMDiTRunner(backend, false, false)); std::shared_ptr<MMDiTRunner> mmdit = std::make_shared<MMDiTRunner>(backend, false, false);
{ {
LOG_INFO("loading from '%s'", file_path.c_str()); LOG_INFO("loading from '%s'", file_path.c_str());
View File
@ -1,7 +1,7 @@
#include <stdarg.h>
#include <algorithm> #include <algorithm>
#include <atomic> #include <atomic>
#include <chrono> #include <chrono>
#include <cstdarg>
#include <fstream> #include <fstream>
#include <functional> #include <functional>
#include <mutex> #include <mutex>
@ -869,7 +869,6 @@ uint16_t f8_e5m2_to_f16(uint8_t fp8) {
} }
if (exponent == 0) { // subnormal numbers if (exponent == 0) { // subnormal numbers
fp16_exponent = 0;
fp16_mantissa = (mantissa << 8); fp16_mantissa = (mantissa << 8);
return fp16_sign | fp16_mantissa; return fp16_sign | fp16_mantissa;
} }
@ -948,7 +947,7 @@ void convert_tensor(void* src,
ggml_fp16_to_fp32_row((ggml_fp16_t*)src, (float*)dst, n); ggml_fp16_to_fp32_row((ggml_fp16_t*)src, (float*)dst, n);
} else { } else {
auto qtype = ggml_get_type_traits(src_type); auto qtype = ggml_get_type_traits(src_type);
if (qtype->to_float == NULL) { if (qtype->to_float == nullptr) {
throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available",
ggml_type_name(src_type))); ggml_type_name(src_type)));
} }
@ -958,7 +957,7 @@ void convert_tensor(void* src,
// src_type == GGML_TYPE_F16 => dst_type is quantized // src_type == GGML_TYPE_F16 => dst_type is quantized
// src_type is quantized => dst_type == GGML_TYPE_F16 or dst_type is quantized // src_type is quantized => dst_type == GGML_TYPE_F16 or dst_type is quantized
auto qtype = ggml_get_type_traits(src_type); auto qtype = ggml_get_type_traits(src_type);
if (qtype->to_float == NULL) { if (qtype->to_float == nullptr) {
throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available",
ggml_type_name(src_type))); ggml_type_name(src_type)));
} }
@ -1020,7 +1019,7 @@ std::map<char, int> unicode_to_byte() {
bool is_zip_file(const std::string& file_path) { bool is_zip_file(const std::string& file_path) {
struct zip_t* zip = zip_open(file_path.c_str(), 0, 'r'); struct zip_t* zip = zip_open(file_path.c_str(), 0, 'r');
if (zip == NULL) { if (zip == nullptr) {
return false; return false;
} }
zip_close(zip); zip_close(zip);
@ -1116,8 +1115,8 @@ bool ModelLoader::init_from_gguf_file(const std::string& file_path, const std::s
file_paths_.push_back(file_path); file_paths_.push_back(file_path);
size_t file_index = file_paths_.size() - 1; size_t file_index = file_paths_.size() - 1;
gguf_context* ctx_gguf_ = NULL; gguf_context* ctx_gguf_ = nullptr;
ggml_context* ctx_meta_ = NULL; ggml_context* ctx_meta_ = nullptr;
ctx_gguf_ = gguf_init_from_file(file_path.c_str(), {true, &ctx_meta_}); ctx_gguf_ = gguf_init_from_file(file_path.c_str(), {true, &ctx_meta_});
if (!ctx_gguf_) { if (!ctx_gguf_) {
@ -1726,7 +1725,7 @@ bool ModelLoader::init_from_ckpt_file(const std::string& file_path, const std::s
size_t file_index = file_paths_.size() - 1; size_t file_index = file_paths_.size() - 1;
struct zip_t* zip = zip_open(file_path.c_str(), 0, 'r'); struct zip_t* zip = zip_open(file_path.c_str(), 0, 'r');
if (zip == NULL) { if (zip == nullptr) {
LOG_ERROR("failed to open '%s'", file_path.c_str()); LOG_ERROR("failed to open '%s'", file_path.c_str());
return false; return false;
} }
@ -1739,7 +1738,7 @@ bool ModelLoader::init_from_ckpt_file(const std::string& file_path, const std::s
if (pos != std::string::npos) { if (pos != std::string::npos) {
std::string dir = name.substr(0, pos); std::string dir = name.substr(0, pos);
printf("ZIP %d, name = %s, dir = %s \n", i, name.c_str(), dir.c_str()); printf("ZIP %d, name = %s, dir = %s \n", i, name.c_str(), dir.c_str());
void* pkl_data = NULL; void* pkl_data = nullptr;
size_t pkl_size; size_t pkl_size;
zip_entry_read(zip, &pkl_data, &pkl_size); zip_entry_read(zip, &pkl_data, &pkl_size);
@ -2144,10 +2143,10 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
for (int i = 0; i < n_threads; ++i) { for (int i = 0; i < n_threads; ++i) {
workers.emplace_back([&, file_path, is_zip]() { workers.emplace_back([&, file_path, is_zip]() {
std::ifstream file; std::ifstream file;
struct zip_t* zip = NULL; struct zip_t* zip = nullptr;
if (is_zip) { if (is_zip) {
zip = zip_open(file_path.c_str(), 0, 'r'); zip = zip_open(file_path.c_str(), 0, 'r');
if (zip == NULL) { if (zip == nullptr) {
LOG_ERROR("failed to open zip '%s'", file_path.c_str()); LOG_ERROR("failed to open zip '%s'", file_path.c_str());
failed = true; failed = true;
return; return;
@ -2172,7 +2171,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
} }
const TensorStorage& tensor_storage = *file_tensors[idx]; const TensorStorage& tensor_storage = *file_tensors[idx];
ggml_tensor* dst_tensor = NULL; ggml_tensor* dst_tensor = nullptr;
t0 = ggml_time_ms(); t0 = ggml_time_ms();
@ -2182,7 +2181,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
break; break;
} }
if (dst_tensor == NULL) { if (dst_tensor == nullptr) {
t1 = ggml_time_ms(); t1 = ggml_time_ms();
read_time_ms.fetch_add(t1 - t0); read_time_ms.fetch_add(t1 - t0);
continue; continue;
@ -2191,7 +2190,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
size_t nbytes_to_read = tensor_storage.nbytes_to_read(); size_t nbytes_to_read = tensor_storage.nbytes_to_read();
auto read_data = [&](char* buf, size_t n) { auto read_data = [&](char* buf, size_t n) {
if (zip != NULL) { if (zip != nullptr) {
zip_entry_openbyindex(zip, tensor_storage.index_in_zip); zip_entry_openbyindex(zip, tensor_storage.index_in_zip);
size_t entry_size = zip_entry_size(zip); size_t entry_size = zip_entry_size(zip);
if (entry_size != n) { if (entry_size != n) {
@ -2215,7 +2214,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
} }
}; };
if (dst_tensor->buffer == NULL || ggml_backend_buffer_is_host(dst_tensor->buffer)) { if (dst_tensor->buffer == nullptr || ggml_backend_buffer_is_host(dst_tensor->buffer)) {
if (tensor_storage.type == dst_tensor->type) { if (tensor_storage.type == dst_tensor->type) {
GGML_ASSERT(ggml_nbytes(dst_tensor) == tensor_storage.nbytes()); GGML_ASSERT(ggml_nbytes(dst_tensor) == tensor_storage.nbytes());
if (tensor_storage.is_f64 || tensor_storage.is_i64) { if (tensor_storage.is_f64 || tensor_storage.is_i64) {
@ -2317,7 +2316,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
} }
} }
} }
if (zip != NULL) { if (zip != nullptr) {
zip_close(zip); zip_close(zip);
} }
}); });
@ -2507,7 +2506,7 @@ bool ModelLoader::save_to_gguf_file(const std::string& file_path, ggml_type type
mem_size += tensor_storages.size() * ggml_tensor_overhead(); mem_size += tensor_storages.size() * ggml_tensor_overhead();
mem_size += get_params_mem_size(backend, type); mem_size += get_params_mem_size(backend, type);
LOG_INFO("model tensors mem size: %.2fMB", mem_size / 1024.f / 1024.f); LOG_INFO("model tensors mem size: %.2fMB", mem_size / 1024.f / 1024.f);
ggml_context* ggml_ctx = ggml_init({mem_size, NULL, false}); ggml_context* ggml_ctx = ggml_init({mem_size, nullptr, false});
gguf_context* gguf_ctx = gguf_init_empty(); gguf_context* gguf_ctx = gguf_init_empty();
@ -2533,7 +2532,7 @@ bool ModelLoader::save_to_gguf_file(const std::string& file_path, ggml_type type
std::lock_guard<std::mutex> lock(tensor_mutex); std::lock_guard<std::mutex> lock(tensor_mutex);
ggml_tensor* tensor = ggml_new_tensor(ggml_ctx, tensor_type, tensor_storage.n_dims, tensor_storage.ne); ggml_tensor* tensor = ggml_new_tensor(ggml_ctx, tensor_type, tensor_storage.n_dims, tensor_storage.ne);
if (tensor == NULL) { if (tensor == nullptr) {
LOG_ERROR("ggml_new_tensor failed"); LOG_ERROR("ggml_new_tensor failed");
return false; return false;
} }
@ -2566,7 +2565,7 @@ bool ModelLoader::save_to_gguf_file(const std::string& file_path, ggml_type type
int64_t ModelLoader::get_params_mem_size(ggml_backend_t backend, ggml_type type) { int64_t ModelLoader::get_params_mem_size(ggml_backend_t backend, ggml_type type) {
size_t alignment = 128; size_t alignment = 128;
if (backend != NULL) { if (backend != nullptr) {
alignment = ggml_backend_get_alignment(backend); alignment = ggml_backend_get_alignment(backend);
} }
int64_t mem_size = 0; int64_t mem_size = 0;
@ -2596,7 +2595,7 @@ bool convert(const char* input_path, const char* vae_path, const char* output_pa
return false; return false;
} }
if (vae_path != NULL && strlen(vae_path) > 0) { if (vae_path != nullptr && strlen(vae_path) > 0) {
if (!model_loader.init_from_file(vae_path, "vae.")) { if (!model_loader.init_from_file(vae_path, "vae.")) {
LOG_ERROR("init model loader from file failed: '%s'", vae_path); LOG_ERROR("init model loader from file failed: '%s'", vae_path);
return false; return false;
View File
@ -8,6 +8,7 @@
#include <sstream> #include <sstream>
#include <string> #include <string>
#include <tuple> #include <tuple>
#include <utility>
#include <vector> #include <vector>
#include "ggml-backend.h" #include "ggml-backend.h"
@ -140,8 +141,8 @@ struct TensorStorage {
TensorStorage() = default; TensorStorage() = default;
TensorStorage(const std::string& name, ggml_type type, const int64_t* ne, int n_dims, size_t file_index, size_t offset = 0) TensorStorage(std::string name, ggml_type type, const int64_t* ne, int n_dims, size_t file_index, size_t offset = 0)
: name(name), type(type), n_dims(n_dims), file_index(file_index), offset(offset) { : name(std::move(name)), type(type), n_dims(n_dims), file_index(file_index), offset(offset) {
for (int i = 0; i < n_dims; i++) { for (int i = 0; i < n_dims; i++) {
this->ne[i] = ne[i]; this->ne[i] = ne[i];
} }
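The TensorStorage constructor change above is the modernize-pass-by-value idiom: take the string by value and move it into the member, so rvalue callers pay no copy at all and lvalue callers pay exactly one. A reduced sketch with illustrative names:

#include <string>
#include <utility>

struct Storage {
    std::string name;

    // By-value parameter + std::move: one copy for lvalue arguments, zero for rvalues.
    explicit Storage(std::string name) : name(std::move(name)) {}
};

// Storage a(existing_name);            // copies once into the parameter, then moves
// Storage b("model.diffusion_model");  // the temporary is moved straight into the member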
View File
@ -472,8 +472,8 @@ public:
struct ggml_tensor* prompt_embeds_d = to_backend(prompt_embeds); struct ggml_tensor* prompt_embeds_d = to_backend(prompt_embeds);
struct ggml_tensor* id_embeds_d = to_backend(id_embeds); struct ggml_tensor* id_embeds_d = to_backend(id_embeds);
struct ggml_tensor* left = NULL; struct ggml_tensor* left = nullptr;
struct ggml_tensor* right = NULL; struct ggml_tensor* right = nullptr;
for (int i = 0; i < class_tokens_mask.size(); i++) { for (int i = 0; i < class_tokens_mask.size(); i++) {
if (class_tokens_mask[i]) { if (class_tokens_mask[i]) {
// printf(" 1,"); // printf(" 1,");
@ -528,7 +528,7 @@ public:
} }
} }
} }
struct ggml_tensor* updated_prompt_embeds = NULL; struct ggml_tensor* updated_prompt_embeds = nullptr;
if (pm_version == PM_VERSION_1) if (pm_version == PM_VERSION_1)
updated_prompt_embeds = id_encoder.forward(ctx0, updated_prompt_embeds = id_encoder.forward(ctx0,
runtime_backend, runtime_backend,
@ -638,7 +638,7 @@ struct PhotoMakerIDEmbed : public GGMLRunner {
pos = tensors.find("pmid.id_embeds"); pos = tensors.find("pmid.id_embeds");
if (pos != tensors.end()) if (pos != tensors.end())
return pos->second; return pos->second;
return NULL; return nullptr;
} }
}; };
View File
@ -7,7 +7,7 @@
void convolve(struct ggml_tensor* input, struct ggml_tensor* output, struct ggml_tensor* kernel, int padding) { void convolve(struct ggml_tensor* input, struct ggml_tensor* output, struct ggml_tensor* kernel, int padding) {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = 80 * input->ne[0] * input->ne[1]; // 20M for 512x512 params.mem_size = 80 * input->ne[0] * input->ne[1]; // 20M for 512x512
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* ctx0 = ggml_init(params); struct ggml_context* ctx0 = ggml_init(params);
struct ggml_tensor* kernel_fp16 = ggml_new_tensor_4d(ctx0, GGML_TYPE_F16, kernel->ne[0], kernel->ne[1], 1, 1); struct ggml_tensor* kernel_fp16 = ggml_new_tensor_4d(ctx0, GGML_TYPE_F16, kernel->ne[0], kernel->ne[1], 1, 1);
@ -165,7 +165,7 @@ void threshold_hystersis(struct ggml_tensor* img, float high_threshold, float lo
bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold, float weak, float strong, bool inverse) { bool preprocess_canny(sd_image_t img, float high_threshold, float low_threshold, float weak, float strong, bool inverse) {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(40 * img.width * img.height); // 10MB for 512x512 params.mem_size = static_cast<size_t>(40 * img.width * img.height); // 10MB for 512x512
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); struct ggml_context* work_ctx = ggml_init(params);
View File
@ -1,6 +1,8 @@
#ifndef __QWEN_IMAGE_HPP__ #ifndef __QWEN_IMAGE_HPP__
#define __QWEN_IMAGE_HPP__ #define __QWEN_IMAGE_HPP__
#include <memory>
#include "common.hpp" #include "common.hpp"
#include "flux.hpp" #include "flux.hpp"
#include "ggml_extend.hpp" #include "ggml_extend.hpp"
@ -539,7 +541,7 @@ namespace Qwen {
qwen_image.init(params_ctx, tensor_types, prefix); qwen_image.init(params_ctx, tensor_types, prefix);
} }
std::string get_desc() { std::string get_desc() override {
return "qwen_image"; return "qwen_image";
} }
@ -577,7 +579,7 @@ namespace Qwen {
auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, qwen_image_params.axes_dim_sum / 2, pos_len); auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, qwen_image_params.axes_dim_sum / 2, pos_len);
// pe->data = pe_vec.data(); // pe->data = pe_vec.data();
// print_ggml_tensor(pe, true, "pe"); // print_ggml_tensor(pe, true, "pe");
// pe->data = NULL; // pe->data = nullptr;
set_backend_tensor_data(pe, pe_vec.data()); set_backend_tensor_data(pe, pe_vec.data());
struct ggml_tensor* out = qwen_image.forward(compute_ctx, struct ggml_tensor* out = qwen_image.forward(compute_ctx,
@ -599,8 +601,8 @@ namespace Qwen {
struct ggml_tensor* context, struct ggml_tensor* context,
std::vector<ggml_tensor*> ref_latents = {}, std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false, bool increase_ref_index = false,
struct ggml_tensor** output = NULL, struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = NULL) { struct ggml_context* output_ctx = nullptr) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size] // context: [N, max_position, hidden_size]
@ -614,11 +616,11 @@ namespace Qwen {
void test() { void test() {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != NULL); GGML_ASSERT(work_ctx != nullptr);
{ {
// auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 16, 1); // auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 16, 16, 16, 1);
@ -634,7 +636,7 @@ namespace Qwen {
auto context = load_tensor_from_file(work_ctx, "./qwen_image_context.bin"); auto context = load_tensor_from_file(work_ctx, "./qwen_image_context.bin");
print_ggml_tensor(context); print_ggml_tensor(context);
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int t0 = ggml_time_ms(); int t0 = ggml_time_ms();
compute(8, x, timesteps, context, {}, false, &out, work_ctx); compute(8, x, timesteps, context, {}, false, &out, work_ctx);
@ -666,12 +668,12 @@ namespace Qwen {
} }
} }
std::shared_ptr<QwenImageRunner> qwen_image = std::shared_ptr<QwenImageRunner>(new QwenImageRunner(backend, std::shared_ptr<QwenImageRunner> qwen_image = std::make_shared<QwenImageRunner>(backend,
false, false,
tensor_types, tensor_types,
"model.diffusion_model", "model.diffusion_model",
VERSION_QWEN_IMAGE, VERSION_QWEN_IMAGE,
true)); true);
qwen_image->alloc_params_buffer(); qwen_image->alloc_params_buffer();
std::map<std::string, ggml_tensor*> tensors; std::map<std::string, ggml_tensor*> tensors;
View File
@ -5,11 +5,13 @@
#include <fstream> #include <fstream>
#include <iostream> #include <iostream>
#include <map> #include <map>
#include <memory>
#include <optional> #include <optional>
#include <regex> #include <regex>
#include <set> #include <set>
#include <sstream> #include <sstream>
#include <string> #include <string>
#include <utility>
#include <vector> #include <vector>
#include "clip.hpp" #include "clip.hpp"
@ -589,7 +591,7 @@ namespace Qwen {
int64_t window_size, int64_t window_size,
std::set<int> fullatt_block_indexes = {7, 15, 23, 31}, std::set<int> fullatt_block_indexes = {7, 15, 23, 31},
float eps = 1e-6f) float eps = 1e-6f)
: num_layers(num_layers), fullatt_block_indexes(fullatt_block_indexes), spatial_merge_size(spatial_merge_size) { : num_layers(num_layers), fullatt_block_indexes(std::move(fullatt_block_indexes)), spatial_merge_size(spatial_merge_size) {
blocks["patch_embed"] = std::shared_ptr<GGMLBlock>(new Qwen2_5_VisionPatchEmbed(llama_cpp_style, blocks["patch_embed"] = std::shared_ptr<GGMLBlock>(new Qwen2_5_VisionPatchEmbed(llama_cpp_style,
patch_size, patch_size,
temporal_patch_size, temporal_patch_size,
@ -949,7 +951,7 @@ namespace Qwen {
model.init(params_ctx, tensor_types, prefix); model.init(params_ctx, tensor_types, prefix);
} }
std::string get_desc() { std::string get_desc() override {
return "qwenvl2.5"; return "qwenvl2.5";
} }
@ -1011,7 +1013,7 @@ namespace Qwen {
struct ggml_tensor* input_ids, struct ggml_tensor* input_ids,
std::vector<std::pair<int, ggml_tensor*>> image_embeds, std::vector<std::pair<int, ggml_tensor*>> image_embeds,
ggml_tensor** output, ggml_tensor** output,
ggml_context* output_ctx = NULL) { ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(input_ids, image_embeds); return build_graph(input_ids, image_embeds);
}; };
@ -1162,7 +1164,7 @@ namespace Qwen {
auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, head_dim / 2, pos_len); auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, head_dim / 2, pos_len);
// pe->data = pe_vec.data(); // pe->data = pe_vec.data();
// print_ggml_tensor(pe); // print_ggml_tensor(pe);
// pe->data = NULL; // pe->data = nullptr;
set_backend_tensor_data(pe, pe_vec.data()); set_backend_tensor_data(pe, pe_vec.data());
struct ggml_tensor* hidden_states = vision_forward(compute_ctx, struct ggml_tensor* hidden_states = vision_forward(compute_ctx,
@ -1180,7 +1182,7 @@ namespace Qwen {
void encode_image(const int n_threads, void encode_image(const int n_threads,
struct ggml_tensor* image, struct ggml_tensor* image,
ggml_tensor** output, ggml_tensor** output,
ggml_context* output_ctx = NULL) { ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> struct ggml_cgraph* {
return build_encode_image_graph(image); return build_encode_image_graph(image);
}; };
@ -1246,11 +1248,11 @@ namespace Qwen {
void test() { void test() {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1GB
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != NULL); GGML_ASSERT(work_ctx != nullptr);
bool test_vit = true; bool test_vit = true;
bool test_decoder_with_vit = true; bool test_decoder_with_vit = true;
@ -1259,7 +1261,7 @@ namespace Qwen {
{ {
auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin"); auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin");
print_ggml_tensor(image, false, "image"); print_ggml_tensor(image, false, "image");
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int t0 = ggml_time_ms(); int t0 = ggml_time_ms();
model.encode_image(8, image, &out, work_ctx); model.encode_image(8, image, &out, work_ctx);
@ -1295,7 +1297,7 @@ namespace Qwen {
} }
printf("\n"); printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens); auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int t0 = ggml_time_ms(); int t0 = ggml_time_ms();
model.compute(8, input_ids, image_embeds, &out, work_ctx); model.compute(8, input_ids, image_embeds, &out, work_ctx);
@ -1308,7 +1310,7 @@ namespace Qwen {
// ggml_set_f32(image, 0.f); // ggml_set_f32(image, 0.f);
auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin"); auto image = load_tensor_from_file(work_ctx, "qwen2vl_normalized.bin");
print_ggml_tensor(image, false, "image"); print_ggml_tensor(image, false, "image");
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int t0 = ggml_time_ms(); int t0 = ggml_time_ms();
model.encode_image(8, image, &out, work_ctx); model.encode_image(8, image, &out, work_ctx);
@ -1330,7 +1332,7 @@ namespace Qwen {
} }
printf("\n"); printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens); auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int t0 = ggml_time_ms(); int t0 = ggml_time_ms();
model.compute(8, input_ids, {}, &out, work_ctx); model.compute(8, input_ids, {}, &out, work_ctx);
@ -1361,11 +1363,11 @@ namespace Qwen {
} }
} }
std::shared_ptr<Qwen2_5_VLEmbedder> qwenvl = std::shared_ptr<Qwen2_5_VLEmbedder>(new Qwen2_5_VLEmbedder(backend, std::shared_ptr<Qwen2_5_VLEmbedder> qwenvl = std::make_shared<Qwen2_5_VLEmbedder>(backend,
false, false,
tensor_types, tensor_types,
"qwen2vl", "qwen2vl",
true)); true);
qwenvl->alloc_params_buffer(); qwenvl->alloc_params_buffer();
std::map<std::string, ggml_tensor*> tensors; std::map<std::string, ggml_tensor*> tensors;
View File
@ -15,11 +15,11 @@ private:
std::default_random_engine generator; std::default_random_engine generator;
public: public:
void manual_seed(uint64_t seed) { void manual_seed(uint64_t seed) override {
generator.seed((unsigned int)seed); generator.seed((unsigned int)seed);
} }
std::vector<float> randn(uint32_t n) { std::vector<float> randn(uint32_t n) override {
std::vector<float> result; std::vector<float> result;
float mean = 0.0; float mean = 0.0;
float stddev = 1.0; float stddev = 1.0;
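The override keywords added above let the compiler verify that the RNG methods really replace virtuals from the base class rather than silently introducing new overloads. A small sketch of the pattern, assuming a base interface along these lines (the interface here is illustrative, not the repository's exact definition):

#include <cstdint>
#include <vector>

struct RNG {
    virtual ~RNG() = default;
    virtual void manual_seed(uint64_t seed) = 0;
    virtual std::vector<float> randn(uint32_t n) = 0;
};

struct MyRNG : RNG {
    // override turns a signature mismatch into a compile error.
    void manual_seed(uint64_t seed) override { (void)seed; }
    std::vector<float> randn(uint32_t n) override { return std::vector<float>(n, 0.0f); }
};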
View File
@ -93,12 +93,12 @@ public:
this->offset = 0; this->offset = 0;
} }
void manual_seed(uint64_t seed) { void manual_seed(uint64_t seed) override {
this->seed = seed; this->seed = seed;
this->offset = 0; this->offset = 0;
} }
std::vector<float> randn(uint32_t n) { std::vector<float> randn(uint32_t n) override {
std::vector<std::vector<uint32_t>> counter(4, std::vector<uint32_t>(n, 0)); std::vector<std::vector<uint32_t>> counter(4, std::vector<uint32_t>(n, 0));
for (uint32_t i = 0; i < n; i++) { for (uint32_t i = 0; i < n; i++) {
counter[0][i] = this->offset; counter[0][i] = this->offset;
View File
@ -82,10 +82,10 @@ void calculate_alphas_cumprod(float* alphas_cumprod,
class StableDiffusionGGML { class StableDiffusionGGML {
public: public:
ggml_backend_t backend = NULL; // general backend ggml_backend_t backend = nullptr; // general backend
ggml_backend_t clip_backend = NULL; ggml_backend_t clip_backend = nullptr;
ggml_backend_t control_net_backend = NULL; ggml_backend_t control_net_backend = nullptr;
ggml_backend_t vae_backend = NULL; ggml_backend_t vae_backend = nullptr;
SDVersion version; SDVersion version;
bool vae_decode_only = false; bool vae_decode_only = false;
@ -101,7 +101,7 @@ public:
std::shared_ptr<DiffusionModel> high_noise_diffusion_model; std::shared_ptr<DiffusionModel> high_noise_diffusion_model;
std::shared_ptr<VAE> first_stage_model; std::shared_ptr<VAE> first_stage_model;
std::shared_ptr<TinyAutoEncoder> tae_first_stage; std::shared_ptr<TinyAutoEncoder> tae_first_stage;
std::shared_ptr<ControlNet> control_net = NULL; std::shared_ptr<ControlNet> control_net = nullptr;
std::shared_ptr<PhotoMakerIDEncoder> pmid_model; std::shared_ptr<PhotoMakerIDEncoder> pmid_model;
std::shared_ptr<LoraModel> pmid_lora; std::shared_ptr<LoraModel> pmid_lora;
std::shared_ptr<PhotoMakerIDEmbed> pmid_id_embeds; std::shared_ptr<PhotoMakerIDEmbed> pmid_id_embeds;
@ -528,7 +528,7 @@ public:
// first_stage_model->get_param_tensors(tensors, "first_stage_model."); // first_stage_model->get_param_tensors(tensors, "first_stage_model.");
if (strlen(SAFE_STR(sd_ctx_params->control_net_path)) > 0) { if (strlen(SAFE_STR(sd_ctx_params->control_net_path)) > 0) {
ggml_backend_t controlnet_backend = NULL; ggml_backend_t controlnet_backend = nullptr;
if (sd_ctx_params->keep_control_net_on_cpu && !ggml_backend_is_cpu(backend)) { if (sd_ctx_params->keep_control_net_on_cpu && !ggml_backend_is_cpu(backend)) {
LOG_DEBUG("ControlNet: Using CPU backend"); LOG_DEBUG("ControlNet: Using CPU backend");
controlnet_backend = ggml_backend_cpu_init(); controlnet_backend = ggml_backend_cpu_init();
@ -584,11 +584,11 @@ public:
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(10 * 1024) * 1024; // 10M params.mem_size = static_cast<size_t>(10 * 1024) * 1024; // 10M
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
// LOG_DEBUG("mem_size %u ", params.mem_size); // LOG_DEBUG("mem_size %u ", params.mem_size);
struct ggml_context* ctx = ggml_init(params); // for alphas_cumprod and is_using_v_parameterization check struct ggml_context* ctx = ggml_init(params); // for alphas_cumprod and is_using_v_parameterization check
GGML_ASSERT(ctx != NULL); GGML_ASSERT(ctx != nullptr);
ggml_tensor* alphas_cumprod_tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, TIMESTEPS); ggml_tensor* alphas_cumprod_tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, TIMESTEPS);
calculate_alphas_cumprod((float*)alphas_cumprod_tensor->data); calculate_alphas_cumprod((float*)alphas_cumprod_tensor->data);
@ -861,8 +861,8 @@ public:
struct ggml_tensor* timesteps = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, 1); struct ggml_tensor* timesteps = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, 1);
ggml_set_f32(timesteps, 999); ggml_set_f32(timesteps, 999);
struct ggml_tensor* concat = is_inpaint ? ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 5, 1) : NULL; struct ggml_tensor* concat = is_inpaint ? ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 5, 1) : nullptr;
if (concat != NULL) { if (concat != nullptr) {
ggml_set_f32(concat, 0); ggml_set_f32(concat, 0);
} }
@ -976,7 +976,7 @@ public:
ggml_tensor* prompts_embeds, ggml_tensor* prompts_embeds,
ggml_tensor* id_embeds, ggml_tensor* id_embeds,
std::vector<bool>& class_tokens_mask) { std::vector<bool>& class_tokens_mask) {
ggml_tensor* res = NULL; ggml_tensor* res = nullptr;
pmid_model->compute(n_threads, init_img, prompts_embeds, id_embeds, class_tokens_mask, &res, work_ctx); pmid_model->compute(n_threads, init_img, prompts_embeds, id_embeds, class_tokens_mask, &res, work_ctx);
return res; return res;
} }
@ -986,7 +986,7 @@ public:
bool return_pooled = true, bool return_pooled = true,
int clip_skip = -1, int clip_skip = -1,
bool zero_out_masked = false) { bool zero_out_masked = false) {
ggml_tensor* output = NULL; ggml_tensor* output = nullptr;
if (zero_out_masked) { if (zero_out_masked) {
if (return_pooled) { if (return_pooled) {
output = ggml_new_tensor_1d(work_ctx, output = ggml_new_tensor_1d(work_ctx,
@ -1004,12 +1004,12 @@ public:
sd_image_f32_t image = sd_image_t_to_sd_image_f32_t(init_image); sd_image_f32_t image = sd_image_t_to_sd_image_f32_t(init_image);
sd_image_f32_t resized_image = clip_preprocess(image, clip_vision->vision_model.image_size, clip_vision->vision_model.image_size); sd_image_f32_t resized_image = clip_preprocess(image, clip_vision->vision_model.image_size, clip_vision->vision_model.image_size);
free(image.data); free(image.data);
image.data = NULL; image.data = nullptr;
ggml_tensor* pixel_values = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, resized_image.width, resized_image.height, 3, 1); ggml_tensor* pixel_values = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, resized_image.width, resized_image.height, 3, 1);
sd_image_f32_to_tensor(resized_image, pixel_values, false); sd_image_f32_to_tensor(resized_image, pixel_values, false);
free(resized_image.data); free(resized_image.data);
resized_image.data = NULL; resized_image.data = nullptr;
// print_ggml_tensor(pixel_values); // print_ggml_tensor(pixel_values);
clip_vision->compute(n_threads, pixel_values, return_pooled, clip_skip, &output, work_ctx); clip_vision->compute(n_threads, pixel_values, return_pooled, clip_skip, &output, work_ctx);
@ -1031,7 +1031,7 @@ public:
struct ggml_tensor* c_crossattn = get_clip_vision_output(work_ctx, init_image, true, -1, zero_out_masked); struct ggml_tensor* c_crossattn = get_clip_vision_output(work_ctx, init_image, true, -1, zero_out_masked);
// c_concat // c_concat
struct ggml_tensor* c_concat = NULL; struct ggml_tensor* c_concat = nullptr;
{ {
if (zero_out_masked) { if (zero_out_masked) {
c_concat = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width / 8, height / 8, 4, 1); c_concat = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width / 8, height / 8, 4, 1);
@ -1043,10 +1043,10 @@ public:
sd_image_f32_t image = sd_image_t_to_sd_image_f32_t(init_image); sd_image_f32_t image = sd_image_t_to_sd_image_f32_t(init_image);
sd_image_f32_t resized_image = resize_sd_image_f32_t(image, width, height); sd_image_f32_t resized_image = resize_sd_image_f32_t(image, width, height);
free(image.data); free(image.data);
image.data = NULL; image.data = nullptr;
sd_image_f32_to_tensor(resized_image, init_img, false); sd_image_f32_to_tensor(resized_image, init_img, false);
free(resized_image.data); free(resized_image.data);
resized_image.data = NULL; resized_image.data = nullptr;
} else { } else {
sd_image_to_tensor(init_image, init_img); sd_image_to_tensor(init_image, init_img);
} }
@ -1063,7 +1063,7 @@ public:
} }
// y // y
struct ggml_tensor* y = NULL; struct ggml_tensor* y = nullptr;
{ {
y = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, diffusion_model->get_adm_in_channels()); y = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, diffusion_model->get_adm_in_channels());
int out_dim = 256; int out_dim = 256;
@ -1082,7 +1082,7 @@ public:
if (diffusion_model->get_desc() == "Wan2.2-TI2V-5B") { if (diffusion_model->get_desc() == "Wan2.2-TI2V-5B") {
auto new_timesteps = std::vector<float>(init_latent->ne[2], timesteps[0]); auto new_timesteps = std::vector<float>(init_latent->ne[2], timesteps[0]);
if (denoise_mask != NULL) { if (denoise_mask != nullptr) {
float value = ggml_tensor_get_f32(denoise_mask, 0, 0, 0, 0); float value = ggml_tensor_get_f32(denoise_mask, 0, 0, 0, 0);
if (value == 0.f) { if (value == 0.f) {
new_timesteps[0] = 0.f; new_timesteps[0] = 0.f;
@ -1129,8 +1129,8 @@ public:
SDCondition id_cond, SDCondition id_cond,
std::vector<ggml_tensor*> ref_latents = {}, std::vector<ggml_tensor*> ref_latents = {},
bool increase_ref_index = false, bool increase_ref_index = false,
ggml_tensor* denoise_mask = NULL, ggml_tensor* denoise_mask = nullptr,
ggml_tensor* vace_context = NULL, ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f) { float vace_strength = 1.f) {
if (shifted_timestep > 0 && !sd_version_is_sdxl(version)) { if (shifted_timestep > 0 && !sd_version_is_sdxl(version)) {
LOG_WARN("timestep shifting is only supported for SDXL models!"); LOG_WARN("timestep shifting is only supported for SDXL models!");
@ -1157,15 +1157,15 @@ public:
struct ggml_tensor* noised_input = ggml_dup_tensor(work_ctx, x); struct ggml_tensor* noised_input = ggml_dup_tensor(work_ctx, x);
bool has_unconditioned = img_cfg_scale != 1.0 && uncond.c_crossattn != NULL; bool has_unconditioned = img_cfg_scale != 1.0 && uncond.c_crossattn != nullptr;
bool has_img_cond = cfg_scale != img_cfg_scale && img_cond.c_crossattn != NULL; bool has_img_cond = cfg_scale != img_cfg_scale && img_cond.c_crossattn != nullptr;
bool has_skiplayer = slg_scale != 0.0 && skip_layers.size() > 0; bool has_skiplayer = slg_scale != 0.0 && skip_layers.size() > 0;
// denoise wrapper // denoise wrapper
struct ggml_tensor* out_cond = ggml_dup_tensor(work_ctx, x); struct ggml_tensor* out_cond = ggml_dup_tensor(work_ctx, x);
struct ggml_tensor* out_uncond = NULL; struct ggml_tensor* out_uncond = nullptr;
struct ggml_tensor* out_skip = NULL; struct ggml_tensor* out_skip = nullptr;
struct ggml_tensor* out_img_cond = NULL; struct ggml_tensor* out_img_cond = nullptr;
if (has_unconditioned) { if (has_unconditioned) {
out_uncond = ggml_dup_tensor(work_ctx, x); out_uncond = ggml_dup_tensor(work_ctx, x);
@ -1223,7 +1223,7 @@ public:
std::vector<struct ggml_tensor*> controls; std::vector<struct ggml_tensor*> controls;
if (control_hint != NULL && control_net != NULL) { if (control_hint != nullptr && control_net != nullptr) {
control_net->compute(n_threads, noised_input, control_hint, timesteps, cond.c_crossattn, cond.c_vector); control_net->compute(n_threads, noised_input, control_hint, timesteps, cond.c_crossattn, cond.c_vector);
controls = control_net->controls; controls = control_net->controls;
// print_ggml_tensor(controls[12]); // print_ggml_tensor(controls[12]);
@ -1258,10 +1258,10 @@ public:
&out_cond); &out_cond);
} }
float* negative_data = NULL; float* negative_data = nullptr;
if (has_unconditioned) { if (has_unconditioned) {
// uncond // uncond
if (control_hint != NULL && control_net != NULL) { if (control_hint != nullptr && control_net != nullptr) {
control_net->compute(n_threads, noised_input, control_hint, timesteps, uncond.c_crossattn, uncond.c_vector); control_net->compute(n_threads, noised_input, control_hint, timesteps, uncond.c_crossattn, uncond.c_vector);
controls = control_net->controls; controls = control_net->controls;
} }
@ -1275,7 +1275,7 @@ public:
negative_data = (float*)out_uncond->data; negative_data = (float*)out_uncond->data;
} }
float* img_cond_data = NULL; float* img_cond_data = nullptr;
if (has_img_cond) { if (has_img_cond) {
diffusion_params.context = img_cond.c_crossattn; diffusion_params.context = img_cond.c_crossattn;
diffusion_params.c_concat = img_cond.c_concat; diffusion_params.c_concat = img_cond.c_concat;
@ -1288,7 +1288,7 @@ public:
int step_count = sigmas.size(); int step_count = sigmas.size();
bool is_skiplayer_step = has_skiplayer && step > (int)(guidance.slg.layer_start * step_count) && step < (int)(guidance.slg.layer_end * step_count); bool is_skiplayer_step = has_skiplayer && step > (int)(guidance.slg.layer_start * step_count) && step < (int)(guidance.slg.layer_end * step_count);
float* skip_layer_data = NULL; float* skip_layer_data = nullptr;
if (is_skiplayer_step) { if (is_skiplayer_step) {
LOG_DEBUG("Skipping layers at step %d\n", step); LOG_DEBUG("Skipping layers at step %d\n", step);
// skip layer (same as conditioned) // skip layer (same as conditioned)
@ -1479,7 +1479,7 @@ public:
ggml_tensor* vae_encode(ggml_context* work_ctx, ggml_tensor* x, bool encode_video = false) { ggml_tensor* vae_encode(ggml_context* work_ctx, ggml_tensor* x, bool encode_video = false) {
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
ggml_tensor* result = NULL; ggml_tensor* result = nullptr;
int W = x->ne[0] / 8; int W = x->ne[0] / 8;
int H = x->ne[1] / 8; int H = x->ne[1] / 8;
if (vae_tiling_params.enabled && !encode_video) { if (vae_tiling_params.enabled && !encode_video) {
@ -1526,7 +1526,7 @@ public:
if (vae_tiling_params.enabled && !encode_video) { if (vae_tiling_params.enabled && !encode_video) {
// split latent in 32x32 tiles and compute in several steps // split latent in 32x32 tiles and compute in several steps
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) { auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
tae_first_stage->compute(n_threads, in, false, &out, NULL); tae_first_stage->compute(n_threads, in, false, &out, nullptr);
}; };
sd_tiling(x, result, 8, 64, 0.5f, on_tiling); sd_tiling(x, result, 8, 64, 0.5f, on_tiling);
} else { } else {
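The tiled TAE path hands each tile to the on_tiling callback above; sd_tiling(x, result, 8, 64, 0.5f, on_tiling) is responsible for cutting the latent into overlapping tiles and blending the results. A purely hypothetical sketch of that driver contract (function and parameter names are invented; the real sd_tiling also creates ggml views and feathers the overlaps):

#include <algorithm>
#include <functional>

// Hypothetical tile walker: visit overlapping tile rectangles and let the
// caller run the encoder/decoder on each one.
void for_each_tile(int width, int height, int tile_size, float overlap_ratio,
                   const std::function<void(int x0, int y0, int x1, int y1)>& on_tile) {
    int step = std::max(1, (int)(tile_size * (1.0f - overlap_ratio)));
    for (int y = 0; y < height; y += step) {
        for (int x = 0; x < width; x += step) {
            int x1 = std::min(x + tile_size, width);
            int y1 = std::min(y + tile_size, height);
            on_tile(x, y, x1, y1);
            if (x1 == width) break;
        }
        if (std::min(y + tile_size, height) == height) break;
    }
}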
@ -1601,7 +1601,7 @@ public:
int64_t W = x->ne[0] * 8; int64_t W = x->ne[0] * 8;
int64_t H = x->ne[1] * 8; int64_t H = x->ne[1] * 8;
int64_t C = 3; int64_t C = 3;
ggml_tensor* result = NULL; ggml_tensor* result = nullptr;
if (decode_video) { if (decode_video) {
int T = x->ne[2]; int T = x->ne[2];
if (sd_version_is_wan(version)) { if (sd_version_is_wan(version)) {
@ -1641,7 +1641,7 @@ public:
// split latent in 32x32 tiles and compute in several steps // split latent in 32x32 tiles and compute in several steps
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) { auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
first_stage_model->compute(n_threads, in, true, &out, NULL); first_stage_model->compute(n_threads, in, true, &out, nullptr);
}; };
sd_tiling_non_square(x, result, 8, tile_size_x, tile_size_y, tile_overlap, on_tiling); sd_tiling_non_square(x, result, 8, tile_size_x, tile_size_y, tile_overlap, on_tiling);
} else { } else {
@ -1818,7 +1818,7 @@ void sd_ctx_params_init(sd_ctx_params_t* sd_ctx_params) {
char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) {
char* buf = (char*)malloc(4096); char* buf = (char*)malloc(4096);
if (!buf) if (!buf)
return NULL; return nullptr;
buf[0] = '\0'; buf[0] = '\0';
snprintf(buf + strlen(buf), 4096 - strlen(buf), snprintf(buf + strlen(buf), 4096 - strlen(buf),
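All of the *_params_to_str helpers use the same append-into-a-fixed-buffer idiom: write at the current end of the string and cap the length by the space that is left, so overlong output is truncated rather than overflowing. A standalone sketch of the idiom (not the project's code):

#include <cstdio>
#include <cstdlib>
#include <cstring>

char* describe(int n_threads, const char* model_path) {
    const size_t cap = 4096;
    char* buf = (char*)malloc(cap);
    if (!buf)
        return nullptr;
    buf[0] = '\0';
    // each call appends after the existing content, bounded by the remaining space
    snprintf(buf + strlen(buf), cap - strlen(buf), "n_threads: %d\n", n_threads);
    snprintf(buf + strlen(buf), cap - strlen(buf), "model_path: %s\n", model_path);
    return buf;  // caller frees
}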
@ -1838,7 +1838,6 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) {
"embedding_dir: %s\n" "embedding_dir: %s\n"
"photo_maker_path: %s\n" "photo_maker_path: %s\n"
"vae_decode_only: %s\n" "vae_decode_only: %s\n"
"vae_tiling: %s\n"
"free_params_immediately: %s\n" "free_params_immediately: %s\n"
"n_threads: %d\n" "n_threads: %d\n"
"wtype: %s\n" "wtype: %s\n"
@ -1902,7 +1901,7 @@ void sd_sample_params_init(sd_sample_params_t* sample_params) {
char* sd_sample_params_to_str(const sd_sample_params_t* sample_params) { char* sd_sample_params_to_str(const sd_sample_params_t* sample_params) {
char* buf = (char*)malloc(4096); char* buf = (char*)malloc(4096);
if (!buf) if (!buf)
return NULL; return nullptr;
buf[0] = '\0'; buf[0] = '\0';
snprintf(buf + strlen(buf), 4096 - strlen(buf), snprintf(buf + strlen(buf), 4096 - strlen(buf),
@ -1954,7 +1953,7 @@ void sd_img_gen_params_init(sd_img_gen_params_t* sd_img_gen_params) {
char* sd_img_gen_params_to_str(const sd_img_gen_params_t* sd_img_gen_params) { char* sd_img_gen_params_to_str(const sd_img_gen_params_t* sd_img_gen_params) {
char* buf = (char*)malloc(4096); char* buf = (char*)malloc(4096);
if (!buf) if (!buf)
return NULL; return nullptr;
buf[0] = '\0'; buf[0] = '\0';
char* sample_params_str = sd_sample_params_to_str(&sd_img_gen_params->sample_params); char* sample_params_str = sd_sample_params_to_str(&sd_img_gen_params->sample_params);
@ -2011,40 +2010,40 @@ void sd_vid_gen_params_init(sd_vid_gen_params_t* sd_vid_gen_params) {
} }
struct sd_ctx_t { struct sd_ctx_t {
StableDiffusionGGML* sd = NULL; StableDiffusionGGML* sd = nullptr;
}; };
sd_ctx_t* new_sd_ctx(const sd_ctx_params_t* sd_ctx_params) { sd_ctx_t* new_sd_ctx(const sd_ctx_params_t* sd_ctx_params) {
sd_ctx_t* sd_ctx = (sd_ctx_t*)malloc(sizeof(sd_ctx_t)); sd_ctx_t* sd_ctx = (sd_ctx_t*)malloc(sizeof(sd_ctx_t));
if (sd_ctx == NULL) { if (sd_ctx == nullptr) {
return NULL; return nullptr;
} }
sd_ctx->sd = new StableDiffusionGGML(); sd_ctx->sd = new StableDiffusionGGML();
if (sd_ctx->sd == NULL) { if (sd_ctx->sd == nullptr) {
free(sd_ctx); free(sd_ctx);
return NULL; return nullptr;
} }
if (!sd_ctx->sd->init(sd_ctx_params)) { if (!sd_ctx->sd->init(sd_ctx_params)) {
delete sd_ctx->sd; delete sd_ctx->sd;
sd_ctx->sd = NULL; sd_ctx->sd = nullptr;
free(sd_ctx); free(sd_ctx);
return NULL; return nullptr;
} }
return sd_ctx; return sd_ctx;
} }
void free_sd_ctx(sd_ctx_t* sd_ctx) { void free_sd_ctx(sd_ctx_t* sd_ctx) {
if (sd_ctx->sd != NULL) { if (sd_ctx->sd != nullptr) {
delete sd_ctx->sd; delete sd_ctx->sd;
sd_ctx->sd = NULL; sd_ctx->sd = nullptr;
} }
free(sd_ctx); free(sd_ctx);
} }
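new_sd_ctx/free_sd_ctx own the wrapped StableDiffusionGGML instance, so callers only see the opaque sd_ctx_t handle. A hedged usage sketch built from the API names visible in this diff (stable-diffusion.h); field assignments on the params struct are omitted because its layout is not shown here:

#include "stable-diffusion.h"

int run() {
    sd_ctx_params_t params;
    sd_ctx_params_init(&params);          // fill defaults
    // ... set model path / n_threads on `params` as needed ...

    sd_ctx_t* ctx = new_sd_ctx(&params);  // nullptr on allocation or load failure
    if (ctx == nullptr)
        return 1;

    // ... generate_image(ctx, ...) / generate_video(ctx, ...) ...

    free_sd_ctx(ctx);                     // deletes the wrapped StableDiffusionGGML
    return 0;
}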
enum sample_method_t sd_get_default_sample_method(const sd_ctx_t* sd_ctx) { enum sample_method_t sd_get_default_sample_method(const sd_ctx_t* sd_ctx) {
if (sd_ctx != NULL && sd_ctx->sd != NULL) { if (sd_ctx != nullptr && sd_ctx->sd != nullptr) {
SDVersion version = sd_ctx->sd->version; SDVersion version = sd_ctx->sd->version;
if (sd_version_is_dit(version)) if (sd_version_is_dit(version))
return EULER; return EULER;
@ -2075,13 +2074,13 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
std::vector<sd_image_t*> ref_images, std::vector<sd_image_t*> ref_images,
std::vector<ggml_tensor*> ref_latents, std::vector<ggml_tensor*> ref_latents,
bool increase_ref_index, bool increase_ref_index,
ggml_tensor* concat_latent = NULL, ggml_tensor* concat_latent = nullptr,
ggml_tensor* denoise_mask = NULL) { ggml_tensor* denoise_mask = nullptr) {
if (seed < 0) { if (seed < 0) {
// Generally, when using the provided command line, the seed is always >0. // Generally, when using the provided command line, the seed is always >0.
// However, to prevent potential issues if 'stable-diffusion.cpp' is invoked as a library // However, to prevent potential issues if 'stable-diffusion.cpp' is invoked as a library
// by a third party with a seed <0, let's incorporate randomization here. // by a third party with a seed <0, let's incorporate randomization here.
srand((int)time(NULL)); srand((int)time(nullptr));
seed = rand(); seed = rand();
} }
@ -2102,7 +2101,7 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
// Photo Maker // Photo Maker
std::string prompt_text_only; std::string prompt_text_only;
ggml_tensor* init_img = NULL; ggml_tensor* init_img = nullptr;
SDCondition id_cond; SDCondition id_cond;
std::vector<bool> class_tokens_mask; std::vector<bool> class_tokens_mask;
@ -2137,7 +2136,7 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
sd_image_f32_t id_image = sd_image_t_to_sd_image_f32_t(pm_params.id_images[i]); sd_image_f32_t id_image = sd_image_t_to_sd_image_f32_t(pm_params.id_images[i]);
sd_image_f32_t processed_id_image = clip_preprocess(id_image, clip_image_size, clip_image_size); sd_image_f32_t processed_id_image = clip_preprocess(id_image, clip_image_size, clip_image_size);
free(id_image.data); free(id_image.data);
id_image.data = NULL; id_image.data = nullptr;
processed_id_images.push_back(processed_id_image); processed_id_images.push_back(processed_id_image);
} }
@ -2148,7 +2147,7 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
for (auto& image : processed_id_images) { for (auto& image : processed_id_images) {
free(image.data); free(image.data);
image.data = NULL; image.data = nullptr;
} }
processed_id_images.clear(); processed_id_images.clear();
@ -2160,7 +2159,7 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
condition_params); condition_params);
id_cond = std::get<0>(cond_tup); id_cond = std::get<0>(cond_tup);
class_tokens_mask = std::get<1>(cond_tup); // class_tokens_mask = std::get<1>(cond_tup); //
struct ggml_tensor* id_embeds = NULL; struct ggml_tensor* id_embeds = nullptr;
if (pmv2 && pm_params.id_embed_path != nullptr) { if (pmv2 && pm_params.id_embed_path != nullptr) {
id_embeds = load_tensor_from_file(work_ctx, pm_params.id_embed_path); id_embeds = load_tensor_from_file(work_ctx, pm_params.id_embed_path);
// print_ggml_tensor(id_embeds, true, "id_embeds:"); // print_ggml_tensor(id_embeds, true, "id_embeds:");
@ -2186,7 +2185,6 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
} }
// Get learned condition // Get learned condition
t0 = ggml_time_ms();
condition_params.text = prompt; condition_params.text = prompt;
condition_params.zero_out_masked = false; condition_params.zero_out_masked = false;
SDCondition cond = sd_ctx->sd->cond_stage_model->get_learned_condition(work_ctx, SDCondition cond = sd_ctx->sd->cond_stage_model->get_learned_condition(work_ctx,
@ -2214,8 +2212,8 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
} }
// Control net hint // Control net hint
struct ggml_tensor* image_hint = NULL; struct ggml_tensor* image_hint = nullptr;
if (control_image.data != NULL) { if (control_image.data != nullptr) {
image_hint = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 3, 1); image_hint = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 3, 1);
sd_image_to_tensor(control_image, image_hint); sd_image_to_tensor(control_image, image_hint);
} }
@ -2234,8 +2232,8 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
int H = height / 8; int H = height / 8;
LOG_INFO("sampling using %s method", sampling_methods_str[sample_method]); LOG_INFO("sampling using %s method", sampling_methods_str[sample_method]);
struct ggml_tensor* control_latent = NULL; struct ggml_tensor* control_latent = nullptr;
if (sd_version_is_control(sd_ctx->sd->version) && image_hint != NULL) { if (sd_version_is_control(sd_ctx->sd->version) && image_hint != nullptr) {
control_latent = sd_ctx->sd->encode_first_stage(work_ctx, image_hint); control_latent = sd_ctx->sd->encode_first_stage(work_ctx, image_hint);
ggml_tensor_scale(control_latent, control_strength); ggml_tensor_scale(control_latent, control_strength);
} }
@ -2273,8 +2271,8 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
} }
} }
if (sd_ctx->sd->version == VERSION_FLEX_2 && control_latent != NULL && sd_ctx->sd->control_net == NULL) { if (sd_ctx->sd->version == VERSION_FLEX_2 && control_latent != nullptr && sd_ctx->sd->control_net == nullptr) {
bool no_inpaint = concat_latent == NULL; bool no_inpaint = concat_latent == nullptr;
if (no_inpaint) { if (no_inpaint) {
concat_latent = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, init_latent->ne[0], init_latent->ne[1], mask_channels + init_latent->ne[2], 1); concat_latent = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, init_latent->ne[0], init_latent->ne[1], mask_channels + init_latent->ne[2], 1);
} }
@ -2293,33 +2291,33 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
} }
} }
} }
} else if (concat_latent == NULL) { } else if (concat_latent == nullptr) {
concat_latent = empty_latent; concat_latent = empty_latent;
} }
cond.c_concat = concat_latent; cond.c_concat = concat_latent;
uncond.c_concat = empty_latent; uncond.c_concat = empty_latent;
denoise_mask = NULL; denoise_mask = nullptr;
} else if (sd_version_is_unet_edit(sd_ctx->sd->version)) { } else if (sd_version_is_unet_edit(sd_ctx->sd->version)) {
auto empty_latent = ggml_dup_tensor(work_ctx, init_latent); auto empty_latent = ggml_dup_tensor(work_ctx, init_latent);
ggml_set_f32(empty_latent, 0); ggml_set_f32(empty_latent, 0);
uncond.c_concat = empty_latent; uncond.c_concat = empty_latent;
cond.c_concat = ref_latents[0]; cond.c_concat = ref_latents[0];
if (cond.c_concat == NULL) { if (cond.c_concat == nullptr) {
cond.c_concat = empty_latent; cond.c_concat = empty_latent;
} }
} else if (sd_version_is_control(sd_ctx->sd->version)) { } else if (sd_version_is_control(sd_ctx->sd->version)) {
auto empty_latent = ggml_dup_tensor(work_ctx, init_latent); auto empty_latent = ggml_dup_tensor(work_ctx, init_latent);
ggml_set_f32(empty_latent, 0); ggml_set_f32(empty_latent, 0);
uncond.c_concat = empty_latent; uncond.c_concat = empty_latent;
if (sd_ctx->sd->control_net == NULL) { if (sd_ctx->sd->control_net == nullptr) {
cond.c_concat = control_latent; cond.c_concat = control_latent;
} }
if (cond.c_concat == NULL) { if (cond.c_concat == nullptr) {
cond.c_concat = empty_latent; cond.c_concat = empty_latent;
} }
} }
SDCondition img_cond; SDCondition img_cond;
if (uncond.c_crossattn != NULL && if (uncond.c_crossattn != nullptr &&
(sd_version_is_inpaint_or_unet_edit(sd_ctx->sd->version) && guidance.txt_cfg != guidance.img_cfg)) { (sd_version_is_inpaint_or_unet_edit(sd_ctx->sd->version) && guidance.txt_cfg != guidance.img_cfg)) {
img_cond = SDCondition(uncond.c_crossattn, uncond.c_vector, cond.c_concat); img_cond = SDCondition(uncond.c_crossattn, uncond.c_vector, cond.c_concat);
} }
@ -2380,7 +2378,7 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
t1 = ggml_time_ms(); t1 = ggml_time_ms();
struct ggml_tensor* img = sd_ctx->sd->decode_first_stage(work_ctx, final_latents[i] /* x_0 */); struct ggml_tensor* img = sd_ctx->sd->decode_first_stage(work_ctx, final_latents[i] /* x_0 */);
// print_ggml_tensor(img); // print_ggml_tensor(img);
if (img != NULL) { if (img != nullptr) {
decoded_images.push_back(img); decoded_images.push_back(img);
} }
int64_t t2 = ggml_time_ms(); int64_t t2 = ggml_time_ms();
@ -2393,9 +2391,9 @@ sd_image_t* generate_image_internal(sd_ctx_t* sd_ctx,
sd_ctx->sd->first_stage_model->free_params_buffer(); sd_ctx->sd->first_stage_model->free_params_buffer();
} }
sd_image_t* result_images = (sd_image_t*)calloc(batch_count, sizeof(sd_image_t)); sd_image_t* result_images = (sd_image_t*)calloc(batch_count, sizeof(sd_image_t));
if (result_images == NULL) { if (result_images == nullptr) {
ggml_free(work_ctx); ggml_free(work_ctx);
return NULL; return nullptr;
} }
for (size_t i = 0; i < decoded_images.size(); i++) { for (size_t i = 0; i < decoded_images.size(); i++) {
@ -2460,35 +2458,35 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* sd_img_g
model_version_to_str[sd_ctx->sd->version], model_version_to_str[sd_ctx->sd->version],
width, width,
height); height);
return NULL; return nullptr;
} }
} else if (width % 64 || height % 64) { } else if (width % 64 || height % 64) {
LOG_ERROR("Image dimensions must be must be a multiple of 64 on each axis for %s models. (Got %dx%d)", LOG_ERROR("Image dimensions must be must be a multiple of 64 on each axis for %s models. (Got %dx%d)",
model_version_to_str[sd_ctx->sd->version], model_version_to_str[sd_ctx->sd->version],
width, width,
height); height);
return NULL; return nullptr;
} }
LOG_DEBUG("generate_image %dx%d", width, height); LOG_DEBUG("generate_image %dx%d", width, height);
if (sd_ctx == NULL || sd_img_gen_params == NULL) { if (sd_ctx == nullptr || sd_img_gen_params == nullptr) {
return NULL; return nullptr;
} }
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
// LOG_DEBUG("mem_size %u ", params.mem_size); // LOG_DEBUG("mem_size %u ", params.mem_size);
struct ggml_context* work_ctx = ggml_init(params); struct ggml_context* work_ctx = ggml_init(params);
if (!work_ctx) { if (!work_ctx) {
LOG_ERROR("ggml_init() failed"); LOG_ERROR("ggml_init() failed");
return NULL; return nullptr;
} }
int64_t seed = sd_img_gen_params->seed; int64_t seed = sd_img_gen_params->seed;
if (seed < 0) { if (seed < 0) {
srand((int)time(NULL)); srand((int)time(nullptr));
seed = rand(); seed = rand();
} }
sd_ctx->sd->rng->manual_seed(seed); sd_ctx->sd->rng->manual_seed(seed);
@ -2500,9 +2498,9 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* sd_img_g
sd_ctx->sd->init_scheduler(sd_img_gen_params->sample_params.scheduler); sd_ctx->sd->init_scheduler(sd_img_gen_params->sample_params.scheduler);
std::vector<float> sigmas = sd_ctx->sd->denoiser->get_sigmas(sample_steps); std::vector<float> sigmas = sd_ctx->sd->denoiser->get_sigmas(sample_steps);
ggml_tensor* init_latent = NULL; ggml_tensor* init_latent = nullptr;
ggml_tensor* concat_latent = NULL; ggml_tensor* concat_latent = nullptr;
ggml_tensor* denoise_mask = NULL; ggml_tensor* denoise_mask = nullptr;
if (sd_img_gen_params->init_image.data) { if (sd_img_gen_params->init_image.data) {
LOG_INFO("IMG2IMG"); LOG_INFO("IMG2IMG");
@ -2529,7 +2527,7 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* sd_img_g
} else if (sd_ctx->sd->version == VERSION_FLEX_2) { } else if (sd_ctx->sd->version == VERSION_FLEX_2) {
mask_channels = 1 + init_latent->ne[2]; mask_channels = 1 + init_latent->ne[2];
} }
ggml_tensor* masked_latent = NULL; ggml_tensor* masked_latent = nullptr;
if (sd_ctx->sd->version != VERSION_FLEX_2) { if (sd_ctx->sd->version != VERSION_FLEX_2) {
// most inpaint models mask before vae // most inpaint models mask before vae
@ -2672,7 +2670,7 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* sd_img_g
ref_latents.push_back(latent); ref_latents.push_back(latent);
} }
if (sd_img_gen_params->init_image.data != NULL || sd_img_gen_params->ref_images_count > 0) { if (sd_img_gen_params->init_image.data != nullptr || sd_img_gen_params->ref_images_count > 0) {
size_t t1 = ggml_time_ms(); size_t t1 = ggml_time_ms();
LOG_INFO("encode_first_stage completed, taking %.2fs", (t1 - t0) * 1.0f / 1000); LOG_INFO("encode_first_stage completed, taking %.2fs", (t1 - t0) * 1.0f / 1000);
} }
@ -2714,8 +2712,8 @@ sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* sd_img_g
} }
SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* sd_vid_gen_params, int* num_frames_out) { SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* sd_vid_gen_params, int* num_frames_out) {
if (sd_ctx == NULL || sd_vid_gen_params == NULL) { if (sd_ctx == nullptr || sd_vid_gen_params == nullptr) {
return NULL; return nullptr;
} }
std::string prompt = SAFE_STR(sd_vid_gen_params->prompt); std::string prompt = SAFE_STR(sd_vid_gen_params->prompt);
@ -2752,24 +2750,23 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
} }
} }
LOG_DEBUG("switching from high noise model at step %d", high_noise_sample_steps); LOG_DEBUG("switching from high noise model at step %d", high_noise_sample_steps);
sample_steps = total_steps - high_noise_sample_steps;
} }
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
// LOG_DEBUG("mem_size %u ", params.mem_size); // LOG_DEBUG("mem_size %u ", params.mem_size);
struct ggml_context* work_ctx = ggml_init(params); struct ggml_context* work_ctx = ggml_init(params);
if (!work_ctx) { if (!work_ctx) {
LOG_ERROR("ggml_init() failed"); LOG_ERROR("ggml_init() failed");
return NULL; return nullptr;
} }
int64_t seed = sd_vid_gen_params->seed; int64_t seed = sd_vid_gen_params->seed;
if (seed < 0) { if (seed < 0) {
seed = (int)time(NULL); seed = (int)time(nullptr);
} }
sd_ctx->sd->rng->manual_seed(seed); sd_ctx->sd->rng->manual_seed(seed);
@ -2779,11 +2776,11 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
// Apply lora // Apply lora
prompt = sd_ctx->sd->apply_loras_from_prompt(prompt); prompt = sd_ctx->sd->apply_loras_from_prompt(prompt);
ggml_tensor* init_latent = NULL; ggml_tensor* init_latent = nullptr;
ggml_tensor* clip_vision_output = NULL; ggml_tensor* clip_vision_output = nullptr;
ggml_tensor* concat_latent = NULL; ggml_tensor* concat_latent = nullptr;
ggml_tensor* denoise_mask = NULL; ggml_tensor* denoise_mask = nullptr;
ggml_tensor* vace_context = NULL; ggml_tensor* vace_context = nullptr;
int64_t ref_image_num = 0; // for vace int64_t ref_image_num = 0; // for vace
if (sd_ctx->sd->diffusion_model->get_desc() == "Wan2.1-I2V-14B" || if (sd_ctx->sd->diffusion_model->get_desc() == "Wan2.1-I2V-14B" ||
sd_ctx->sd->diffusion_model->get_desc() == "Wan2.2-I2V-14B" || sd_ctx->sd->diffusion_model->get_desc() == "Wan2.2-I2V-14B" ||
@ -2799,7 +2796,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
} }
if (sd_ctx->sd->diffusion_model->get_desc() == "Wan2.1-FLF2V-14B") { if (sd_ctx->sd->diffusion_model->get_desc() == "Wan2.1-FLF2V-14B") {
ggml_tensor* end_image_clip_vision_output = NULL; ggml_tensor* end_image_clip_vision_output = nullptr;
if (sd_vid_gen_params->end_image.data) { if (sd_vid_gen_params->end_image.data) {
end_image_clip_vision_output = sd_ctx->sd->get_clip_vision_output(work_ctx, sd_vid_gen_params->end_image, false, -2); end_image_clip_vision_output = sd_ctx->sd->get_clip_vision_output(work_ctx, sd_vid_gen_params->end_image, false, -2);
} else { } else {
@ -2880,7 +2877,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
sd_ctx->sd->diffusion_model->get_desc() == "Wan2.x-VACE-14B") { sd_ctx->sd->diffusion_model->get_desc() == "Wan2.x-VACE-14B") {
LOG_INFO("VACE"); LOG_INFO("VACE");
int64_t t1 = ggml_time_ms(); int64_t t1 = ggml_time_ms();
ggml_tensor* ref_image_latent = NULL; ggml_tensor* ref_image_latent = nullptr;
if (sd_vid_gen_params->init_image.data) { if (sd_vid_gen_params->init_image.data) {
ggml_tensor* ref_img = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 3, 1); ggml_tensor* ref_img = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, width, height, 3, 1);
sd_image_to_tensor(sd_vid_gen_params->init_image, ref_img); sd_image_to_tensor(sd_vid_gen_params->init_image, ref_img);
@ -2953,7 +2950,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
LOG_INFO("encode_first_stage completed, taking %" PRId64 " ms", t2 - t1); LOG_INFO("encode_first_stage completed, taking %" PRId64 " ms", t2 - t1);
} }
if (init_latent == NULL) { if (init_latent == nullptr) {
init_latent = generate_init_latent(sd_ctx, work_ctx, width, height, frames, true); init_latent = generate_init_latent(sd_ctx, work_ctx, width, height, frames, true);
} }
@ -3016,7 +3013,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
cond, cond,
uncond, uncond,
{}, {},
NULL, nullptr,
0, 0,
sd_vid_gen_params->high_noise_sample_params.guidance, sd_vid_gen_params->high_noise_sample_params.guidance,
sd_vid_gen_params->high_noise_sample_params.eta, sd_vid_gen_params->high_noise_sample_params.eta,
@ -3036,7 +3033,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
if (sd_ctx->sd->free_params_immediately) { if (sd_ctx->sd->free_params_immediately) {
sd_ctx->sd->high_noise_diffusion_model->free_params_buffer(); sd_ctx->sd->high_noise_diffusion_model->free_params_buffer();
} }
noise = NULL; noise = nullptr;
} }
// Sample // Sample
@ -3052,7 +3049,7 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
cond, cond,
uncond, uncond,
{}, {},
NULL, nullptr,
0, 0,
sd_vid_gen_params->sample_params.guidance, sd_vid_gen_params->sample_params.guidance,
sd_vid_gen_params->sample_params.eta, sd_vid_gen_params->sample_params.eta,
@ -3098,9 +3095,9 @@ SD_API sd_image_t* generate_video(sd_ctx_t* sd_ctx, const sd_vid_gen_params_t* s
} }
sd_image_t* result_images = (sd_image_t*)calloc(vid->ne[2], sizeof(sd_image_t)); sd_image_t* result_images = (sd_image_t*)calloc(vid->ne[2], sizeof(sd_image_t));
if (result_images == NULL) { if (result_images == nullptr) {
ggml_free(work_ctx); ggml_free(work_ctx);
return NULL; return nullptr;
} }
*num_frames_out = vid->ne[2]; *num_frames_out = vid->ne[2];

66
t5.hpp
View File

@ -1,7 +1,7 @@
#ifndef __T5_HPP__ #ifndef __T5_HPP__
#define __T5_HPP__ #define __T5_HPP__
#include <float.h> #include <cfloat>
#include <limits> #include <limits>
#include <map> #include <map>
#include <memory> #include <memory>
@ -461,7 +461,7 @@ protected:
int64_t hidden_size; int64_t hidden_size;
float eps; float eps;
void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") { void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
enum ggml_type wtype = GGML_TYPE_F32; enum ggml_type wtype = GGML_TYPE_F32;
params["weight"] = ggml_new_tensor_1d(ctx, wtype, hidden_size); params["weight"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
} }
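This hunk is the modernize-use-override check from the new .clang-tidy file: methods that override a virtual get the explicit keyword, so any later signature drift in the base class becomes a compile error instead of a silent non-override. A generic before/after (class names are placeholders, not from this codebase):

struct Base {
    virtual void init_params() {}
    virtual ~Base() = default;
};

struct Derived : Base {
    // before: plain `void init_params() {}` silently stops overriding if the
    // base signature ever changes; after: the compiler rejects the mismatch
    void init_params() override {}
};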
@ -472,7 +472,7 @@ public:
: hidden_size(hidden_size), : hidden_size(hidden_size),
eps(eps) {} eps(eps) {}
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
struct ggml_tensor* w = params["weight"]; struct ggml_tensor* w = params["weight"];
x = ggml_rms_norm(ctx, x, eps); x = ggml_rms_norm(ctx, x, eps);
x = ggml_mul(ctx, x, w); x = ggml_mul(ctx, x, w);
@ -487,7 +487,7 @@ public:
blocks["wo"] = std::shared_ptr<GGMLBlock>(new Linear(ff_dim, model_dim, false)); blocks["wo"] = std::shared_ptr<GGMLBlock>(new Linear(ff_dim, model_dim, false));
} }
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
// x: [N, n_token, model_dim] // x: [N, n_token, model_dim]
auto wi = std::dynamic_pointer_cast<Linear>(blocks["wi"]); auto wi = std::dynamic_pointer_cast<Linear>(blocks["wi"]);
auto wo = std::dynamic_pointer_cast<Linear>(blocks["wo"]); auto wo = std::dynamic_pointer_cast<Linear>(blocks["wo"]);
@ -509,7 +509,7 @@ public:
blocks["wo"] = std::shared_ptr<GGMLBlock>(new Linear(ff_dim, model_dim, false, false, false, scale)); blocks["wo"] = std::shared_ptr<GGMLBlock>(new Linear(ff_dim, model_dim, false, false, false, scale));
} }
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
// x: [N, n_token, model_dim] // x: [N, n_token, model_dim]
auto wi_0 = std::dynamic_pointer_cast<Linear>(blocks["wi_0"]); auto wi_0 = std::dynamic_pointer_cast<Linear>(blocks["wi_0"]);
auto wi_1 = std::dynamic_pointer_cast<Linear>(blocks["wi_1"]); auto wi_1 = std::dynamic_pointer_cast<Linear>(blocks["wi_1"]);
@ -530,7 +530,7 @@ public:
blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new T5LayerNorm(model_dim)); blocks["layer_norm"] = std::shared_ptr<GGMLBlock>(new T5LayerNorm(model_dim));
} }
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
// x: [N, n_token, model_dim] // x: [N, n_token, model_dim]
auto DenseReluDense = std::dynamic_pointer_cast<T5DenseGatedActDense>(blocks["DenseReluDense"]); auto DenseReluDense = std::dynamic_pointer_cast<T5DenseGatedActDense>(blocks["DenseReluDense"]);
auto layer_norm = std::dynamic_pointer_cast<T5LayerNorm>(blocks["layer_norm"]); auto layer_norm = std::dynamic_pointer_cast<T5LayerNorm>(blocks["layer_norm"]);
@ -582,9 +582,9 @@ public:
std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(struct ggml_context* ctx, std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(struct ggml_context* ctx,
ggml_backend_t backend, ggml_backend_t backend,
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* past_bias = NULL, struct ggml_tensor* past_bias = nullptr,
struct ggml_tensor* mask = NULL, struct ggml_tensor* mask = nullptr,
struct ggml_tensor* relative_position_bucket = NULL) { struct ggml_tensor* relative_position_bucket = nullptr) {
auto q_proj = std::dynamic_pointer_cast<Linear>(blocks["q"]); auto q_proj = std::dynamic_pointer_cast<Linear>(blocks["q"]);
auto k_proj = std::dynamic_pointer_cast<Linear>(blocks["k"]); auto k_proj = std::dynamic_pointer_cast<Linear>(blocks["k"]);
auto v_proj = std::dynamic_pointer_cast<Linear>(blocks["v"]); auto v_proj = std::dynamic_pointer_cast<Linear>(blocks["v"]);
@ -597,11 +597,11 @@ public:
auto k = k_proj->forward(ctx, x); auto k = k_proj->forward(ctx, x);
auto v = v_proj->forward(ctx, x); auto v = v_proj->forward(ctx, x);
if (using_relative_attention_bias && relative_position_bucket != NULL) { if (using_relative_attention_bias && relative_position_bucket != nullptr) {
past_bias = compute_bias(ctx, relative_position_bucket); past_bias = compute_bias(ctx, relative_position_bucket);
} }
if (past_bias != NULL) { if (past_bias != nullptr) {
if (mask != NULL) { if (mask != nullptr) {
mask = ggml_repeat(ctx, mask, past_bias); mask = ggml_repeat(ctx, mask, past_bias);
mask = ggml_add(ctx, mask, past_bias); mask = ggml_add(ctx, mask, past_bias);
} else { } else {
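T5Attention folds the relative position bias into the attention mask so the attention kernel only has to add a single tensor. A hedged sketch of that fold; the else branch is cut off in this hunk and is assumed to simply use the bias as the mask:

#include "ggml.h"

static ggml_tensor* fold_bias_into_mask(ggml_context* ctx,
                                        ggml_tensor* mask,        // may be nullptr
                                        ggml_tensor* past_bias) { // may be nullptr
    if (past_bias == nullptr)
        return mask;
    if (mask == nullptr)
        return past_bias;                      // assumption: bias alone acts as the mask
    mask = ggml_repeat(ctx, mask, past_bias);  // broadcast the mask to the bias shape
    return ggml_add(ctx, mask, past_bias);     // additive bias, mirroring the hunk above
}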
@ -632,9 +632,9 @@ public:
std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(struct ggml_context* ctx, std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(struct ggml_context* ctx,
ggml_backend_t backend, ggml_backend_t backend,
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* past_bias = NULL, struct ggml_tensor* past_bias = nullptr,
struct ggml_tensor* mask = NULL, struct ggml_tensor* mask = nullptr,
struct ggml_tensor* relative_position_bucket = NULL) { struct ggml_tensor* relative_position_bucket = nullptr) {
// x: [N, n_token, model_dim] // x: [N, n_token, model_dim]
auto SelfAttention = std::dynamic_pointer_cast<T5Attention>(blocks["SelfAttention"]); auto SelfAttention = std::dynamic_pointer_cast<T5Attention>(blocks["SelfAttention"]);
auto layer_norm = std::dynamic_pointer_cast<T5LayerNorm>(blocks["layer_norm"]); auto layer_norm = std::dynamic_pointer_cast<T5LayerNorm>(blocks["layer_norm"]);
@ -659,9 +659,9 @@ public:
std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(struct ggml_context* ctx, std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(struct ggml_context* ctx,
ggml_backend_t backend, ggml_backend_t backend,
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* past_bias = NULL, struct ggml_tensor* past_bias = nullptr,
struct ggml_tensor* mask = NULL, struct ggml_tensor* mask = nullptr,
struct ggml_tensor* relative_position_bucket = NULL) { struct ggml_tensor* relative_position_bucket = nullptr) {
// x: [N, n_token, model_dim] // x: [N, n_token, model_dim]
auto layer_0 = std::dynamic_pointer_cast<T5LayerSelfAttention>(blocks["layer.0"]); auto layer_0 = std::dynamic_pointer_cast<T5LayerSelfAttention>(blocks["layer.0"]);
auto layer_1 = std::dynamic_pointer_cast<T5LayerFF>(blocks["layer.1"]); auto layer_1 = std::dynamic_pointer_cast<T5LayerFF>(blocks["layer.1"]);
@ -695,9 +695,9 @@ public:
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* forward(struct ggml_context* ctx,
ggml_backend_t backend, ggml_backend_t backend,
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* past_bias = NULL, struct ggml_tensor* past_bias = nullptr,
struct ggml_tensor* attention_mask = NULL, struct ggml_tensor* attention_mask = nullptr,
struct ggml_tensor* relative_position_bucket = NULL) { struct ggml_tensor* relative_position_bucket = nullptr) {
// x: [N, n_token, model_dim] // x: [N, n_token, model_dim]
for (int i = 0; i < num_layers; i++) { for (int i = 0; i < num_layers; i++) {
auto block = std::dynamic_pointer_cast<T5Block>(blocks["block." + std::to_string(i)]); auto block = std::dynamic_pointer_cast<T5Block>(blocks["block." + std::to_string(i)]);
@ -743,9 +743,9 @@ public:
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* forward(struct ggml_context* ctx,
ggml_backend_t backend, ggml_backend_t backend,
struct ggml_tensor* input_ids, struct ggml_tensor* input_ids,
struct ggml_tensor* past_bias = NULL, struct ggml_tensor* past_bias = nullptr,
struct ggml_tensor* attention_mask = NULL, struct ggml_tensor* attention_mask = nullptr,
struct ggml_tensor* relative_position_bucket = NULL) { struct ggml_tensor* relative_position_bucket = nullptr) {
// input_ids: [N, n_token] // input_ids: [N, n_token]
auto shared = std::dynamic_pointer_cast<Embedding>(blocks["shared"]); auto shared = std::dynamic_pointer_cast<Embedding>(blocks["shared"]);
@ -776,7 +776,7 @@ struct T5Runner : public GGMLRunner {
model.init(params_ctx, tensor_types, prefix); model.init(params_ctx, tensor_types, prefix);
} }
std::string get_desc() { std::string get_desc() override {
return "t5"; return "t5";
} }
@ -788,16 +788,16 @@ struct T5Runner : public GGMLRunner {
ggml_backend_t backend, ggml_backend_t backend,
struct ggml_tensor* input_ids, struct ggml_tensor* input_ids,
struct ggml_tensor* relative_position_bucket, struct ggml_tensor* relative_position_bucket,
struct ggml_tensor* attention_mask = NULL) { struct ggml_tensor* attention_mask = nullptr) {
size_t N = input_ids->ne[1]; size_t N = input_ids->ne[1];
size_t n_token = input_ids->ne[0]; size_t n_token = input_ids->ne[0];
auto hidden_states = model.forward(ctx, backend, input_ids, NULL, attention_mask, relative_position_bucket); // [N, n_token, model_dim] auto hidden_states = model.forward(ctx, backend, input_ids, nullptr, attention_mask, relative_position_bucket); // [N, n_token, model_dim]
return hidden_states; return hidden_states;
} }
struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids, struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids,
struct ggml_tensor* attention_mask = NULL) { struct ggml_tensor* attention_mask = nullptr) {
struct ggml_cgraph* gf = ggml_new_graph(compute_ctx); struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
input_ids = to_backend(input_ids); input_ids = to_backend(input_ids);
@ -829,7 +829,7 @@ struct T5Runner : public GGMLRunner {
struct ggml_tensor* input_ids, struct ggml_tensor* input_ids,
struct ggml_tensor* attention_mask, struct ggml_tensor* attention_mask,
ggml_tensor** output, ggml_tensor** output,
ggml_context* output_ctx = NULL) { ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(input_ids, attention_mask); return build_graph(input_ids, attention_mask);
}; };
@ -968,11 +968,11 @@ struct T5Embedder {
void test() { void test() {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != NULL); GGML_ASSERT(work_ctx != nullptr);
{ {
std::string text("a lovely cat"); std::string text("a lovely cat");
@ -987,7 +987,7 @@ struct T5Embedder {
printf("\n"); printf("\n");
auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens); auto input_ids = vector_to_ggml_tensor_i32(work_ctx, tokens);
auto attention_mask = vector_to_ggml_tensor(work_ctx, masks); auto attention_mask = vector_to_ggml_tensor(work_ctx, masks);
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int t0 = ggml_time_ms(); int t0 = ggml_time_ms();
model.compute(8, input_ids, attention_mask, &out, work_ctx); model.compute(8, input_ids, attention_mask, &out, work_ctx);
@ -1022,7 +1022,7 @@ struct T5Embedder {
} }
} }
std::shared_ptr<T5Embedder> t5 = std::shared_ptr<T5Embedder>(new T5Embedder(backend, false, tensor_types, "", true)); std::shared_ptr<T5Embedder> t5 = std::make_shared<T5Embedder>(backend, false, tensor_types, "", true);
t5->alloc_params_buffer(); t5->alloc_params_buffer();
std::map<std::string, ggml_tensor*> tensors; std::map<std::string, ggml_tensor*> tensors;
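The std::make_shared change above is the modernize-make-shared check in action: one allocation for the object plus its control block, and no leak window if another argument's construction throws mid-expression. A generic before/after (Widget is a placeholder type):

#include <memory>

struct Widget { Widget(int, bool) {} };

void example() {
    // before: separate allocations for the object and the control block
    std::shared_ptr<Widget> a = std::shared_ptr<Widget>(new Widget(1, true));
    // after: single allocation, exception-safe
    auto b = std::make_shared<Widget>(1, true);
}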

10
tae.hpp
View File

@ -29,7 +29,7 @@ public:
} }
} }
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
// x: [n, n_in, h, w] // x: [n, n_in, h, w]
// return: [n, n_out, h, w] // return: [n, n_out, h, w]
@ -86,7 +86,7 @@ public:
blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, z_channels, {3, 3}, {1, 1}, {1, 1})); blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, z_channels, {3, 3}, {1, 1}, {1, 1}));
} }
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
// x: [n, in_channels, h, w] // x: [n, in_channels, h, w]
// return: [n, z_channels, h/8, w/8] // return: [n, z_channels, h/8, w/8]
@ -136,7 +136,7 @@ public:
blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, out_channels, {3, 3}, {1, 1}, {1, 1})); blocks[std::to_string(index++)] = std::shared_ptr<GGMLBlock>(new Conv2d(channels, out_channels, {3, 3}, {1, 1}, {1, 1}));
} }
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* z) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* z) override {
// z: [n, z_channels, h, w] // z: [n, z_channels, h, w]
// return: [n, out_channels, h*8, w*8] // return: [n, out_channels, h*8, w*8]
@ -218,7 +218,7 @@ struct TinyAutoEncoder : public GGMLRunner {
} }
} }
std::string get_desc() { std::string get_desc() override {
return "taesd"; return "taesd";
} }
@ -261,7 +261,7 @@ struct TinyAutoEncoder : public GGMLRunner {
struct ggml_tensor* z, struct ggml_tensor* z,
bool decode_graph, bool decode_graph,
struct ggml_tensor** output, struct ggml_tensor** output,
struct ggml_context* output_ctx = NULL) { struct ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(z, decode_graph); return build_graph(z, decode_graph);
}; };

unet.hpp
View File

@ -384,8 +384,8 @@ public:
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* timesteps, struct ggml_tensor* timesteps,
struct ggml_tensor* context, struct ggml_tensor* context,
struct ggml_tensor* c_concat = NULL, struct ggml_tensor* c_concat = nullptr,
struct ggml_tensor* y = NULL, struct ggml_tensor* y = nullptr,
int num_video_frames = -1, int num_video_frames = -1,
std::vector<struct ggml_tensor*> controls = {}, std::vector<struct ggml_tensor*> controls = {},
float control_strength = 0.f) { float control_strength = 0.f) {
@ -395,20 +395,20 @@ public:
// c_concat: [N, in_channels, h, w] or [1, in_channels, h, w] // c_concat: [N, in_channels, h, w] or [1, in_channels, h, w]
// y: [N, adm_in_channels] or [1, adm_in_channels] // y: [N, adm_in_channels] or [1, adm_in_channels]
// return: [N, out_channels, h, w] // return: [N, out_channels, h, w]
if (context != NULL) { if (context != nullptr) {
if (context->ne[2] != x->ne[3]) { if (context->ne[2] != x->ne[3]) {
context = ggml_repeat(ctx, context, ggml_new_tensor_3d(ctx, GGML_TYPE_F32, context->ne[0], context->ne[1], x->ne[3])); context = ggml_repeat(ctx, context, ggml_new_tensor_3d(ctx, GGML_TYPE_F32, context->ne[0], context->ne[1], x->ne[3]));
} }
} }
if (c_concat != NULL) { if (c_concat != nullptr) {
if (c_concat->ne[3] != x->ne[3]) { if (c_concat->ne[3] != x->ne[3]) {
c_concat = ggml_repeat(ctx, c_concat, x); c_concat = ggml_repeat(ctx, c_concat, x);
} }
x = ggml_concat(ctx, x, c_concat, 2); x = ggml_concat(ctx, x, c_concat, 2);
} }
if (y != NULL) { if (y != nullptr) {
if (y->ne[1] != x->ne[3]) { if (y->ne[1] != x->ne[3]) {
y = ggml_repeat(ctx, y, ggml_new_tensor_2d(ctx, GGML_TYPE_F32, y->ne[0], x->ne[3])); y = ggml_repeat(ctx, y, ggml_new_tensor_2d(ctx, GGML_TYPE_F32, y->ne[0], x->ne[3]));
} }
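Each conditioning tensor above is tiled to the batch size of x with ggml_repeat, using a freshly shaped tensor as the broadcast target, so a single conditioning row can serve the whole batch. A hedged sketch of the pattern for the vector conditioning y:

#include "ggml.h"

// Sketch: repeat a [adm_in_channels, 1] conditioning vector to
// [adm_in_channels, n_batch] when the batch sizes differ.
static ggml_tensor* broadcast_y_to_batch(ggml_context* ctx, ggml_tensor* y, int64_t n_batch) {
    if (y == nullptr || y->ne[1] == n_batch)
        return y;
    ggml_tensor* target = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, y->ne[0], n_batch);
    return ggml_repeat(ctx, y, target);  // ggml_repeat tiles `y` to the shape of `target`
}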
@ -428,7 +428,7 @@ public:
emb = time_embed_2->forward(ctx, emb); // [N, time_embed_dim] emb = time_embed_2->forward(ctx, emb); // [N, time_embed_dim]
// SDXL/SVD // SDXL/SVD
if (y != NULL) { if (y != nullptr) {
auto label_embed_0 = std::dynamic_pointer_cast<Linear>(blocks["label_emb.0.0"]); auto label_embed_0 = std::dynamic_pointer_cast<Linear>(blocks["label_emb.0.0"]);
auto label_embed_2 = std::dynamic_pointer_cast<Linear>(blocks["label_emb.0.2"]); auto label_embed_2 = std::dynamic_pointer_cast<Linear>(blocks["label_emb.0.2"]);
@ -562,7 +562,7 @@ struct UNetModelRunner : public GGMLRunner {
} }
} }
std::string get_desc() { std::string get_desc() override {
return "unet"; return "unet";
} }
@ -573,8 +573,8 @@ struct UNetModelRunner : public GGMLRunner {
struct ggml_cgraph* build_graph(struct ggml_tensor* x, struct ggml_cgraph* build_graph(struct ggml_tensor* x,
struct ggml_tensor* timesteps, struct ggml_tensor* timesteps,
struct ggml_tensor* context, struct ggml_tensor* context,
struct ggml_tensor* c_concat = NULL, struct ggml_tensor* c_concat = nullptr,
struct ggml_tensor* y = NULL, struct ggml_tensor* y = nullptr,
int num_video_frames = -1, int num_video_frames = -1,
std::vector<struct ggml_tensor*> controls = {}, std::vector<struct ggml_tensor*> controls = {},
float control_strength = 0.f) { float control_strength = 0.f) {
@ -619,8 +619,8 @@ struct UNetModelRunner : public GGMLRunner {
int num_video_frames = -1, int num_video_frames = -1,
std::vector<struct ggml_tensor*> controls = {}, std::vector<struct ggml_tensor*> controls = {},
float control_strength = 0.f, float control_strength = 0.f,
struct ggml_tensor** output = NULL, struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = NULL) { struct ggml_context* output_ctx = nullptr) {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// timesteps: [N, ] // timesteps: [N, ]
// context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size] // context: [N, max_position, hidden_size]([N, 77, 768]) or [1, max_position, hidden_size]
@ -636,11 +636,11 @@ struct UNetModelRunner : public GGMLRunner {
void test() { void test() {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != NULL); GGML_ASSERT(work_ctx != nullptr);
{ {
// CPU, num_video_frames = 1, x{num_video_frames, 8, 8, 8}: Pass // CPU, num_video_frames = 1, x{num_video_frames, 8, 8, 8}: Pass
@ -663,10 +663,10 @@ struct UNetModelRunner : public GGMLRunner {
ggml_set_f32(y, 0.5f); ggml_set_f32(y, 0.5f);
// print_ggml_tensor(y); // print_ggml_tensor(y);
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int t0 = ggml_time_ms(); int t0 = ggml_time_ms();
compute(8, x, timesteps, context, NULL, y, num_video_frames, {}, 0.f, &out, work_ctx); compute(8, x, timesteps, context, nullptr, y, num_video_frames, {}, 0.f, &out, work_ctx);
int t1 = ggml_time_ms(); int t1 = ggml_time_ms();
print_ggml_tensor(out); print_ggml_tensor(out);

upscaler.cpp
View File

@ -4,7 +4,7 @@
#include "stable-diffusion.h" #include "stable-diffusion.h"
struct UpscalerGGML { struct UpscalerGGML {
ggml_backend_t backend = NULL; // general backend ggml_backend_t backend = nullptr; // general backend
ggml_type model_data_type = GGML_TYPE_F16; ggml_type model_data_type = GGML_TYPE_F16;
std::shared_ptr<ESRGAN> esrgan_upscaler; std::shared_ptr<ESRGAN> esrgan_upscaler;
std::string esrgan_path; std::string esrgan_path;
@ -63,7 +63,7 @@ struct UpscalerGGML {
sd_image_t upscale(sd_image_t input_image, uint32_t upscale_factor) { sd_image_t upscale(sd_image_t input_image, uint32_t upscale_factor) {
// upscale_factor, unused for RealESRGAN_x4plus_anime_6B.pth // upscale_factor, unused for RealESRGAN_x4plus_anime_6B.pth
sd_image_t upscaled_image = {0, 0, 0, NULL}; sd_image_t upscaled_image = {0, 0, 0, nullptr};
int output_width = (int)input_image.width * esrgan_upscaler->scale; int output_width = (int)input_image.width * esrgan_upscaler->scale;
int output_height = (int)input_image.height * esrgan_upscaler->scale; int output_height = (int)input_image.height * esrgan_upscaler->scale;
LOG_INFO("upscaling from (%i x %i) to (%i x %i)", LOG_INFO("upscaling from (%i x %i) to (%i x %i)",
@ -71,7 +71,7 @@ struct UpscalerGGML {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
// draft context // draft context
@ -107,7 +107,7 @@ struct UpscalerGGML {
}; };
struct upscaler_ctx_t { struct upscaler_ctx_t {
UpscalerGGML* upscaler = NULL; UpscalerGGML* upscaler = nullptr;
}; };
upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str, upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str,
@ -115,21 +115,21 @@ upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str,
bool direct, bool direct,
int n_threads) { int n_threads) {
upscaler_ctx_t* upscaler_ctx = (upscaler_ctx_t*)malloc(sizeof(upscaler_ctx_t)); upscaler_ctx_t* upscaler_ctx = (upscaler_ctx_t*)malloc(sizeof(upscaler_ctx_t));
if (upscaler_ctx == NULL) { if (upscaler_ctx == nullptr) {
return NULL; return nullptr;
} }
std::string esrgan_path(esrgan_path_c_str); std::string esrgan_path(esrgan_path_c_str);
upscaler_ctx->upscaler = new UpscalerGGML(n_threads, direct); upscaler_ctx->upscaler = new UpscalerGGML(n_threads, direct);
if (upscaler_ctx->upscaler == NULL) { if (upscaler_ctx->upscaler == nullptr) {
return NULL; return nullptr;
} }
if (!upscaler_ctx->upscaler->load_from_file(esrgan_path, offload_params_to_cpu, n_threads)) { if (!upscaler_ctx->upscaler->load_from_file(esrgan_path, offload_params_to_cpu, n_threads)) {
delete upscaler_ctx->upscaler; delete upscaler_ctx->upscaler;
upscaler_ctx->upscaler = NULL; upscaler_ctx->upscaler = nullptr;
free(upscaler_ctx); free(upscaler_ctx);
return NULL; return nullptr;
} }
return upscaler_ctx; return upscaler_ctx;
} }
@ -139,16 +139,16 @@ sd_image_t upscale(upscaler_ctx_t* upscaler_ctx, sd_image_t input_image, uint32_
} }
int get_upscale_factor(upscaler_ctx_t* upscaler_ctx) { int get_upscale_factor(upscaler_ctx_t* upscaler_ctx) {
if (upscaler_ctx == NULL || upscaler_ctx->upscaler == NULL || upscaler_ctx->upscaler->esrgan_upscaler == NULL) { if (upscaler_ctx == nullptr || upscaler_ctx->upscaler == nullptr || upscaler_ctx->upscaler->esrgan_upscaler == nullptr) {
return 1; return 1;
} }
return upscaler_ctx->upscaler->esrgan_upscaler->scale; return upscaler_ctx->upscaler->esrgan_upscaler->scale;
} }
void free_upscaler_ctx(upscaler_ctx_t* upscaler_ctx) { void free_upscaler_ctx(upscaler_ctx_t* upscaler_ctx) {
if (upscaler_ctx->upscaler != NULL) { if (upscaler_ctx->upscaler != nullptr) {
delete upscaler_ctx->upscaler; delete upscaler_ctx->upscaler;
upscaler_ctx->upscaler = NULL; upscaler_ctx->upscaler = nullptr;
} }
free(upscaler_ctx); free(upscaler_ctx);
} }

util.cpp
View File

@ -1,8 +1,8 @@
#include "util.h" #include "util.h"
#include <stdarg.h>
#include <algorithm> #include <algorithm>
#include <cmath> #include <cmath>
#include <codecvt> #include <codecvt>
#include <cstdarg>
#include <fstream> #include <fstream>
#include <locale> #include <locale>
#include <sstream> #include <sstream>
@ -64,7 +64,7 @@ std::string format(const char* fmt, ...) {
va_list ap2; va_list ap2;
va_start(ap, fmt); va_start(ap, fmt);
va_copy(ap2, ap); va_copy(ap2, ap);
int size = vsnprintf(NULL, 0, fmt, ap); int size = vsnprintf(nullptr, 0, fmt, ap);
std::vector<char> buf(size + 1); std::vector<char> buf(size + 1);
int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
va_end(ap2); va_end(ap2);
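format() uses the classic two-pass vsnprintf idiom: a sizing pass with a null buffer, then a second pass into an exactly sized buffer, with va_copy because a va_list cannot be reused after the first pass. A standalone sketch of the same idiom (not the project's exact implementation):

#include <cstdarg>
#include <cstdio>
#include <string>
#include <vector>

std::string string_format(const char* fmt, ...) {
    va_list ap;
    va_list ap2;
    va_start(ap, fmt);
    va_copy(ap2, ap);                          // ap is consumed by the sizing pass
    int size = vsnprintf(nullptr, 0, fmt, ap); // measure only
    va_end(ap);
    if (size < 0) {                            // encoding error
        va_end(ap2);
        return {};
    }
    std::vector<char> buf(size + 1);
    vsnprintf(buf.data(), buf.size(), fmt, ap2);
    va_end(ap2);
    return std::string(buf.data(), size);
}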
@ -170,11 +170,11 @@ int32_t get_num_physical_cores() {
#elif defined(__APPLE__) && defined(__MACH__) #elif defined(__APPLE__) && defined(__MACH__)
int32_t num_physical_cores; int32_t num_physical_cores;
size_t len = sizeof(num_physical_cores); size_t len = sizeof(num_physical_cores);
int result = sysctlbyname("hw.perflevel0.physicalcpu", &num_physical_cores, &len, NULL, 0); int result = sysctlbyname("hw.perflevel0.physicalcpu", &num_physical_cores, &len, nullptr, 0);
if (result == 0) { if (result == 0) {
return num_physical_cores; return num_physical_cores;
} }
result = sysctlbyname("hw.physicalcpu", &num_physical_cores, &len, NULL, 0); result = sysctlbyname("hw.physicalcpu", &num_physical_cores, &len, nullptr, 0);
if (result == 0) { if (result == 0) {
return num_physical_cores; return num_physical_cores;
} }
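On Apple platforms the hunk above first queries hw.perflevel0.physicalcpu (performance cores on Apple Silicon) and falls back to hw.physicalcpu. A hedged standalone sketch of that lookup; sysctlbyname() fills the supplied buffer and returns 0 on success:

#if defined(__APPLE__) && defined(__MACH__)
#include <sys/sysctl.h>
#include <cstdint>

int32_t physical_cores_macos() {
    int32_t cores = 0;
    size_t len = sizeof(cores);
    // Apple Silicon: performance-core count only
    if (sysctlbyname("hw.perflevel0.physicalcpu", &cores, &len, nullptr, 0) == 0)
        return cores;
    len = sizeof(cores);  // reset in case the first call touched it
    // fallback: total physical core count
    if (sysctlbyname("hw.physicalcpu", &cores, &len, nullptr, 0) == 0)
        return cores;
    return 0;  // unknown
}
#endif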
@ -185,8 +185,8 @@ int32_t get_num_physical_cores() {
return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4; return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
} }
static sd_progress_cb_t sd_progress_cb = NULL; static sd_progress_cb_t sd_progress_cb = nullptr;
void* sd_progress_cb_data = NULL; void* sd_progress_cb_data = nullptr;
std::u32string utf8_to_utf32(const std::string& utf8_str) { std::u32string utf8_to_utf32(const std::string& utf8_str) {
std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> converter; std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> converter;
@ -296,8 +296,8 @@ std::string trim(const std::string& s) {
return rtrim(ltrim(s)); return rtrim(ltrim(s));
} }
static sd_log_cb_t sd_log_cb = NULL; static sd_log_cb_t sd_log_cb = nullptr;
void* sd_log_cb_data = NULL; void* sd_log_cb_data = nullptr;
#define LOG_BUFFER_SIZE 4096 #define LOG_BUFFER_SIZE 4096

28
vae.hpp
View File

@ -30,7 +30,7 @@ public:
} }
} }
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
// t_emb is always None // t_emb is always None
auto norm1 = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm1"]); auto norm1 = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm1"]);
@ -76,7 +76,7 @@ public:
blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1})); blocks["proj_out"] = std::shared_ptr<GGMLBlock>(new Conv2d(in_channels, in_channels, {1, 1}));
} }
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
// x: [N, in_channels, h, w] // x: [N, in_channels, h, w]
auto norm = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm"]); auto norm = std::dynamic_pointer_cast<GroupNorm32>(blocks["norm"]);
auto q_proj = std::dynamic_pointer_cast<Conv2d>(blocks["q"]); auto q_proj = std::dynamic_pointer_cast<Conv2d>(blocks["q"]);
@ -134,7 +134,7 @@ public:
} }
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* forward(struct ggml_context* ctx,
struct ggml_tensor* x) { struct ggml_tensor* x) override {
// timesteps always None // timesteps always None
// skip_video always False // skip_video always False
// x: [N, IC, IH, IW] // x: [N, IC, IH, IW]
@ -163,7 +163,7 @@ public:
class VideoResnetBlock : public ResnetBlock { class VideoResnetBlock : public ResnetBlock {
protected: protected:
void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") { void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
enum ggml_type wtype = get_type(prefix + "mix_factor", tensor_types, GGML_TYPE_F32); enum ggml_type wtype = get_type(prefix + "mix_factor", tensor_types, GGML_TYPE_F32);
params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1); params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
} }
@ -182,7 +182,7 @@ public:
blocks["time_stack"] = std::shared_ptr<GGMLBlock>(new ResBlock(out_channels, 0, out_channels, {video_kernel_size, 1}, 3, false, true)); blocks["time_stack"] = std::shared_ptr<GGMLBlock>(new ResBlock(out_channels, 0, out_channels, {video_kernel_size, 1}, 3, false, true));
} }
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
// x: [N, in_channels, h, w] aka [b*t, in_channels, h, w] // x: [N, in_channels, h, w] aka [b*t, in_channels, h, w]
// return: [N, out_channels, h, w] aka [b*t, out_channels, h, w] // return: [N, out_channels, h, w] aka [b*t, out_channels, h, w]
// t_emb is always None // t_emb is always None
@ -548,7 +548,7 @@ struct AutoEncoderKL : public VAE {
ae.init(params_ctx, tensor_types, prefix); ae.init(params_ctx, tensor_types, prefix);
} }
void enable_conv2d_direct() { void enable_conv2d_direct() override {
std::vector<GGMLBlock*> blocks; std::vector<GGMLBlock*> blocks;
ae.get_all_blocks(blocks); ae.get_all_blocks(blocks);
for (auto block : blocks) { for (auto block : blocks) {
@ -559,7 +559,7 @@ struct AutoEncoderKL : public VAE {
} }
} }
void set_conv2d_scale(float scale) { void set_conv2d_scale(float scale) override {
std::vector<GGMLBlock*> blocks; std::vector<GGMLBlock*> blocks;
ae.get_all_blocks(blocks); ae.get_all_blocks(blocks);
for (auto block : blocks) { for (auto block : blocks) {
@ -570,11 +570,11 @@ struct AutoEncoderKL : public VAE {
} }
} }
std::string get_desc() { std::string get_desc() override {
return "vae"; return "vae";
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) override {
ae.get_param_tensors(tensors, prefix); ae.get_param_tensors(tensors, prefix);
} }
@ -594,7 +594,7 @@ struct AutoEncoderKL : public VAE {
struct ggml_tensor* z, struct ggml_tensor* z,
bool decode_graph, bool decode_graph,
struct ggml_tensor** output, struct ggml_tensor** output,
struct ggml_context* output_ctx = NULL) { struct ggml_context* output_ctx = nullptr) override {
GGML_ASSERT(!decode_only || decode_graph); GGML_ASSERT(!decode_only || decode_graph);
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(z, decode_graph); return build_graph(z, decode_graph);
@ -607,11 +607,11 @@ struct AutoEncoderKL : public VAE {
void test() { void test() {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB params.mem_size = static_cast<size_t>(10 * 1024 * 1024); // 10 MB
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != NULL); GGML_ASSERT(work_ctx != nullptr);
{ {
// CPU, x{1, 3, 64, 64}: Pass // CPU, x{1, 3, 64, 64}: Pass
@ -621,7 +621,7 @@ struct AutoEncoderKL : public VAE {
auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 64, 64, 3, 2); auto x = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 64, 64, 3, 2);
ggml_set_f32(x, 0.5f); ggml_set_f32(x, 0.5f);
print_ggml_tensor(x); print_ggml_tensor(x);
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int t0 = ggml_time_ms(); int t0 = ggml_time_ms();
compute(8, x, false, &out, work_ctx); compute(8, x, false, &out, work_ctx);
@ -639,7 +639,7 @@ struct AutoEncoderKL : public VAE {
auto z = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 4, 1); auto z = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, 8, 8, 4, 1);
ggml_set_f32(z, 0.5f); ggml_set_f32(z, 0.5f);
print_ggml_tensor(z); print_ggml_tensor(z);
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int t0 = ggml_time_ms(); int t0 = ggml_time_ms();
compute(8, z, true, &out, work_ctx); compute(8, z, true, &out, work_ctx);
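The NULL-to-nullptr edits in these hunks come from modernize-use-nullptr. nullptr has its own type, std::nullptr_t, so it participates in overload resolution as a pointer rather than as the integer the NULL macro may expand to. A small illustration with hypothetical overloads, not taken from this file:

    #include <iostream>

    void take(int)         { std::cout << "int overload\n"; }
    void take(const char*) { std::cout << "pointer overload\n"; }

    int main() {
        // take(NULL);    // may be ambiguous or pick the int overload, depending on how NULL is defined
        take(nullptr);     // unambiguously selects the pointer overload
        const char* p = nullptr;         // same null pointer value, clearer intent
        std::cout << (p == nullptr) << "\n";
    }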

144
wan.hpp

@ -2,6 +2,8 @@
#define __WAN_HPP__ #define __WAN_HPP__
#include <map> #include <map>
#include <memory>
#include <utility>
#include "common.hpp" #include "common.hpp"
#include "flux.hpp" #include "flux.hpp"
@ -24,7 +26,7 @@ namespace WAN {
std::tuple<int, int, int> dilation; std::tuple<int, int, int> dilation;
bool bias; bool bias;
void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") { void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
params["weight"] = ggml_new_tensor_4d(ctx, params["weight"] = ggml_new_tensor_4d(ctx,
GGML_TYPE_F16, GGML_TYPE_F16,
std::get<2>(kernel_size), std::get<2>(kernel_size),
@ -46,17 +48,17 @@ namespace WAN {
bool bias = true) bool bias = true)
: in_channels(in_channels), : in_channels(in_channels),
out_channels(out_channels), out_channels(out_channels),
kernel_size(kernel_size), kernel_size(std::move(kernel_size)),
stride(stride), stride(std::move(stride)),
padding(padding), padding(std::move(padding)),
dilation(dilation), dilation(std::move(dilation)),
bias(bias) {} bias(bias) {}
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* cache_x = NULL) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* cache_x = nullptr) {
// x: [N*IC, ID, IH, IW] // x: [N*IC, ID, IH, IW]
// result: x: [N*OC, ID, IH, IW] // result: x: [N*OC, ID, IH, IW]
struct ggml_tensor* w = params["weight"]; struct ggml_tensor* w = params["weight"];
struct ggml_tensor* b = NULL; struct ggml_tensor* b = nullptr;
if (bias) { if (bias) {
b = params["bias"]; b = params["bias"];
} }
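The constructor rewrite just above reflects modernize-pass-by-value: parameters that are only stored in members are taken by value and then moved into place, so rvalue arguments avoid a copy while lvalue arguments cost the same single copy as before. A rough sketch of the idiom under simplified, assumed types (this is not the real CausalConv3d interface):

    #include <string>
    #include <tuple>
    #include <utility>

    class Conv3dLike {                                   // illustrative only
        std::tuple<int, int, int> kernel_size_;
        std::string name_;
    public:
        // Take by value, then move into the members: temporaries are moved all the
        // way through, lvalues are copied exactly once into the parameter.
        Conv3dLike(std::tuple<int, int, int> kernel_size, std::string name)
            : kernel_size_(std::move(kernel_size)), name_(std::move(name)) {}
    };

    int main() {
        Conv3dLike a({3, 3, 3}, "conv_a");               // rvalues: moved, no extra copy of the string
        std::string n = "conv_b";
        Conv3dLike b({1, 3, 3}, n);                      // lvalue string: one copy, then a move
    }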
@ -68,7 +70,7 @@ namespace WAN {
int lp2 = 2 * std::get<0>(padding); int lp2 = 2 * std::get<0>(padding);
int rp2 = 0; int rp2 = 0;
if (cache_x != NULL && lp2 > 0) { if (cache_x != nullptr && lp2 > 0) {
x = ggml_concat(ctx, cache_x, x, 2); x = ggml_concat(ctx, cache_x, x, 2);
lp2 -= (int)cache_x->ne[2]; lp2 -= (int)cache_x->ne[2];
} }
@ -85,7 +87,7 @@ namespace WAN {
protected: protected:
int64_t dim; int64_t dim;
void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") { void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
ggml_type wtype = GGML_TYPE_F32; ggml_type wtype = GGML_TYPE_F32;
params["gamma"] = ggml_new_tensor_1d(ctx, wtype, dim); params["gamma"] = ggml_new_tensor_1d(ctx, wtype, dim);
} }
@ -94,7 +96,7 @@ namespace WAN {
RMS_norm(int64_t dim) RMS_norm(int64_t dim)
: dim(dim) {} : dim(dim) {}
struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) { struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
// x: [N*IC, ID, IH, IW], IC == dim // x: [N*IC, ID, IH, IW], IC == dim
// assert N == 1 // assert N == 1
@ -159,12 +161,12 @@ namespace WAN {
int idx = feat_idx; int idx = feat_idx;
feat_idx += 1; feat_idx += 1;
if (chunk_idx == 0) { if (chunk_idx == 0) {
// feat_cache[idx] == NULL, pass // feat_cache[idx] == nullptr, pass
} else { } else {
auto time_conv = std::dynamic_pointer_cast<CausalConv3d>(blocks["time_conv"]); auto time_conv = std::dynamic_pointer_cast<CausalConv3d>(blocks["time_conv"]);
auto cache_x = ggml_slice(ctx, x, 2, -CACHE_T, x->ne[2]); auto cache_x = ggml_slice(ctx, x, 2, -CACHE_T, x->ne[2]);
if (cache_x->ne[2] < 2 && feat_cache[idx] != NULL) { // chunk_idx >= 2 if (cache_x->ne[2] < 2 && feat_cache[idx] != nullptr) { // chunk_idx >= 2
// cache last frame of last two chunk // cache last frame of last two chunk
cache_x = ggml_concat(ctx, cache_x = ggml_concat(ctx,
ggml_slice(ctx, feat_cache[idx], 2, -1, feat_cache[idx]->ne[2]), ggml_slice(ctx, feat_cache[idx], 2, -1, feat_cache[idx]->ne[2]),
@ -209,7 +211,7 @@ namespace WAN {
if (mode == "downsample3d") { if (mode == "downsample3d") {
if (feat_cache.size() > 0) { if (feat_cache.size() > 0) {
int idx = feat_idx; int idx = feat_idx;
if (feat_cache[idx] == NULL) { if (feat_cache[idx] == nullptr) {
feat_cache[idx] = x; feat_cache[idx] = x;
feat_idx += 1; feat_idx += 1;
} else { } else {
@ -373,7 +375,7 @@ namespace WAN {
if (feat_cache.size() > 0) { if (feat_cache.size() > 0) {
int idx = feat_idx; int idx = feat_idx;
auto cache_x = ggml_slice(ctx, x, 2, -CACHE_T, x->ne[2]); auto cache_x = ggml_slice(ctx, x, 2, -CACHE_T, x->ne[2]);
if (cache_x->ne[2] < 2 && feat_cache[idx] != NULL) { if (cache_x->ne[2] < 2 && feat_cache[idx] != nullptr) {
// cache last frame of last two chunk // cache last frame of last two chunk
cache_x = ggml_concat(ctx, cache_x = ggml_concat(ctx,
ggml_slice(ctx, feat_cache[idx], 2, -1, feat_cache[idx]->ne[2]), ggml_slice(ctx, feat_cache[idx], 2, -1, feat_cache[idx]->ne[2]),
@ -566,7 +568,7 @@ namespace WAN {
x = ggml_nn_attention(ctx, q, k, v, false); // [t, h * w, c] x = ggml_nn_attention(ctx, q, k, v, false); // [t, h * w, c]
// v = ggml_cont(ctx, ggml_torch_permute(ctx, v, 1, 0, 2, 3)); // [t, h * w, c] // v = ggml_cont(ctx, ggml_torch_permute(ctx, v, 1, 0, 2, 3)); // [t, h * w, c]
// x = ggml_nn_attention_ext(ctx, q, k, v, q->ne[2], NULL, false, false, true); // x = ggml_nn_attention_ext(ctx, q, k, v, q->ne[2], nullptr, false, false, true);
x = ggml_nn_cont(ctx, ggml_permute(ctx, x, 1, 0, 2, 3)); // [t, c, h * w] x = ggml_nn_cont(ctx, ggml_permute(ctx, x, 1, 0, 2, 3)); // [t, c, h * w]
x = ggml_reshape_4d(ctx, x, w, h, c, n); // [t, c, h, w] x = ggml_reshape_4d(ctx, x, w, h, c, n); // [t, c, h, w]
@ -672,7 +674,7 @@ namespace WAN {
if (feat_cache.size() > 0) { if (feat_cache.size() > 0) {
int idx = feat_idx; int idx = feat_idx;
auto cache_x = ggml_slice(ctx, x, 2, -CACHE_T, x->ne[2]); auto cache_x = ggml_slice(ctx, x, 2, -CACHE_T, x->ne[2]);
if (cache_x->ne[2] < 2 && feat_cache[idx] != NULL) { if (cache_x->ne[2] < 2 && feat_cache[idx] != nullptr) {
// cache last frame of last two chunk // cache last frame of last two chunk
cache_x = ggml_concat(ctx, cache_x = ggml_concat(ctx,
ggml_slice(ctx, feat_cache[idx], 2, -1, feat_cache[idx]->ne[2]), ggml_slice(ctx, feat_cache[idx], 2, -1, feat_cache[idx]->ne[2]),
@ -724,7 +726,7 @@ namespace WAN {
if (feat_cache.size() > 0) { if (feat_cache.size() > 0) {
int idx = feat_idx; int idx = feat_idx;
auto cache_x = ggml_slice(ctx, x, 2, -CACHE_T, x->ne[2]); auto cache_x = ggml_slice(ctx, x, 2, -CACHE_T, x->ne[2]);
if (cache_x->ne[2] < 2 && feat_cache[idx] != NULL) { if (cache_x->ne[2] < 2 && feat_cache[idx] != nullptr) {
// cache last frame of last two chunk // cache last frame of last two chunk
cache_x = ggml_concat(ctx, cache_x = ggml_concat(ctx,
ggml_slice(ctx, feat_cache[idx], 2, -1, feat_cache[idx]->ne[2]), ggml_slice(ctx, feat_cache[idx], 2, -1, feat_cache[idx]->ne[2]),
@ -843,7 +845,7 @@ namespace WAN {
if (feat_cache.size() > 0) { if (feat_cache.size() > 0) {
int idx = feat_idx; int idx = feat_idx;
auto cache_x = ggml_slice(ctx, x, 2, -CACHE_T, x->ne[2]); auto cache_x = ggml_slice(ctx, x, 2, -CACHE_T, x->ne[2]);
if (cache_x->ne[2] < 2 && feat_cache[idx] != NULL) { if (cache_x->ne[2] < 2 && feat_cache[idx] != nullptr) {
// cache last frame of last two chunk // cache last frame of last two chunk
cache_x = ggml_concat(ctx, cache_x = ggml_concat(ctx,
ggml_slice(ctx, feat_cache[idx], 2, -1, feat_cache[idx]->ne[2]), ggml_slice(ctx, feat_cache[idx], 2, -1, feat_cache[idx]->ne[2]),
@ -895,7 +897,7 @@ namespace WAN {
if (feat_cache.size() > 0) { if (feat_cache.size() > 0) {
int idx = feat_idx; int idx = feat_idx;
auto cache_x = ggml_slice(ctx, x, 2, -CACHE_T, x->ne[2]); auto cache_x = ggml_slice(ctx, x, 2, -CACHE_T, x->ne[2]);
if (cache_x->ne[2] < 2 && feat_cache[idx] != NULL) { if (cache_x->ne[2] < 2 && feat_cache[idx] != nullptr) {
// cache last frame of last two chunk // cache last frame of last two chunk
cache_x = ggml_concat(ctx, cache_x = ggml_concat(ctx,
ggml_slice(ctx, feat_cache[idx], 2, -1, feat_cache[idx]->ne[2]), ggml_slice(ctx, feat_cache[idx], 2, -1, feat_cache[idx]->ne[2]),
@ -935,9 +937,9 @@ namespace WAN {
void clear_cache() { void clear_cache() {
_conv_idx = 0; _conv_idx = 0;
_feat_map = std::vector<struct ggml_tensor*>(_conv_num, NULL); _feat_map = std::vector<struct ggml_tensor*>(_conv_num, nullptr);
_enc_conv_idx = 0; _enc_conv_idx = 0;
_enc_feat_map = std::vector<struct ggml_tensor*>(_enc_conv_num, NULL); _enc_feat_map = std::vector<struct ggml_tensor*>(_enc_conv_num, nullptr);
} }
public: public:
@ -1116,11 +1118,11 @@ namespace WAN {
ae.init(params_ctx, tensor_types, prefix); ae.init(params_ctx, tensor_types, prefix);
} }
std::string get_desc() { std::string get_desc() override {
return "wan_vae"; return "wan_vae";
} }
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) { void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) override {
ae.get_param_tensors(tensors, prefix); ae.get_param_tensors(tensors, prefix);
} }
@ -1152,7 +1154,7 @@ namespace WAN {
for (int64_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) { for (int64_t feat_idx = 0; feat_idx < ae._feat_map.size(); feat_idx++) {
ggml_tensor* feat_cache = ae._feat_map[feat_idx]; ggml_tensor* feat_cache = ae._feat_map[feat_idx];
if (feat_cache != NULL) { if (feat_cache != nullptr) {
cache("feat_idx:" + std::to_string(feat_idx), feat_cache); cache("feat_idx:" + std::to_string(feat_idx), feat_cache);
ggml_build_forward_expand(gf, feat_cache); ggml_build_forward_expand(gf, feat_cache);
} }
@ -1167,7 +1169,7 @@ namespace WAN {
struct ggml_tensor* z, struct ggml_tensor* z,
bool decode_graph, bool decode_graph,
struct ggml_tensor** output, struct ggml_tensor** output,
struct ggml_context* output_ctx = NULL) { struct ggml_context* output_ctx = nullptr) override {
if (true) { if (true) {
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(z, decode_graph); return build_graph(z, decode_graph);
@ -1180,7 +1182,7 @@ namespace WAN {
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph_partial(z, decode_graph, i); return build_graph_partial(z, decode_graph, i);
}; };
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
GGMLRunner::compute(get_graph, n_threads, true, &out, output_ctx); GGMLRunner::compute(get_graph, n_threads, true, &out, output_ctx);
ae.clear_cache(); ae.clear_cache();
if (t == 1) { if (t == 1) {
@ -1220,11 +1222,11 @@ namespace WAN {
void test() { void test() {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G params.mem_size = static_cast<size_t>(1024 * 1024) * 1024; // 1G
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != NULL); GGML_ASSERT(work_ctx != nullptr);
if (true) { if (true) {
// cpu f32, pass // cpu f32, pass
@ -1235,7 +1237,7 @@ namespace WAN {
ggml_set_f32(z, 0.5f); ggml_set_f32(z, 0.5f);
z = load_tensor_from_file(work_ctx, "wan_vae_z.bin"); z = load_tensor_from_file(work_ctx, "wan_vae_z.bin");
print_ggml_tensor(z); print_ggml_tensor(z);
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int64_t t0 = ggml_time_ms(); int64_t t0 = ggml_time_ms();
compute(8, z, true, &out, work_ctx); compute(8, z, true, &out, work_ctx);
@ -1250,7 +1252,7 @@ namespace WAN {
// ggml_backend_t backend = ggml_backend_cuda_init(0); // ggml_backend_t backend = ggml_backend_cuda_init(0);
ggml_backend_t backend = ggml_backend_cpu_init(); ggml_backend_t backend = ggml_backend_cpu_init();
ggml_type model_data_type = GGML_TYPE_F16; ggml_type model_data_type = GGML_TYPE_F16;
std::shared_ptr<WanVAERunner> vae = std::shared_ptr<WanVAERunner>(new WanVAERunner(backend, false, {}, "", false, VERSION_WAN2_2_TI2V)); std::shared_ptr<WanVAERunner> vae = std::make_shared<WanVAERunner>(backend, false, String2GGMLType{}, "", false, VERSION_WAN2_2_TI2V);
{ {
LOG_INFO("loading from '%s'", file_path.c_str()); LOG_INFO("loading from '%s'", file_path.c_str());
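The std::make_shared conversion just above (and the WanRunner one further below) comes from modernize-make-shared: the object and the shared_ptr control block are created in a single allocation, with no naked new. Note that the bare {} argument had to be spelled String2GGMLType{}, because make_shared perfect-forwards its arguments and cannot deduce a type for a braced-init-list. A minimal sketch under assumed, simplified types (RunnerLike and the alias below are placeholders, not the real classes):

    #include <map>
    #include <memory>
    #include <string>

    using String2GGMLType = std::map<std::string, int>;    // stand-in for the real alias

    struct RunnerLike {                                     // placeholder for WanVAERunner
        RunnerLike(bool flash_attn, String2GGMLType types, std::string prefix) {}
    };

    int main() {
        // One allocation for object + control block, no naked 'new':
        auto a = std::make_shared<RunnerLike>(false, String2GGMLType{}, "model");

        // Would not compile: make_shared cannot deduce a parameter type from a bare '{}'.
        // auto b = std::make_shared<RunnerLike>(false, {}, "model");

        // The previous spelling still works but needs two allocations and an explicit 'new':
        std::shared_ptr<RunnerLike> c(new RunnerLike(false, {}, "model"));
    }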
@ -1309,7 +1311,7 @@ namespace WAN {
ggml_backend_t backend, ggml_backend_t backend,
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* pe, struct ggml_tensor* pe,
struct ggml_tensor* mask = NULL) { struct ggml_tensor* mask = nullptr) {
// x: [N, n_token, dim] // x: [N, n_token, dim]
// pe: [n_token, d_head/2, 2, 2] // pe: [n_token, d_head/2, 2, 2]
// return [N, n_token, dim] // return [N, n_token, dim]
@ -1367,7 +1369,7 @@ namespace WAN {
ggml_backend_t backend, ggml_backend_t backend,
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* context, struct ggml_tensor* context,
int64_t context_img_len) { int64_t context_img_len) override {
// x: [N, n_token, dim] // x: [N, n_token, dim]
// context: [N, n_context, dim] // context: [N, n_context, dim]
// context_img_len: unused // context_img_len: unused
@ -1388,7 +1390,7 @@ namespace WAN {
k = norm_k->forward(ctx, k); k = norm_k->forward(ctx, k);
auto v = v_proj->forward(ctx, context); // [N, n_context, dim] auto v = v_proj->forward(ctx, context); // [N, n_context, dim]
x = ggml_nn_attention_ext(ctx, backend, q, k, v, num_heads, NULL, false, false, flash_attn); // [N, n_token, dim] x = ggml_nn_attention_ext(ctx, backend, q, k, v, num_heads, nullptr, false, false, flash_attn); // [N, n_token, dim]
x = o_proj->forward(ctx, x); // [N, n_token, dim] x = o_proj->forward(ctx, x); // [N, n_token, dim]
return x; return x;
@ -1417,7 +1419,7 @@ namespace WAN {
ggml_backend_t backend, ggml_backend_t backend,
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* context, struct ggml_tensor* context,
int64_t context_img_len) { int64_t context_img_len) override {
// x: [N, n_token, dim] // x: [N, n_token, dim]
// context: [N, context_img_len + context_txt_len, dim] // context: [N, context_img_len + context_txt_len, dim]
// return [N, n_token, dim] // return [N, n_token, dim]
@ -1455,8 +1457,8 @@ namespace WAN {
k_img = norm_k_img->forward(ctx, k_img); k_img = norm_k_img->forward(ctx, k_img);
auto v_img = v_img_proj->forward(ctx, context_img); // [N, context_img_len, dim] auto v_img = v_img_proj->forward(ctx, context_img); // [N, context_img_len, dim]
auto img_x = ggml_nn_attention_ext(ctx, backend, q, k_img, v_img, num_heads, NULL, false, false, flash_attn); // [N, n_token, dim] auto img_x = ggml_nn_attention_ext(ctx, backend, q, k_img, v_img, num_heads, nullptr, false, false, flash_attn); // [N, n_token, dim]
x = ggml_nn_attention_ext(ctx, backend, q, k, v, num_heads, NULL, false, false, flash_attn); // [N, n_token, dim] x = ggml_nn_attention_ext(ctx, backend, q, k, v, num_heads, nullptr, false, false, flash_attn); // [N, n_token, dim]
x = ggml_add(ctx, x, img_x); x = ggml_add(ctx, x, img_x);
@ -1497,7 +1499,7 @@ namespace WAN {
protected: protected:
int dim; int dim;
void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") { void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
enum ggml_type wtype = get_type(prefix + "weight", tensor_types, GGML_TYPE_F32); enum ggml_type wtype = get_type(prefix + "weight", tensor_types, GGML_TYPE_F32);
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1); params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1);
} }
@ -1587,7 +1589,7 @@ namespace WAN {
class VaceWanAttentionBlock : public WanAttentionBlock { class VaceWanAttentionBlock : public WanAttentionBlock {
protected: protected:
int block_id; int block_id;
void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") { void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
enum ggml_type wtype = get_type(prefix + "weight", tensor_types, GGML_TYPE_F32); enum ggml_type wtype = get_type(prefix + "weight", tensor_types, GGML_TYPE_F32);
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1); params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 6, 1);
} }
@ -1641,7 +1643,7 @@ namespace WAN {
protected: protected:
int dim; int dim;
void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") { void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
enum ggml_type wtype = get_type(prefix + "weight", tensor_types, GGML_TYPE_F32); enum ggml_type wtype = get_type(prefix + "weight", tensor_types, GGML_TYPE_F32);
params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 2, 1); params["modulation"] = ggml_new_tensor_3d(ctx, wtype, dim, 2, 1);
} }
@ -1688,7 +1690,7 @@ namespace WAN {
int in_dim; int in_dim;
int flf_pos_embed_token_number; int flf_pos_embed_token_number;
void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") { void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
if (flf_pos_embed_token_number > 0) { if (flf_pos_embed_token_number > 0) {
params["emb_pos"] = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, in_dim, flf_pos_embed_token_number, 1); params["emb_pos"] = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, in_dim, flf_pos_embed_token_number, 1);
} }
@ -1876,8 +1878,8 @@ namespace WAN {
struct ggml_tensor* timestep, struct ggml_tensor* timestep,
struct ggml_tensor* context, struct ggml_tensor* context,
struct ggml_tensor* pe, struct ggml_tensor* pe,
struct ggml_tensor* clip_fea = NULL, struct ggml_tensor* clip_fea = nullptr,
struct ggml_tensor* vace_context = NULL, struct ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f, float vace_strength = 1.f,
int64_t N = 1) { int64_t N = 1) {
// x: [N*C, T, H, W], C => in_dim // x: [N*C, T, H, W], C => in_dim
@ -1920,7 +1922,7 @@ namespace WAN {
context = text_embedding_2->forward(ctx, context); // [N, context_txt_len, dim] context = text_embedding_2->forward(ctx, context); // [N, context_txt_len, dim]
int64_t context_img_len = 0; int64_t context_img_len = 0;
if (clip_fea != NULL) { if (clip_fea != nullptr) {
if (params.model_type == "i2v") { if (params.model_type == "i2v") {
auto img_emb = std::dynamic_pointer_cast<MLPProj>(blocks["img_emb"]); auto img_emb = std::dynamic_pointer_cast<MLPProj>(blocks["img_emb"]);
auto context_img = img_emb->forward(ctx, clip_fea); // [N, context_img_len, dim] auto context_img = img_emb->forward(ctx, clip_fea); // [N, context_img_len, dim]
@ -1930,7 +1932,7 @@ namespace WAN {
} }
// vace_patch_embedding // vace_patch_embedding
ggml_tensor* c = NULL; ggml_tensor* c = nullptr;
if (params.vace_layers > 0) { if (params.vace_layers > 0) {
auto vace_patch_embedding = std::dynamic_pointer_cast<Conv3d>(blocks["vace_patch_embedding"]); auto vace_patch_embedding = std::dynamic_pointer_cast<Conv3d>(blocks["vace_patch_embedding"]);
@ -1971,9 +1973,9 @@ namespace WAN {
struct ggml_tensor* timestep, struct ggml_tensor* timestep,
struct ggml_tensor* context, struct ggml_tensor* context,
struct ggml_tensor* pe, struct ggml_tensor* pe,
struct ggml_tensor* clip_fea = NULL, struct ggml_tensor* clip_fea = nullptr,
struct ggml_tensor* time_dim_concat = NULL, struct ggml_tensor* time_dim_concat = nullptr,
struct ggml_tensor* vace_context = NULL, struct ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f, float vace_strength = 1.f,
int64_t N = 1) { int64_t N = 1) {
// Forward pass of DiT. // Forward pass of DiT.
@ -1997,7 +1999,7 @@ namespace WAN {
int64_t h_len = ((H + (std::get<1>(params.patch_size) / 2)) / std::get<1>(params.patch_size)); int64_t h_len = ((H + (std::get<1>(params.patch_size) / 2)) / std::get<1>(params.patch_size));
int64_t w_len = ((W + (std::get<2>(params.patch_size) / 2)) / std::get<2>(params.patch_size)); int64_t w_len = ((W + (std::get<2>(params.patch_size) / 2)) / std::get<2>(params.patch_size));
if (time_dim_concat != NULL) { if (time_dim_concat != nullptr) {
time_dim_concat = pad_to_patch_size(ctx, time_dim_concat); time_dim_concat = pad_to_patch_size(ctx, time_dim_concat);
x = ggml_concat(ctx, x, time_dim_concat, 2); // [N*C, (T+pad_t) + (T2+pad_t2), H + pad_h, W + pad_w] x = ggml_concat(ctx, x, time_dim_concat, 2); // [N*C, (T+pad_t) + (T2+pad_t2), H + pad_h, W + pad_w]
t_len = ((x->ne[2] + (std::get<0>(params.patch_size) / 2)) / std::get<0>(params.patch_size)); t_len = ((x->ne[2] + (std::get<0>(params.patch_size) / 2)) / std::get<0>(params.patch_size));
@ -2134,7 +2136,7 @@ namespace WAN {
wan.init(params_ctx, tensor_types, prefix); wan.init(params_ctx, tensor_types, prefix);
} }
std::string get_desc() { std::string get_desc() override {
return desc; return desc;
} }
@ -2145,10 +2147,10 @@ namespace WAN {
struct ggml_cgraph* build_graph(struct ggml_tensor* x, struct ggml_cgraph* build_graph(struct ggml_tensor* x,
struct ggml_tensor* timesteps, struct ggml_tensor* timesteps,
struct ggml_tensor* context, struct ggml_tensor* context,
struct ggml_tensor* clip_fea = NULL, struct ggml_tensor* clip_fea = nullptr,
struct ggml_tensor* c_concat = NULL, struct ggml_tensor* c_concat = nullptr,
struct ggml_tensor* time_dim_concat = NULL, struct ggml_tensor* time_dim_concat = nullptr,
struct ggml_tensor* vace_context = NULL, struct ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f) { float vace_strength = 1.f) {
struct ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, WAN_GRAPH_SIZE, false); struct ggml_cgraph* gf = ggml_new_graph_custom(compute_ctx, WAN_GRAPH_SIZE, false);
@ -2174,10 +2176,10 @@ namespace WAN {
auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, wan_params.axes_dim_sum / 2, pos_len); auto pe = ggml_new_tensor_4d(compute_ctx, GGML_TYPE_F32, 2, 2, wan_params.axes_dim_sum / 2, pos_len);
// pe->data = pe_vec.data(); // pe->data = pe_vec.data();
// print_ggml_tensor(pe); // print_ggml_tensor(pe);
// pe->data = NULL; // pe->data = nullptr;
set_backend_tensor_data(pe, pe_vec.data()); set_backend_tensor_data(pe, pe_vec.data());
if (c_concat != NULL) { if (c_concat != nullptr) {
x = ggml_concat(compute_ctx, x, c_concat, 3); x = ggml_concat(compute_ctx, x, c_concat, 3);
} }
@ -2201,13 +2203,13 @@ namespace WAN {
struct ggml_tensor* x, struct ggml_tensor* x,
struct ggml_tensor* timesteps, struct ggml_tensor* timesteps,
struct ggml_tensor* context, struct ggml_tensor* context,
struct ggml_tensor* clip_fea = NULL, struct ggml_tensor* clip_fea = nullptr,
struct ggml_tensor* c_concat = NULL, struct ggml_tensor* c_concat = nullptr,
struct ggml_tensor* time_dim_concat = NULL, struct ggml_tensor* time_dim_concat = nullptr,
struct ggml_tensor* vace_context = NULL, struct ggml_tensor* vace_context = nullptr,
float vace_strength = 1.f, float vace_strength = 1.f,
struct ggml_tensor** output = NULL, struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = NULL) { struct ggml_context* output_ctx = nullptr) {
auto get_graph = [&]() -> struct ggml_cgraph* { auto get_graph = [&]() -> struct ggml_cgraph* {
return build_graph(x, timesteps, context, clip_fea, c_concat, time_dim_concat, vace_context, vace_strength); return build_graph(x, timesteps, context, clip_fea, c_concat, time_dim_concat, vace_context, vace_strength);
}; };
@ -2218,11 +2220,11 @@ namespace WAN {
void test() { void test() {
struct ggml_init_params params; struct ggml_init_params params;
params.mem_size = static_cast<size_t>(200 * 1024 * 1024); // 200 MB params.mem_size = static_cast<size_t>(200 * 1024 * 1024); // 200 MB
params.mem_buffer = NULL; params.mem_buffer = nullptr;
params.no_alloc = false; params.no_alloc = false;
struct ggml_context* work_ctx = ggml_init(params); struct ggml_context* work_ctx = ggml_init(params);
GGML_ASSERT(work_ctx != NULL); GGML_ASSERT(work_ctx != nullptr);
{ {
// cpu f16: pass // cpu f16: pass
@ -2244,10 +2246,10 @@ namespace WAN {
// auto clip_fea = load_tensor_from_file(work_ctx, "wan_dit_clip_fea.bin"); // auto clip_fea = load_tensor_from_file(work_ctx, "wan_dit_clip_fea.bin");
// print_ggml_tensor(clip_fea); // print_ggml_tensor(clip_fea);
struct ggml_tensor* out = NULL; struct ggml_tensor* out = nullptr;
int t0 = ggml_time_ms(); int t0 = ggml_time_ms();
compute(8, x, timesteps, context, NULL, NULL, NULL, NULL, 1.f, &out, work_ctx); compute(8, x, timesteps, context, nullptr, nullptr, nullptr, nullptr, 1.f, &out, work_ctx);
int t1 = ggml_time_ms(); int t1 = ggml_time_ms();
print_ggml_tensor(out); print_ggml_tensor(out);
@ -2275,12 +2277,12 @@ namespace WAN {
} }
} }
std::shared_ptr<WanRunner> wan = std::shared_ptr<WanRunner>(new WanRunner(backend, std::shared_ptr<WanRunner> wan = std::make_shared<WanRunner>(backend,
false, false,
tensor_types, tensor_types,
"model.diffusion_model", "model.diffusion_model",
VERSION_WAN2_2_TI2V, VERSION_WAN2_2_TI2V,
true)); true);
wan->alloc_params_buffer(); wan->alloc_params_buffer();
std::map<std::string, ggml_tensor*> tensors; std::map<std::string, ggml_tensor*> tensors;