Merge branch 'master' into chroma_radiance

leejet 2025-10-25 23:41:41 +08:00
commit c052f033fb
7 changed files with 444 additions and 120 deletions

View File: README.md

@@ -35,6 +35,7 @@ API and command-line option may change frequently.***
 - Image Models
     - SD1.x, SD2.x, [SD-Turbo](https://huggingface.co/stabilityai/sd-turbo)
     - SDXL, [SDXL-Turbo](https://huggingface.co/stabilityai/sdxl-turbo)
+    - [some SD1.x and SDXL distilled models](./docs/distilled_sd.md)
     - [SD3/SD3.5](./docs/sd3.md)
     - [Flux-dev/Flux-schnell](./docs/flux.md)
     - [Chroma](./docs/chroma.md)

View File: conditioner.hpp

@@ -673,33 +673,80 @@ struct SD3CLIPEmbedder : public Conditioner {
                     bool offload_params_to_cpu,
                     const String2GGMLType& tensor_types = {})
         : clip_g_tokenizer(0) {
-        clip_l = std::make_shared<CLIPTextModelRunner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, false);
-        clip_g = std::make_shared<CLIPTextModelRunner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false);
-        t5 = std::make_shared<T5Runner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.t5xxl.transformer");
+        bool use_clip_l = false;
+        bool use_clip_g = false;
+        bool use_t5 = false;
+        for (auto pair : tensor_types) {
+            if (pair.first.find("text_encoders.clip_l") != std::string::npos) {
+                use_clip_l = true;
+            } else if (pair.first.find("text_encoders.clip_g") != std::string::npos) {
+                use_clip_g = true;
+            } else if (pair.first.find("text_encoders.t5xxl") != std::string::npos) {
+                use_t5 = true;
+            }
+        }
+        if (!use_clip_l && !use_clip_g && !use_t5) {
+            LOG_WARN("IMPORTANT NOTICE: No text encoders provided, cannot process prompts!");
+            return;
+        }
+        if (use_clip_l) {
+            clip_l = std::make_shared<CLIPTextModelRunner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, false);
+        }
+        if (use_clip_g) {
+            clip_g = std::make_shared<CLIPTextModelRunner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false);
+        }
+        if (use_t5) {
+            t5 = std::make_shared<T5Runner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.t5xxl.transformer");
+        }
     }
 
     void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
-        clip_l->get_param_tensors(tensors, "text_encoders.clip_l.transformer.text_model");
-        clip_g->get_param_tensors(tensors, "text_encoders.clip_g.transformer.text_model");
-        t5->get_param_tensors(tensors, "text_encoders.t5xxl.transformer");
+        if (clip_l) {
+            clip_l->get_param_tensors(tensors, "text_encoders.clip_l.transformer.text_model");
+        }
+        if (clip_g) {
+            clip_g->get_param_tensors(tensors, "text_encoders.clip_g.transformer.text_model");
+        }
+        if (t5) {
+            t5->get_param_tensors(tensors, "text_encoders.t5xxl.transformer");
+        }
     }
 
     void alloc_params_buffer() override {
-        clip_l->alloc_params_buffer();
-        clip_g->alloc_params_buffer();
-        t5->alloc_params_buffer();
+        if (clip_l) {
+            clip_l->alloc_params_buffer();
+        }
+        if (clip_g) {
+            clip_g->alloc_params_buffer();
+        }
+        if (t5) {
+            t5->alloc_params_buffer();
+        }
     }
 
     void free_params_buffer() override {
-        clip_l->free_params_buffer();
-        clip_g->free_params_buffer();
-        t5->free_params_buffer();
+        if (clip_l) {
+            clip_l->free_params_buffer();
+        }
+        if (clip_g) {
+            clip_g->free_params_buffer();
+        }
+        if (t5) {
+            t5->free_params_buffer();
+        }
     }
 
     size_t get_params_buffer_size() override {
-        size_t buffer_size = clip_l->get_params_buffer_size();
-        buffer_size += clip_g->get_params_buffer_size();
-        buffer_size += t5->get_params_buffer_size();
+        size_t buffer_size = 0;
+        if (clip_l) {
+            buffer_size += clip_l->get_params_buffer_size();
+        }
+        if (clip_g) {
+            buffer_size += clip_g->get_params_buffer_size();
+        }
+        if (t5) {
+            buffer_size += t5->get_params_buffer_size();
+        }
         return buffer_size;
     }
@@ -731,23 +778,32 @@ struct SD3CLIPEmbedder : public Conditioner {
         for (const auto& item : parsed_attention) {
             const std::string& curr_text = item.first;
             float curr_weight = item.second;
-            std::vector<int> curr_tokens = clip_l_tokenizer.encode(curr_text, on_new_token_cb);
-            clip_l_tokens.insert(clip_l_tokens.end(), curr_tokens.begin(), curr_tokens.end());
-            clip_l_weights.insert(clip_l_weights.end(), curr_tokens.size(), curr_weight);
-
-            curr_tokens = clip_g_tokenizer.encode(curr_text, on_new_token_cb);
-            clip_g_tokens.insert(clip_g_tokens.end(), curr_tokens.begin(), curr_tokens.end());
-            clip_g_weights.insert(clip_g_weights.end(), curr_tokens.size(), curr_weight);
-
-            curr_tokens = t5_tokenizer.Encode(curr_text, true);
-            t5_tokens.insert(t5_tokens.end(), curr_tokens.begin(), curr_tokens.end());
-            t5_weights.insert(t5_weights.end(), curr_tokens.size(), curr_weight);
+            if (clip_l) {
+                std::vector<int> curr_tokens = clip_l_tokenizer.encode(curr_text, on_new_token_cb);
+                clip_l_tokens.insert(clip_l_tokens.end(), curr_tokens.begin(), curr_tokens.end());
+                clip_l_weights.insert(clip_l_weights.end(), curr_tokens.size(), curr_weight);
+            }
+            if (clip_g) {
+                std::vector<int> curr_tokens = clip_g_tokenizer.encode(curr_text, on_new_token_cb);
+                clip_g_tokens.insert(clip_g_tokens.end(), curr_tokens.begin(), curr_tokens.end());
+                clip_g_weights.insert(clip_g_weights.end(), curr_tokens.size(), curr_weight);
+            }
+            if (t5) {
+                std::vector<int> curr_tokens = t5_tokenizer.Encode(curr_text, true);
+                t5_tokens.insert(t5_tokens.end(), curr_tokens.begin(), curr_tokens.end());
+                t5_weights.insert(t5_weights.end(), curr_tokens.size(), curr_weight);
+            }
         }
 
-        clip_l_tokenizer.pad_tokens(clip_l_tokens, clip_l_weights, max_length, padding);
-        clip_g_tokenizer.pad_tokens(clip_g_tokens, clip_g_weights, max_length, padding);
-        t5_tokenizer.pad_tokens(t5_tokens, t5_weights, nullptr, max_length, padding);
+        if (clip_l) {
+            clip_l_tokenizer.pad_tokens(clip_l_tokens, clip_l_weights, max_length, padding);
+        }
+        if (clip_g) {
+            clip_g_tokenizer.pad_tokens(clip_g_tokens, clip_g_weights, max_length, padding);
+        }
+        if (t5) {
+            t5_tokenizer.pad_tokens(t5_tokens, t5_weights, nullptr, max_length, padding);
+        }
 
         // for (int i = 0; i < clip_l_tokens.size(); i++) {
         //     std::cout << clip_l_tokens[i] << ":" << clip_l_weights[i] << ", ";
@@ -795,10 +851,10 @@ struct SD3CLIPEmbedder : public Conditioner {
         std::vector<float> hidden_states_vec;
 
         size_t chunk_len = 77;
-        size_t chunk_count = clip_l_tokens.size() / chunk_len;
+        size_t chunk_count = std::max(std::max(clip_l_tokens.size(), clip_g_tokens.size()), t5_tokens.size()) / chunk_len;
         for (int chunk_idx = 0; chunk_idx < chunk_count; chunk_idx++) {
             // clip_l
-            {
+            if (clip_l) {
                 std::vector<int> chunk_tokens(clip_l_tokens.begin() + chunk_idx * chunk_len,
                                               clip_l_tokens.begin() + (chunk_idx + 1) * chunk_len);
                 std::vector<float> chunk_weights(clip_l_weights.begin() + chunk_idx * chunk_len,
@@ -845,10 +901,17 @@ struct SD3CLIPEmbedder : public Conditioner {
                                     &pooled_l,
                                     work_ctx);
                 }
+            } else {
+                chunk_hidden_states_l = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 768, chunk_len);
+                ggml_set_f32(chunk_hidden_states_l, 0.f);
+                if (chunk_idx == 0) {
+                    pooled_l = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, 768);
+                    ggml_set_f32(pooled_l, 0.f);
+                }
             }
             // clip_g
-            {
+            if (clip_g) {
                 std::vector<int> chunk_tokens(clip_g_tokens.begin() + chunk_idx * chunk_len,
                                               clip_g_tokens.begin() + (chunk_idx + 1) * chunk_len);
                 std::vector<float> chunk_weights(clip_g_weights.begin() + chunk_idx * chunk_len,
@@ -896,10 +959,17 @@ struct SD3CLIPEmbedder : public Conditioner {
                                     &pooled_g,
                                     work_ctx);
                 }
+            } else {
+                chunk_hidden_states_g = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 1280, chunk_len);
+                ggml_set_f32(chunk_hidden_states_g, 0.f);
+                if (chunk_idx == 0) {
+                    pooled_g = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, 1280);
+                    ggml_set_f32(pooled_g, 0.f);
+                }
             }
             // t5
-            {
+            if (t5) {
                 std::vector<int> chunk_tokens(t5_tokens.begin() + chunk_idx * chunk_len,
                                               t5_tokens.begin() + (chunk_idx + 1) * chunk_len);
                 std::vector<float> chunk_weights(t5_weights.begin() + chunk_idx * chunk_len,
@@ -927,6 +997,9 @@ struct SD3CLIPEmbedder : public Conditioner {
                     float new_mean = ggml_tensor_mean(tensor);
                     ggml_tensor_scale(tensor, (original_mean / new_mean));
                 }
+            } else {
+                chunk_hidden_states_t5 = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 4096, chunk_len);
+                ggml_set_f32(chunk_hidden_states_t5, 0.f);
             }
 
             auto chunk_hidden_states_lg_pad = ggml_new_tensor_3d(work_ctx,
@@ -969,11 +1042,20 @@ struct SD3CLIPEmbedder : public Conditioner {
                                      ((float*)chunk_hidden_states->data) + ggml_nelements(chunk_hidden_states));
         }
 
-        hidden_states = vector_to_ggml_tensor(work_ctx, hidden_states_vec);
-        hidden_states = ggml_reshape_2d(work_ctx,
-                                        hidden_states,
-                                        chunk_hidden_states->ne[0],
-                                        ggml_nelements(hidden_states) / chunk_hidden_states->ne[0]);
+        if (hidden_states_vec.size() > 0) {
+            hidden_states = vector_to_ggml_tensor(work_ctx, hidden_states_vec);
+            hidden_states = ggml_reshape_2d(work_ctx,
+                                            hidden_states,
+                                            chunk_hidden_states->ne[0],
+                                            ggml_nelements(hidden_states) / chunk_hidden_states->ne[0]);
+        } else {
+            hidden_states = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 4096, 256);
+            ggml_set_f32(hidden_states, 0.f);
+        }
+        if (pooled == nullptr) {
+            pooled = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, 2048);
+            ggml_set_f32(pooled, 0.f);
+        }
         return {hidden_states, pooled, nullptr};
     }
@@ -999,28 +1081,68 @@ struct FluxCLIPEmbedder : public Conditioner {
     FluxCLIPEmbedder(ggml_backend_t backend,
                      bool offload_params_to_cpu,
                      const String2GGMLType& tensor_types = {}) {
-        clip_l = std::make_shared<CLIPTextModelRunner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, true);
-        t5 = std::make_shared<T5Runner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.t5xxl.transformer");
+        bool use_clip_l = false;
+        bool use_t5 = false;
+        for (auto pair : tensor_types) {
+            if (pair.first.find("text_encoders.clip_l") != std::string::npos) {
+                use_clip_l = true;
+            } else if (pair.first.find("text_encoders.t5xxl") != std::string::npos) {
+                use_t5 = true;
+            }
+        }
+        if (!use_clip_l && !use_t5) {
+            LOG_WARN("IMPORTANT NOTICE: No text encoders provided, cannot process prompts!");
+            return;
+        }
+        if (use_clip_l) {
+            clip_l = std::make_shared<CLIPTextModelRunner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, true);
+        } else {
+            LOG_WARN("clip_l text encoder not found! Prompt adherence might be degraded.");
+        }
+        if (use_t5) {
+            t5 = std::make_shared<T5Runner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.t5xxl.transformer");
+        } else {
+            LOG_WARN("t5xxl text encoder not found! Prompt adherence might be degraded.");
+        }
     }
 
     void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
-        clip_l->get_param_tensors(tensors, "text_encoders.clip_l.transformer.text_model");
-        t5->get_param_tensors(tensors, "text_encoders.t5xxl.transformer");
+        if (clip_l) {
+            clip_l->get_param_tensors(tensors, "text_encoders.clip_l.transformer.text_model");
+        }
+        if (t5) {
+            t5->get_param_tensors(tensors, "text_encoders.t5xxl.transformer");
+        }
     }
 
     void alloc_params_buffer() override {
-        clip_l->alloc_params_buffer();
-        t5->alloc_params_buffer();
+        if (clip_l) {
+            clip_l->alloc_params_buffer();
+        }
+        if (t5) {
+            t5->alloc_params_buffer();
+        }
     }
 
     void free_params_buffer() override {
-        clip_l->free_params_buffer();
-        t5->free_params_buffer();
+        if (clip_l) {
+            clip_l->free_params_buffer();
+        }
+        if (t5) {
+            t5->free_params_buffer();
+        }
     }
 
     size_t get_params_buffer_size() override {
-        size_t buffer_size = clip_l->get_params_buffer_size();
-        buffer_size += t5->get_params_buffer_size();
+        size_t buffer_size = 0;
+        if (clip_l) {
+            buffer_size += clip_l->get_params_buffer_size();
+        }
+        if (t5) {
+            buffer_size += t5->get_params_buffer_size();
+        }
         return buffer_size;
     }
@@ -1050,18 +1172,24 @@ struct FluxCLIPEmbedder : public Conditioner {
         for (const auto& item : parsed_attention) {
             const std::string& curr_text = item.first;
             float curr_weight = item.second;
-            std::vector<int> curr_tokens = clip_l_tokenizer.encode(curr_text, on_new_token_cb);
-            clip_l_tokens.insert(clip_l_tokens.end(), curr_tokens.begin(), curr_tokens.end());
-            clip_l_weights.insert(clip_l_weights.end(), curr_tokens.size(), curr_weight);
-
-            curr_tokens = t5_tokenizer.Encode(curr_text, true);
-            t5_tokens.insert(t5_tokens.end(), curr_tokens.begin(), curr_tokens.end());
-            t5_weights.insert(t5_weights.end(), curr_tokens.size(), curr_weight);
+            if (clip_l) {
+                std::vector<int> curr_tokens = clip_l_tokenizer.encode(curr_text, on_new_token_cb);
+                clip_l_tokens.insert(clip_l_tokens.end(), curr_tokens.begin(), curr_tokens.end());
+                clip_l_weights.insert(clip_l_weights.end(), curr_tokens.size(), curr_weight);
+            }
+            if (t5) {
+                std::vector<int> curr_tokens = t5_tokenizer.Encode(curr_text, true);
+                t5_tokens.insert(t5_tokens.end(), curr_tokens.begin(), curr_tokens.end());
+                t5_weights.insert(t5_weights.end(), curr_tokens.size(), curr_weight);
+            }
         }
 
-        clip_l_tokenizer.pad_tokens(clip_l_tokens, clip_l_weights, 77, padding);
-        t5_tokenizer.pad_tokens(t5_tokens, t5_weights, nullptr, max_length, padding);
+        if (clip_l) {
+            clip_l_tokenizer.pad_tokens(clip_l_tokens, clip_l_weights, 77, padding);
+        }
+        if (t5) {
+            t5_tokenizer.pad_tokens(t5_tokens, t5_weights, nullptr, max_length, padding);
+        }
 
         // for (int i = 0; i < clip_l_tokens.size(); i++) {
        //     std::cout << clip_l_tokens[i] << ":" << clip_l_weights[i] << ", ";
@@ -1096,35 +1224,37 @@ struct FluxCLIPEmbedder : public Conditioner {
         struct ggml_tensor* pooled = nullptr;  // [768,]
         std::vector<float> hidden_states_vec;
 
-        size_t chunk_count = t5_tokens.size() / chunk_len;
+        size_t chunk_count = std::max(clip_l_tokens.size() > 0 ? chunk_len : 0, t5_tokens.size()) / chunk_len;
         for (int chunk_idx = 0; chunk_idx < chunk_count; chunk_idx++) {
             // clip_l
             if (chunk_idx == 0) {
-                size_t chunk_len_l = 77;
-                std::vector<int> chunk_tokens(clip_l_tokens.begin(),
-                                              clip_l_tokens.begin() + chunk_len_l);
-                std::vector<float> chunk_weights(clip_l_weights.begin(),
-                                                 clip_l_weights.begin() + chunk_len_l);
-
-                auto input_ids = vector_to_ggml_tensor_i32(work_ctx, chunk_tokens);
-                size_t max_token_idx = 0;
-
-                auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_l_tokenizer.EOS_TOKEN_ID);
-                max_token_idx = std::min<size_t>(std::distance(chunk_tokens.begin(), it), chunk_tokens.size() - 1);
-
-                clip_l->compute(n_threads,
-                                input_ids,
-                                0,
-                                nullptr,
-                                max_token_idx,
-                                true,
-                                clip_skip,
-                                &pooled,
-                                work_ctx);
+                if (clip_l) {
+                    size_t chunk_len_l = 77;
+                    std::vector<int> chunk_tokens(clip_l_tokens.begin(),
+                                                  clip_l_tokens.begin() + chunk_len_l);
+                    std::vector<float> chunk_weights(clip_l_weights.begin(),
+                                                     clip_l_weights.begin() + chunk_len_l);
+
+                    auto input_ids = vector_to_ggml_tensor_i32(work_ctx, chunk_tokens);
+                    size_t max_token_idx = 0;
+
+                    auto it = std::find(chunk_tokens.begin(), chunk_tokens.end(), clip_l_tokenizer.EOS_TOKEN_ID);
+                    max_token_idx = std::min<size_t>(std::distance(chunk_tokens.begin(), it), chunk_tokens.size() - 1);
+
+                    clip_l->compute(n_threads,
+                                    input_ids,
+                                    0,
+                                    nullptr,
+                                    max_token_idx,
+                                    true,
+                                    clip_skip,
+                                    &pooled,
+                                    work_ctx);
+                }
             }
 
             // t5
-            {
+            if (t5) {
                 std::vector<int> chunk_tokens(t5_tokens.begin() + chunk_idx * chunk_len,
                                               t5_tokens.begin() + (chunk_idx + 1) * chunk_len);
                 std::vector<float> chunk_weights(t5_weights.begin() + chunk_idx * chunk_len,
@@ -1152,6 +1282,9 @@ struct FluxCLIPEmbedder : public Conditioner {
                     float new_mean = ggml_tensor_mean(tensor);
                     ggml_tensor_scale(tensor, (original_mean / new_mean));
                 }
+            } else {
+                chunk_hidden_states = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 4096, chunk_len);
+                ggml_set_f32(chunk_hidden_states, 0.f);
             }
 
             int64_t t1 = ggml_time_ms();
@@ -1168,11 +1301,20 @@ struct FluxCLIPEmbedder : public Conditioner {
                                      ((float*)chunk_hidden_states->data) + ggml_nelements(chunk_hidden_states));
         }
 
-        hidden_states = vector_to_ggml_tensor(work_ctx, hidden_states_vec);
-        hidden_states = ggml_reshape_2d(work_ctx,
-                                        hidden_states,
-                                        chunk_hidden_states->ne[0],
-                                        ggml_nelements(hidden_states) / chunk_hidden_states->ne[0]);
+        if (hidden_states_vec.size() > 0) {
+            hidden_states = vector_to_ggml_tensor(work_ctx, hidden_states_vec);
+            hidden_states = ggml_reshape_2d(work_ctx,
+                                            hidden_states,
+                                            chunk_hidden_states->ne[0],
+                                            ggml_nelements(hidden_states) / chunk_hidden_states->ne[0]);
+        } else {
+            hidden_states = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 4096, 256);
+            ggml_set_f32(hidden_states, 0.f);
+        }
+        if (pooled == nullptr) {
+            pooled = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, 768);
+            ggml_set_f32(pooled, 0.f);
+        }
         return {hidden_states, pooled, nullptr};
     }
@@ -1203,26 +1345,44 @@ struct T5CLIPEmbedder : public Conditioner {
                   int mask_pad = 1,
                   bool is_umt5 = false)
         : use_mask(use_mask), mask_pad(mask_pad), t5_tokenizer(is_umt5) {
-        t5 = std::make_shared<T5Runner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.t5xxl.transformer", is_umt5);
+        bool use_t5 = false;
+        for (auto pair : tensor_types) {
+            if (pair.first.find("text_encoders.t5xxl") != std::string::npos) {
+                use_t5 = true;
+            }
+        }
+        if (!use_t5) {
+            LOG_WARN("IMPORTANT NOTICE: No text encoders provided, cannot process prompts!");
+            return;
+        } else {
+            t5 = std::make_shared<T5Runner>(backend, offload_params_to_cpu, tensor_types, "text_encoders.t5xxl.transformer", is_umt5);
+        }
     }
 
     void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
-        t5->get_param_tensors(tensors, "text_encoders.t5xxl.transformer");
+        if (t5) {
+            t5->get_param_tensors(tensors, "text_encoders.t5xxl.transformer");
+        }
     }
 
     void alloc_params_buffer() override {
-        t5->alloc_params_buffer();
+        if (t5) {
+            t5->alloc_params_buffer();
+        }
    }
 
     void free_params_buffer() override {
-        t5->free_params_buffer();
+        if (t5) {
+            t5->free_params_buffer();
+        }
     }
 
     size_t get_params_buffer_size() override {
         size_t buffer_size = 0;
-        buffer_size += t5->get_params_buffer_size();
+        if (t5) {
+            buffer_size += t5->get_params_buffer_size();
+        }
         return buffer_size;
     }
@@ -1248,17 +1408,18 @@ struct T5CLIPEmbedder : public Conditioner {
         std::vector<int> t5_tokens;
         std::vector<float> t5_weights;
         std::vector<float> t5_mask;
-        for (const auto& item : parsed_attention) {
-            const std::string& curr_text = item.first;
-            float curr_weight = item.second;
-
-            std::vector<int> curr_tokens = t5_tokenizer.Encode(curr_text, true);
-            t5_tokens.insert(t5_tokens.end(), curr_tokens.begin(), curr_tokens.end());
-            t5_weights.insert(t5_weights.end(), curr_tokens.size(), curr_weight);
+        if (t5) {
+            for (const auto& item : parsed_attention) {
+                const std::string& curr_text = item.first;
+                float curr_weight = item.second;
+
+                std::vector<int> curr_tokens = t5_tokenizer.Encode(curr_text, true);
+                t5_tokens.insert(t5_tokens.end(), curr_tokens.begin(), curr_tokens.end());
+                t5_weights.insert(t5_weights.end(), curr_tokens.size(), curr_weight);
+            }
+            t5_tokenizer.pad_tokens(t5_tokens, t5_weights, &t5_mask, max_length, padding);
         }
-        t5_tokenizer.pad_tokens(t5_tokens, t5_weights, &t5_mask, max_length, padding);
 
         return {t5_tokens, t5_weights, t5_mask};
     }
@@ -1282,6 +1443,13 @@ struct T5CLIPEmbedder : public Conditioner {
                          std::tuple<std::vector<int>, std::vector<float>, std::vector<float>> token_and_weights,
                          int clip_skip,
                          bool zero_out_masked = false) {
+        if (!t5) {
+            auto hidden_states = ggml_new_tensor_2d(work_ctx, GGML_TYPE_F32, 4096, 256);
+            ggml_set_f32(hidden_states, 0.f);
+            auto t5_attn_mask = ggml_new_tensor_1d(work_ctx, GGML_TYPE_F32, 256);
+            ggml_set_f32(t5_attn_mask, -HUGE_VALF);
+            return {hidden_states, t5_attn_mask, nullptr};
+        }
         auto& t5_tokens = std::get<0>(token_and_weights);
         auto& t5_weights = std::get<1>(token_and_weights);
         auto& t5_attn_mask_vec = std::get<2>(token_and_weights);

View File: docs/distilled_sd.md (new file, +86 lines)

@@ -0,0 +1,86 @@
# Running distilled models: SSD1B and SD1.x with tiny U-Nets

## Preface

These kinds of models have a reduced U-Net.

Unlike other SDXL models, the SSD1B U-Net has only one middle block and fewer attention layers in its up and down blocks, resulting in noticeably smaller files. Running these models saves more than 33% of inference time. For more details, refer to Segmind's paper: https://arxiv.org/abs/2401.02677v1

Unlike other SD 1.x models, Tiny-UNet models consist of only 6 U-Net blocks, resulting in much smaller files (approximately 1 GB). Running these models saves almost 50% of inference time. For more details, refer to the paper: https://arxiv.org/pdf/2305.15798.pdf
## SSD1B

Unfortunately, not all of these models follow the standard model parameter naming.
Still, there are some very useful SSD1B models available online, such as:
* https://huggingface.co/segmind/SSD-1B/resolve/main/SSD-1B-A1111.safetensors
* https://huggingface.co/hassenhamdi/SSD-1B-fp8_e4m3fn/resolve/main/SSD-1B_fp8_e4m3fn.safetensors

There are also useful LoRAs available:
* https://huggingface.co/seungminh/lora-swarovski-SSD-1B/resolve/main/pytorch_lora_weights.safetensors
* https://huggingface.co/kylielee505/mylcmlorassd/resolve/main/pytorch_lora_weights.safetensors

You can use these files **out-of-the-box**, unlike the models in the next section; a sample invocation follows below.
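
For example, a minimal sd.cpp run might look like this. This is a sketch that assumes the `sd` binary built from this repository and the SSD-1B-A1111.safetensors file in the working directory; adjust paths and sampling options to your setup:

```bash
# SSD1B is an SDXL-family model, so render at 1024x1024
./sd -m SSD-1B-A1111.safetensors \
    -p "a photo of an astronaut riding a horse on mars" \
    -W 1024 -H 1024 --steps 20 -o output.png
```
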
## SD1.x with tiny U-Nets

There are some Tiny SD 1.x models available online, such as:
* https://huggingface.co/segmind/tiny-sd
* https://huggingface.co/segmind/portrait-finetuned
* https://huggingface.co/nota-ai/bk-sdm-tiny

These models need some conversion, partly because some tensors are stored **non-contiguously**. To create a usable checkpoint file, follow these **easy** steps:
### Download model from Hugging Face

Download the model using Python on your computer, for example this way:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("segmind/tiny-sd")
unet = pipe.unet
for param in unet.parameters():
    param.data = param.data.contiguous()  # <- important here
pipe.save_pretrained("segmindtiny-sd", safe_serialization=True)
```
### Convert that to a ckpt file

To convert the downloaded model to a checkpoint file, you need another Python script. Download the conversion script from here:
* https://raw.githubusercontent.com/huggingface/diffusers/refs/heads/main/scripts/convert_diffusers_to_original_stable_diffusion.py
### Run convert script

Now run the conversion script:

```bash
python convert_diffusers_to_original_stable_diffusion.py \
    --model_path ./segmindtiny-sd \
    --checkpoint_path ./segmind_tiny-sd.ckpt --half
```

The file **segmind_tiny-sd.ckpt** will be generated and is ready to use with sd.cpp.
You can follow a similar process for the other Hugging Face models mentioned above.
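
As a quick smoke test, you can point sd.cpp at the converted checkpoint. Again a sketch assuming the `sd` binary from this repository; tiny SD 1.x models render at the usual 512x512:

```bash
./sd -m segmind_tiny-sd.ckpt \
    -p "a portrait photo of a woman, soft light" \
    -W 512 -H 512 --steps 20 -o output.png
```
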
### Another ckpt file on the net

There is another model file available online:
* https://huggingface.co/ClashSAN/small-sd/resolve/main/tinySDdistilled.ckpt

If you want to use it, you have to fix some **non-contiguous tensors** first:

```python
import torch

ckpt = torch.load("tinySDdistilled.ckpt", map_location=torch.device('cpu'))
for key, value in ckpt['state_dict'].items():
    if isinstance(value, torch.Tensor):
        ckpt['state_dict'][key] = value.contiguous()
torch.save(ckpt, "tinySDdistilled_fixed.ckpt")
```

View File: model.cpp

@@ -330,6 +330,10 @@ std::string convert_cond_model_name(const std::string& name) {
             return new_name;
         }
 
+        if (new_name == "model.text_projection.weight") {
+            new_name = "transformer.text_model.text_projection";
+        }
+
         if (open_clip_to_hf_clip_model.find(new_name) != open_clip_to_hf_clip_model.end()) {
             new_name = open_clip_to_hf_clip_model[new_name];
         }
@@ -623,6 +627,14 @@ std::string convert_tensor_name(std::string name) {
     if (starts_with(name, "diffusion_model")) {
         name = "model." + name;
     }
+    if (starts_with(name, "model.diffusion_model.up_blocks.0.attentions.0.")) {
+        name.replace(0, sizeof("model.diffusion_model.up_blocks.0.attentions.0.") - 1,
+                     "model.diffusion_model.output_blocks.0.1.");
+    }
+    if (starts_with(name, "model.diffusion_model.up_blocks.0.attentions.1.")) {
+        name.replace(0, sizeof("model.diffusion_model.up_blocks.0.attentions.1.") - 1,
+                     "model.diffusion_model.output_blocks.1.1.");
+    }
     // size_t pos = name.find("lora_A");
     // if (pos != std::string::npos) {
     //     name.replace(pos, strlen("lora_A"), "lora_up");
@@ -1775,6 +1787,7 @@ SDVersion ModelLoader::get_sd_version() {
     bool is_wan = false;
     int64_t patch_embedding_channels = 0;
     bool has_img_emb = false;
+    bool has_middle_block_1 = false;
 
     for (auto& tensor_storage : tensor_storages) {
         if (!(is_xl)) {
@@ -1818,6 +1831,10 @@ SDVersion ModelLoader::get_sd_version() {
                 return VERSION_SVD;
             }
         }
+        if (tensor_storage.name.find("model.diffusion_model.middle_block.1.") != std::string::npos ||
+            tensor_storage.name.find("unet.mid_block.resnets.1.") != std::string::npos) {
+            has_middle_block_1 = true;
+        }
         if (tensor_storage.name == "cond_stage_model.transformer.text_model.embeddings.token_embedding.weight" ||
             tensor_storage.name == "cond_stage_model.model.token_embedding.weight" ||
             tensor_storage.name == "text_model.embeddings.token_embedding.weight" ||
@@ -1852,6 +1869,9 @@ SDVersion ModelLoader::get_sd_version() {
         if (is_ip2p) {
             return VERSION_SDXL_PIX2PIX;
         }
+        if (!has_middle_block_1) {
+            return VERSION_SDXL_SSD1B;
+        }
         return VERSION_SDXL;
     }
@@ -1875,6 +1895,9 @@ SDVersion ModelLoader::get_sd_version() {
         if (is_ip2p) {
             return VERSION_SD1_PIX2PIX;
         }
+        if (!has_middle_block_1) {
+            return VERSION_SD1_TINY_UNET;
+        }
         return VERSION_SD1;
     } else if (token_embedding_weight.ne[0] == 1024) {
         if (is_inpaint) {

View File: model.h

@@ -23,11 +23,13 @@ enum SDVersion {
     VERSION_SD1,
     VERSION_SD1_INPAINT,
     VERSION_SD1_PIX2PIX,
+    VERSION_SD1_TINY_UNET,
     VERSION_SD2,
     VERSION_SD2_INPAINT,
     VERSION_SDXL,
     VERSION_SDXL_INPAINT,
     VERSION_SDXL_PIX2PIX,
+    VERSION_SDXL_SSD1B,
     VERSION_SVD,
     VERSION_SD3,
     VERSION_FLUX,
@@ -43,7 +45,7 @@ enum SDVersion {
 };
 
 static inline bool sd_version_is_sd1(SDVersion version) {
-    if (version == VERSION_SD1 || version == VERSION_SD1_INPAINT || version == VERSION_SD1_PIX2PIX) {
+    if (version == VERSION_SD1 || version == VERSION_SD1_INPAINT || version == VERSION_SD1_PIX2PIX || version == VERSION_SD1_TINY_UNET) {
         return true;
     }
     return false;
@@ -57,7 +59,7 @@ static inline bool sd_version_is_sd2(SDVersion version) {
 }
 
 static inline bool sd_version_is_sdxl(SDVersion version) {
-    if (version == VERSION_SDXL || version == VERSION_SDXL_INPAINT || version == VERSION_SDXL_PIX2PIX) {
+    if (version == VERSION_SDXL || version == VERSION_SDXL_INPAINT || version == VERSION_SDXL_PIX2PIX || version == VERSION_SDXL_SSD1B) {
         return true;
     }
     return false;

View File: stable-diffusion.cpp

@@ -28,11 +28,13 @@ const char* model_version_to_str[] = {
     "SD 1.x",
     "SD 1.x Inpaint",
     "Instruct-Pix2Pix",
+    "SD 1.x Tiny UNet",
     "SD 2.x",
     "SD 2.x Inpaint",
     "SDXL",
     "SDXL Inpaint",
     "SDXL Instruct-Pix2Pix",
+    "SDXL (SSD1B)",
     "SVD",
     "SD3.x",
     "Flux",

View File: unet.hpp

@@ -204,6 +204,9 @@ public:
             adm_in_channels = 768;
             num_head_channels = 64;
             num_heads = -1;
+        } else if (version == VERSION_SD1_TINY_UNET) {
+            num_res_blocks = 1;
+            channel_mult = {1, 2, 4};
         }
         if (sd_version_is_inpaint(version)) {
             in_channels = 9;
@@ -270,13 +273,22 @@ public:
                         n_head = ch / d_head;
                     }
                     std::string name = "input_blocks." + std::to_string(input_block_idx) + ".1";
-                    blocks[name] = std::shared_ptr<GGMLBlock>(get_attention_layer(ch,
-                                                                                  n_head,
-                                                                                  d_head,
-                                                                                  transformer_depth[i],
-                                                                                  context_dim));
+                    int td = transformer_depth[i];
+                    if (version == VERSION_SDXL_SSD1B) {
+                        if (i == 2) {
+                            td = 4;
+                        }
+                    }
+                    blocks[name] = std::shared_ptr<GGMLBlock>(get_attention_layer(ch,
+                                                                                  n_head,
+                                                                                  d_head,
+                                                                                  td,
+                                                                                  context_dim));
                 }
                 input_block_chans.push_back(ch);
+                if (version == VERSION_SD1_TINY_UNET) {
+                    input_block_idx++;
+                }
             }
             if (i != len_mults - 1) {
                 input_block_idx += 1;
@@ -295,14 +307,17 @@ public:
             d_head = num_head_channels;
             n_head = ch / d_head;
         }
-        blocks["middle_block.0"] = std::shared_ptr<GGMLBlock>(get_resblock(ch, time_embed_dim, ch));
-        blocks["middle_block.1"] = std::shared_ptr<GGMLBlock>(get_attention_layer(ch,
-                                                                                  n_head,
-                                                                                  d_head,
-                                                                                  transformer_depth[transformer_depth.size() - 1],
-                                                                                  context_dim));
-        blocks["middle_block.2"] = std::shared_ptr<GGMLBlock>(get_resblock(ch, time_embed_dim, ch));
+        if (version != VERSION_SD1_TINY_UNET) {
+            blocks["middle_block.0"] = std::shared_ptr<GGMLBlock>(get_resblock(ch, time_embed_dim, ch));
+            if (version != VERSION_SDXL_SSD1B) {
+                blocks["middle_block.1"] = std::shared_ptr<GGMLBlock>(get_attention_layer(ch,
+                                                                                          n_head,
+                                                                                          d_head,
+                                                                                          transformer_depth[transformer_depth.size() - 1],
+                                                                                          context_dim));
+                blocks["middle_block.2"] = std::shared_ptr<GGMLBlock>(get_resblock(ch, time_embed_dim, ch));
+            }
+        }
 
         // output_blocks
         int output_block_idx = 0;
         for (int i = (int)len_mults - 1; i >= 0; i--) {
@@ -324,12 +339,27 @@ public:
                         n_head = ch / d_head;
                     }
                     std::string name = "output_blocks." + std::to_string(output_block_idx) + ".1";
-                    blocks[name] = std::shared_ptr<GGMLBlock>(get_attention_layer(ch, n_head, d_head, transformer_depth[i], context_dim));
+                    int td = transformer_depth[i];
+                    if (version == VERSION_SDXL_SSD1B) {
+                        if (i == 2 && (j == 0 || j == 1)) {
+                            td = 4;
+                        }
+                        if (i == 1 && (j == 1 || j == 2)) {
+                            td = 1;
+                        }
+                    }
+                    blocks[name] = std::shared_ptr<GGMLBlock>(get_attention_layer(ch, n_head, d_head, td, context_dim));
                     up_sample_idx++;
                 }
 
                 if (i > 0 && j == num_res_blocks) {
+                    if (version == VERSION_SD1_TINY_UNET) {
+                        output_block_idx++;
+                        if (output_block_idx == 2) {
+                            up_sample_idx = 1;
+                        }
+                    }
                     std::string name = "output_blocks." + std::to_string(output_block_idx) + "." + std::to_string(up_sample_idx);
                     blocks[name] = std::shared_ptr<GGMLBlock>(new UpSampleBlock(ch, ch));
@@ -463,6 +493,9 @@ public:
                     }
                     hs.push_back(h);
                 }
+                if (version == VERSION_SD1_TINY_UNET) {
+                    input_block_idx++;
+                }
                 if (i != len_mults - 1) {
                     ds *= 2;
                     input_block_idx += 1;
@@ -477,10 +510,13 @@ public:
         // [N, 4*model_channels, h/8, w/8]
 
         // middle_block
-        h = resblock_forward("middle_block.0", ctx, h, emb, num_video_frames);                      // [N, 4*model_channels, h/8, w/8]
-        h = attention_layer_forward("middle_block.1", ctx, backend, h, context, num_video_frames);  // [N, 4*model_channels, h/8, w/8]
-        h = resblock_forward("middle_block.2", ctx, h, emb, num_video_frames);                      // [N, 4*model_channels, h/8, w/8]
+        if (version != VERSION_SD1_TINY_UNET) {
+            h = resblock_forward("middle_block.0", ctx, h, emb, num_video_frames);                      // [N, 4*model_channels, h/8, w/8]
+            if (version != VERSION_SDXL_SSD1B) {
+                h = attention_layer_forward("middle_block.1", ctx, backend, h, context, num_video_frames);  // [N, 4*model_channels, h/8, w/8]
+                h = resblock_forward("middle_block.2", ctx, h, emb, num_video_frames);                      // [N, 4*model_channels, h/8, w/8]
+            }
+        }
 
         if (controls.size() > 0) {
             auto cs = ggml_scale_inplace(ctx, controls[controls.size() - 1], control_strength);
             h = ggml_add(ctx, h, cs);  // middle control
@@ -516,6 +552,12 @@ public:
                 }
 
                 if (i > 0 && j == num_res_blocks) {
+                    if (version == VERSION_SD1_TINY_UNET) {
+                        output_block_idx++;
+                        if (output_block_idx == 2) {
+                            up_sample_idx = 1;
+                        }
+                    }
                     std::string name = "output_blocks." + std::to_string(output_block_idx) + "." + std::to_string(up_sample_idx);
                     auto block = std::dynamic_pointer_cast<UpSampleBlock>(blocks[name]);