From 970c4a33125a59b6b2e3237c6d23c63ccd43fd87 Mon Sep 17 00:00:00 2001 From: akleine Date: Mon, 27 Apr 2026 16:42:57 +0200 Subject: [PATCH 1/9] chore: replace some NULL with nullptr + use "%zu" for printing some size_t data (#1457) --- src/ggml_extend.hpp | 18 +++++++++--------- src/stable-diffusion.cpp | 8 ++++---- src/tokenizers/clip_tokenizer.cpp | 2 +- src/tokenizers/mistral_tokenizer.cpp | 2 +- src/tokenizers/qwen2_tokenizer.cpp | 2 +- src/util.cpp | 12 ++++++------ 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index 859270cb..cfb38c8d 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -2758,16 +2758,16 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_lokr_forward( bool is_conv, WeightAdapter::ForwardParams::conv2d_params_t conv_params, float scale) { - GGML_ASSERT((w1 != NULL || (w1a != NULL && w1b != NULL))); - GGML_ASSERT((w2 != NULL || (w2a != NULL && w2b != NULL))); + GGML_ASSERT((w1 != nullptr || (w1a != nullptr && w1b != nullptr))); + GGML_ASSERT((w2 != nullptr || (w2a != nullptr && w2b != nullptr))); - int uq = (w1 != NULL) ? (int)w1->ne[0] : (int)w1a->ne[0]; - int up = (w1 != NULL) ? (int)w1->ne[1] : (int)w1b->ne[1]; + int uq = (w1 != nullptr) ? (int)w1->ne[0] : (int)w1a->ne[0]; + int up = (w1 != nullptr) ? (int)w1->ne[1] : (int)w1b->ne[1]; int q_actual = is_conv ? (int)h->ne[2] : (int)h->ne[0]; int vq = q_actual / uq; - int vp = (w2 != NULL) ? (is_conv ? (int)w2->ne[3] : (int)w2->ne[1]) + int vp = (w2 != nullptr) ? (is_conv ? 
(int)w2->ne[3] : (int)w2->ne[1]) : (int)w2a->ne[1]; GGML_ASSERT(q_actual == (uq * vq) && "Input dimension mismatch for LoKR split"); @@ -2803,7 +2803,7 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_lokr_forward( #endif ggml_tensor* h_split = ggml_reshape_3d(ctx, h, vq, uq * merge_batch_uq, batch / merge_batch_uq); - if (w2 != NULL) { + if (w2 != nullptr) { hb = ggml_mul_mat(ctx, w2, h_split); } else { hb = ggml_mul_mat(ctx, w2b, ggml_mul_mat(ctx, w2a, h_split)); @@ -2816,7 +2816,7 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_lokr_forward( hb_t = ggml_reshape_3d(ctx, hb_t, uq, vp * merge_batch_vp, batch / merge_batch_vp); ggml_tensor* hc_t; - if (w1 != NULL) { + if (w1 != nullptr) { hc_t = ggml_mul_mat(ctx, w1, hb_t); } else { hc_t = ggml_mul_mat(ctx, w1b, ggml_mul_mat(ctx, w1a, hb_t)); @@ -2834,7 +2834,7 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_lokr_forward( // 1. Reshape input: [W, H, vq*uq, batch] -> [W, H, vq, uq * batch] ggml_tensor* h_split = ggml_reshape_4d(ctx, h, h->ne[0], h->ne[1], vq, uq * batch); - if (w2 != NULL) { + if (w2 != nullptr) { hb = ggml_ext_conv_2d(ctx, h_split, w2, nullptr, conv_params.s0, conv_params.s1, @@ -2902,7 +2902,7 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_lokr_forward( ggml_tensor* hb_merged = ggml_reshape_2d(ctx, hb, w_out * h_out * vp, uq * batch); ggml_tensor* hc_t; ggml_tensor* hb_merged_t = ggml_cont(ctx, ggml_transpose(ctx, hb_merged)); - if (w1 != NULL) { + if (w1 != nullptr) { // Would be great to be able to transpose w1 instead to avoid transposing both hb and hc hc_t = ggml_mul_mat(ctx, w1, hb_merged_t); } else { diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index c6541148..f4f8894f 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -198,11 +198,11 @@ public: device = 0; } if (device >= device_count) { - LOG_WARN("Cannot find targeted vulkan device (%llu). Falling back to device 0.", device); + LOG_WARN("Cannot find targeted vulkan device (%zu). 
Falling back to device 0.", device); device = 0; } } - LOG_INFO("Vulkan: Using device %llu", device); + LOG_INFO("Vulkan: Using device %zu", device); backend = ggml_backend_vk_init(device); } if (!backend) { @@ -3233,7 +3233,7 @@ static sd_image_t* decode_image_outputs(sd_ctx_t* sd_ctx, } decoded_images.push_back(std::move(image)); int64_t t2 = ggml_time_ms(); - LOG_INFO("latent %" PRId64 " decoded, taking %.2fs", i + 1, (t2 - t1) * 1.0f / 1000); + LOG_INFO("latent %zu decoded, taking %.2fs", i + 1, (t2 - t1) * 1.0f / 1000); } int64_t t4 = ggml_time_ms(); @@ -3475,7 +3475,7 @@ SD_API sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* s sd_ctx->sd->diffusion_model->free_params_buffer(); } int64_t denoise_end = ggml_time_ms(); - LOG_INFO("generating %" PRId64 " latent images completed, taking %.2fs", + LOG_INFO("generating %zu latent images completed, taking %.2fs", final_latents.size(), (denoise_end - denoise_start) * 1.0f / 1000); diff --git a/src/tokenizers/clip_tokenizer.cpp b/src/tokenizers/clip_tokenizer.cpp index 57319306..70d63772 100644 --- a/src/tokenizers/clip_tokenizer.cpp +++ b/src/tokenizers/clip_tokenizer.cpp @@ -62,7 +62,7 @@ void CLIPTokenizer::load_from_merges(const std::string& merges_utf8_str) { } vocab.push_back(utf8_to_utf32("<|startoftext|>")); vocab.push_back(utf8_to_utf32("<|endoftext|>")); - LOG_DEBUG("vocab size: %llu", vocab.size()); + LOG_DEBUG("vocab size: %zu", vocab.size()); int i = 0; for (const auto& token : vocab) { encoder[token] = i; diff --git a/src/tokenizers/mistral_tokenizer.cpp b/src/tokenizers/mistral_tokenizer.cpp index 0a56542a..9b0624e3 100644 --- a/src/tokenizers/mistral_tokenizer.cpp +++ b/src/tokenizers/mistral_tokenizer.cpp @@ -28,7 +28,7 @@ void MistralTokenizer::load_from_merges(const std::string& merges_utf8_str, cons byte_decoder[pair.second] = pair.first; } std::vector merges = split_utf32(merges_utf8_str); - LOG_DEBUG("merges size %llu", merges.size()); + LOG_DEBUG("merges size %zu", 
merges.size()); std::vector> merge_pairs; for (const auto& merge : merges) { size_t space_pos = merge.find(' '); diff --git a/src/tokenizers/qwen2_tokenizer.cpp b/src/tokenizers/qwen2_tokenizer.cpp index 5ddaf4ed..9929ea38 100644 --- a/src/tokenizers/qwen2_tokenizer.cpp +++ b/src/tokenizers/qwen2_tokenizer.cpp @@ -11,7 +11,7 @@ void Qwen2Tokenizer::load_from_merges(const std::string& merges_utf8_str) { } std::vector merges = split_utf32(merges_utf8_str); - LOG_DEBUG("merges size %llu", merges.size()); + LOG_DEBUG("merges size %zu", merges.size()); std::vector> merge_pairs; for (const auto& merge : merges) { size_t space_pos = merge.find(' '); diff --git a/src/util.cpp b/src/util.cpp index e0187626..b28471d7 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -119,10 +119,10 @@ std::unique_ptr MmapWrapper::create(const std::string& filename) { filename.c_str(), GENERIC_READ, FILE_SHARE_READ, - NULL, + nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, - NULL); + nullptr); if (file_handle == INVALID_HANDLE_VALUE) { return nullptr; @@ -136,16 +136,16 @@ std::unique_ptr MmapWrapper::create(const std::string& filename) { file_size = static_cast(size.QuadPart); - HANDLE mapping_handle = CreateFileMapping(file_handle, NULL, PAGE_READONLY, 0, 0, NULL); + HANDLE mapping_handle = CreateFileMapping(file_handle, nullptr, PAGE_READONLY, 0, 0, nullptr); - if (mapping_handle == NULL) { + if (mapping_handle == nullptr) { CloseHandle(file_handle); return nullptr; } mapped_data = MapViewOfFile(mapping_handle, FILE_MAP_READ, 0, 0, file_size); - if (mapped_data == NULL) { + if (mapped_data == nullptr) { CloseHandle(mapping_handle); CloseHandle(file_handle); return nullptr; @@ -203,7 +203,7 @@ std::unique_ptr MmapWrapper::create(const std::string& filename) { size_t file_size = sb.st_size; - void* mapped_data = mmap(NULL, file_size, PROT_READ, mmap_flags, file_descriptor, 0); + void* mapped_data = mmap(nullptr, file_size, PROT_READ, mmap_flags, file_descriptor, 0); close(file_descriptor); From 
f40a707d0f6b1387945e00fac59fd39e928bc5f0 Mon Sep 17 00:00:00 2001 From: leejet Date: Mon, 27 Apr 2026 22:43:13 +0800 Subject: [PATCH 2/9] feat: add sdcpp-specific generation metadata to image outputs (#1462) --- examples/cli/main.cpp | 9 +- examples/common/common.cpp | 190 ++++++++++++++++++++++++++++++++++++- examples/common/common.h | 9 +- 3 files changed, 201 insertions(+), 7 deletions(-) diff --git a/examples/cli/main.cpp b/examples/cli/main.cpp index 8cec2dbc..27513f47 100644 --- a/examples/cli/main.cpp +++ b/examples/cli/main.cpp @@ -433,10 +433,11 @@ bool save_results(const SDCliParams& cli_params, if (!img.data) return false; - std::string params = gen_params.embed_image_metadata - ? get_image_params(ctx_params, gen_params, gen_params.seed + idx) - : ""; - const bool ok = write_image_to_file(path.string(), img.data, img.width, img.height, img.channel, params, 90); + const int64_t metadata_seed = cli_params.mode == VID_GEN ? gen_params.seed : gen_params.seed + idx; + std::string params = gen_params.embed_image_metadata + ? get_image_params(ctx_params, gen_params, metadata_seed, cli_params.mode) + : ""; + const bool ok = write_image_to_file(path.string(), img.data, img.width, img.height, img.channel, params, 90); LOG_INFO("save result image %d to '%s' (%s)", idx, path.string().c_str(), ok ? "success" : "failure"); return ok; }; diff --git a/examples/common/common.cpp b/examples/common/common.cpp index 2d29df26..1a5399b8 100644 --- a/examples/common/common.cpp +++ b/examples/common/common.cpp @@ -2281,7 +2281,192 @@ std::string version_string() { return std::string("stable-diffusion.cpp version ") + sd_version() + ", commit " + sd_commit(); } -std::string get_image_params(const SDContextParams& ctx_params, const SDGenerationParams& gen_params, int64_t seed) { +static std::string safe_json_string(const char* value) { + return value ? 
value : ""; +} + +static void set_json_basename_if_not_empty(json& target, const char* key, const std::string& path) { + if (!path.empty()) { + target[key] = sd_basename(path); + } +} + +static json build_sampling_metadata_json(const sd_sample_params_t& sample_params, + const std::vector& skip_layers, + const std::vector* custom_sigmas = nullptr) { + json sampling = { + {"steps", sample_params.sample_steps}, + {"eta", sample_params.eta}, + {"shifted_timestep", sample_params.shifted_timestep}, + {"flow_shift", sample_params.flow_shift}, + {"guidance", + { + {"txt_cfg", sample_params.guidance.txt_cfg}, + {"img_cfg", sample_params.guidance.img_cfg}, + {"distilled_guidance", sample_params.guidance.distilled_guidance}, + {"slg", + { + {"scale", sample_params.guidance.slg.scale}, + {"layers", skip_layers}, + {"start", sample_params.guidance.slg.layer_start}, + {"end", sample_params.guidance.slg.layer_end}, + }}, + }}, + }; + if (sample_params.sample_method != SAMPLE_METHOD_COUNT) { + sampling["method"] = safe_json_string(sd_sample_method_name(sample_params.sample_method)); + } + if (sample_params.scheduler != SCHEDULER_COUNT) { + sampling["scheduler"] = safe_json_string(sd_scheduler_name(sample_params.scheduler)); + } + if (custom_sigmas != nullptr) { + sampling["custom_sigmas"] = *custom_sigmas; + } + return sampling; +} + +std::string build_sdcpp_image_metadata_json(const SDContextParams& ctx_params, + const SDGenerationParams& gen_params, + int64_t seed, + SDMode mode) { + json root; + root["schema"] = "sdcpp.image.params/v1"; + root["mode"] = mode == VID_GEN ? 
"vid_gen" : "img_gen"; + root["generator"] = { + {"name", "stable-diffusion.cpp"}, + {"version", safe_json_string(sd_version())}, + {"commit", safe_json_string(sd_commit())}, + }; + root["seed"] = seed; + root["width"] = gen_params.get_resolved_width(); + root["height"] = gen_params.get_resolved_height(); + + root["prompt"] = { + {"positive", gen_params.prompt}, + {"negative", gen_params.negative_prompt}, + }; + root["sampling"] = build_sampling_metadata_json(gen_params.sample_params, + gen_params.skip_layers, + &gen_params.custom_sigmas); + + json models; + set_json_basename_if_not_empty(models, "model", ctx_params.model_path); + set_json_basename_if_not_empty(models, "clip_l", ctx_params.clip_l_path); + set_json_basename_if_not_empty(models, "clip_g", ctx_params.clip_g_path); + set_json_basename_if_not_empty(models, "clip_vision", ctx_params.clip_vision_path); + set_json_basename_if_not_empty(models, "t5xxl", ctx_params.t5xxl_path); + set_json_basename_if_not_empty(models, "llm", ctx_params.llm_path); + set_json_basename_if_not_empty(models, "llm_vision", ctx_params.llm_vision_path); + set_json_basename_if_not_empty(models, "diffusion_model", ctx_params.diffusion_model_path); + set_json_basename_if_not_empty(models, "high_noise_diffusion_model", ctx_params.high_noise_diffusion_model_path); + set_json_basename_if_not_empty(models, "vae", ctx_params.vae_path); + set_json_basename_if_not_empty(models, "taesd", ctx_params.taesd_path); + set_json_basename_if_not_empty(models, "control_net", ctx_params.control_net_path); + root["models"] = std::move(models); + + root["clip_skip"] = gen_params.clip_skip; + root["strength"] = gen_params.strength; + root["control_strength"] = gen_params.control_strength; + root["auto_resize_ref_image"] = gen_params.auto_resize_ref_image; + root["increase_ref_index"] = gen_params.increase_ref_index; + if (mode == VID_GEN) { + root["video"] = { + {"frame_count", gen_params.video_frames}, + {"fps", gen_params.fps}, + }; + 
root["moe_boundary"] = gen_params.moe_boundary; + root["vace_strength"] = gen_params.vace_strength; + root["high_noise_sampling"] = build_sampling_metadata_json(gen_params.high_noise_sample_params, + gen_params.high_noise_skip_layers); + } + + root["rng"] = safe_json_string(sd_rng_type_name(ctx_params.rng_type)); + if (ctx_params.sampler_rng_type != RNG_TYPE_COUNT) { + root["sampler_rng"] = safe_json_string(sd_rng_type_name(ctx_params.sampler_rng_type)); + } + + json loras = json::array(); + for (const auto& entry : gen_params.lora_map) { + loras.push_back({ + {"name", sd_basename(entry.first)}, + {"multiplier", entry.second}, + {"is_high_noise", false}, + }); + } + for (const auto& entry : gen_params.high_noise_lora_map) { + loras.push_back({ + {"name", sd_basename(entry.first)}, + {"multiplier", entry.second}, + {"is_high_noise", true}, + }); + } + if (!loras.empty()) { + root["loras"] = std::move(loras); + } + + if (gen_params.hires_enabled) { + root["hires"] = { + {"enabled", gen_params.hires_enabled}, + {"upscaler", gen_params.hires_upscaler}, + {"model", gen_params.hires_upscaler_model_path.empty() ? 
"" : sd_basename(gen_params.hires_upscaler_model_path)}, + {"scale", gen_params.hires_scale}, + {"target_width", gen_params.hires_width}, + {"target_height", gen_params.hires_height}, + {"steps", gen_params.hires_steps}, + {"denoising_strength", gen_params.hires_denoising_strength}, + {"upscale_tile_size", gen_params.hires_upscale_tile_size}, + }; + } + + if (gen_params.cache_params.mode != SD_CACHE_DISABLED) { + root["cache"] = { + {"requested_mode", gen_params.cache_mode}, + {"requested_option", gen_params.cache_option}, + {"mode", gen_params.cache_params.mode}, + {"scm_mask", gen_params.scm_mask}, + {"scm_policy_dynamic", gen_params.scm_policy_dynamic}, + {"reuse_threshold", gen_params.cache_params.reuse_threshold}, + {"start_percent", gen_params.cache_params.start_percent}, + {"end_percent", gen_params.cache_params.end_percent}, + {"error_decay_rate", gen_params.cache_params.error_decay_rate}, + {"use_relative_threshold", gen_params.cache_params.use_relative_threshold}, + {"reset_error_on_compute", gen_params.cache_params.reset_error_on_compute}, + {"Fn_compute_blocks", gen_params.cache_params.Fn_compute_blocks}, + {"Bn_compute_blocks", gen_params.cache_params.Bn_compute_blocks}, + {"residual_diff_threshold", gen_params.cache_params.residual_diff_threshold}, + {"max_warmup_steps", gen_params.cache_params.max_warmup_steps}, + {"max_cached_steps", gen_params.cache_params.max_cached_steps}, + {"max_continuous_cached_steps", gen_params.cache_params.max_continuous_cached_steps}, + {"taylorseer_n_derivatives", gen_params.cache_params.taylorseer_n_derivatives}, + {"taylorseer_skip_interval", gen_params.cache_params.taylorseer_skip_interval}, + {"spectrum_w", gen_params.cache_params.spectrum_w}, + {"spectrum_m", gen_params.cache_params.spectrum_m}, + {"spectrum_lam", gen_params.cache_params.spectrum_lam}, + {"spectrum_window_size", gen_params.cache_params.spectrum_window_size}, + {"spectrum_flex_window", gen_params.cache_params.spectrum_flex_window}, + 
{"spectrum_warmup_steps", gen_params.cache_params.spectrum_warmup_steps}, + {"spectrum_stop_percent", gen_params.cache_params.spectrum_stop_percent}, + }; + } + + if (gen_params.vae_tiling_params.enabled) { + root["vae_tiling"] = { + {"enabled", gen_params.vae_tiling_params.enabled}, + {"tile_size_x", gen_params.vae_tiling_params.tile_size_x}, + {"tile_size_y", gen_params.vae_tiling_params.tile_size_y}, + {"target_overlap", gen_params.vae_tiling_params.target_overlap}, + {"rel_size_x", gen_params.vae_tiling_params.rel_size_x}, + {"rel_size_y", gen_params.vae_tiling_params.rel_size_y}, + }; + } + + return root.dump(); +} + +std::string get_image_params(const SDContextParams& ctx_params, + const SDGenerationParams& gen_params, + int64_t seed, + SDMode mode) { std::string parameter_string; if (gen_params.prompt_with_lora.size() != 0) { parameter_string += gen_params.prompt_with_lora + "\n"; @@ -2294,7 +2479,7 @@ std::string get_image_params(const SDContextParams& ctx_params, const SDGenerati parameter_string += "Steps: " + std::to_string(gen_params.sample_params.sample_steps) + ", "; parameter_string += "CFG scale: " + std::to_string(gen_params.sample_params.guidance.txt_cfg) + ", "; if (gen_params.sample_params.guidance.slg.scale != 0 && gen_params.skip_layers.size() != 0) { - parameter_string += "SLG scale: " + std::to_string(gen_params.sample_params.guidance.txt_cfg) + ", "; + parameter_string += "SLG scale: " + std::to_string(gen_params.sample_params.guidance.slg.scale) + ", "; parameter_string += "Skip layers: ["; for (const auto& layer : gen_params.skip_layers) { parameter_string += std::to_string(layer) + ", "; @@ -2347,5 +2532,6 @@ std::string get_image_params(const SDContextParams& ctx_params, const SDGenerati parameter_string += "Denoising strength: " + std::to_string(gen_params.hires_denoising_strength) + ", "; } parameter_string += "Version: stable-diffusion.cpp"; + parameter_string += ", SDCPP: " + build_sdcpp_image_metadata_json(ctx_params, gen_params, 
seed, mode); return parameter_string; } diff --git a/examples/common/common.h b/examples/common/common.h index 333d3311..c4498c35 100644 --- a/examples/common/common.h +++ b/examples/common/common.h @@ -249,6 +249,13 @@ struct SDGenerationParams { }; std::string version_string(); -std::string get_image_params(const SDContextParams& ctx_params, const SDGenerationParams& gen_params, int64_t seed); +std::string build_sdcpp_image_metadata_json(const SDContextParams& ctx_params, + const SDGenerationParams& gen_params, + int64_t seed, + SDMode mode = IMG_GEN); +std::string get_image_params(const SDContextParams& ctx_params, + const SDGenerationParams& gen_params, + int64_t seed, + SDMode mode = IMG_GEN); #endif // __EXAMPLES_COMMON_COMMON_H__ From a81677f59c92d90343aebca51dfed7decf0a0cb0 Mon Sep 17 00:00:00 2001 From: Douglas Griffith Date: Mon, 27 Apr 2026 10:55:30 -0400 Subject: [PATCH 3/9] docs: performance tips markup (#1460) --- docs/caching.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/caching.md b/docs/caching.md index b02a541b..01f01974 100644 --- a/docs/caching.md +++ b/docs/caching.md @@ -131,8 +131,6 @@ sd-cli -m model.safetensors -p "a cat" --cache-mode spectrum | `warmup` | Steps to always compute before caching starts | 4 | | `stop` | Stop caching at this fraction of total steps | 0.9 | -``` - ### Performance Tips - Start with default thresholds and adjust based on output quality From 331cfa5387633084e20f088078ed0c5a9fa5a434 Mon Sep 17 00:00:00 2001 From: Wagner Bruna Date: Wed, 29 Apr 2026 11:25:30 -0300 Subject: [PATCH 4/9] fix: release VAE compute buffer after tiled encoding (#1465) --- src/vae.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/vae.hpp b/src/vae.hpp index dc69535e..54bd88ab 100644 --- a/src/vae.hpp +++ b/src/vae.hpp @@ -142,9 +142,10 @@ public: "vae encode compute failed while processing a tile"); } else { output = _compute(n_threads, input, false); - free_compute_buffer(); } + free_compute_buffer(); 
+ if (output.empty()) { LOG_ERROR("vae encode compute failed"); return {}; From b8079e253d6f2cf4fd69fe6ea247f337325c3d1d Mon Sep 17 00:00:00 2001 From: Wagner Bruna Date: Wed, 29 Apr 2026 12:26:57 -0300 Subject: [PATCH 5/9] feat: transition from compile-time to runtime backend discovery (#1448) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Stéphane du Hamel Co-authored-by: Cyberhan123 <255542417@qq.com> Co-authored-by: leejet --- CMakeLists.txt | 7 - src/common_block.hpp | 8 +- src/ggml_extend.hpp | 188 ++++++++++++++--------- src/ggml_extend_backend.hpp | 298 ++++++++++++++++++++++++++++++++++++ src/lora.hpp | 74 ++++----- src/model.cpp | 15 +- src/qwen_image.hpp | 8 +- src/stable-diffusion.cpp | 55 +------ src/upscaler.cpp | 23 +-- src/util.cpp | 120 ++++++++++++--- src/util.h | 5 + src/z_image.hpp | 16 +- 12 files changed, 577 insertions(+), 240 deletions(-) create mode 100644 src/ggml_extend_backend.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 6a9fb104..48ce456e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -72,37 +72,31 @@ option(SD_USE_SYSTEM_GGML "sd: use system-installed GGML library" OFF if(SD_CUDA) message("-- Use CUDA as backend stable-diffusion") set(GGML_CUDA ON) - add_definitions(-DSD_USE_CUDA) endif() if(SD_METAL) message("-- Use Metal as backend stable-diffusion") set(GGML_METAL ON) - add_definitions(-DSD_USE_METAL) endif() if (SD_VULKAN) message("-- Use Vulkan as backend stable-diffusion") set(GGML_VULKAN ON) - add_definitions(-DSD_USE_VULKAN) endif () if (SD_OPENCL) message("-- Use OpenCL as backend stable-diffusion") set(GGML_OPENCL ON) - add_definitions(-DSD_USE_OPENCL) endif () if (SD_HIPBLAS) message("-- Use HIPBLAS as backend stable-diffusion") set(GGML_HIP ON) - add_definitions(-DSD_USE_CUDA) endif () if(SD_MUSA) message("-- Use MUSA as backend stable-diffusion") set(GGML_MUSA ON) - add_definitions(-DSD_USE_CUDA) endif() if(SD_WEBP) @@ -222,7 +216,6 @@ if(SD_SYCL) 
message("-- Use SYCL as backend stable-diffusion") set(GGML_SYCL ON) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing -fsycl") - add_definitions(-DSD_USE_SYCL) # disable fast-math on host, see: # https://www.intel.com/content/www/us/en/docs/cpp-compiler/developer-guide-reference/2021-10/fp-model-fp.html if (WIN32) diff --git a/src/common_block.hpp b/src/common_block.hpp index 112a4d7a..e6c0b06b 100644 --- a/src/common_block.hpp +++ b/src/common_block.hpp @@ -1,7 +1,9 @@ #ifndef __COMMON_BLOCK_HPP__ #define __COMMON_BLOCK_HPP__ +#include "ggml-backend.h" #include "ggml_extend.hpp" +#include "util.h" class DownSampleBlock : public GGMLBlock { protected: @@ -248,9 +250,6 @@ public: float scale = 1.f; if (precision_fix) { scale = 1.f / 128.f; -#ifdef SD_USE_VULKAN - force_prec_f32 = true; -#endif } // The purpose of the scale here is to prevent NaN issues in certain situations. // For example, when using Vulkan without enabling force_prec_f32, @@ -264,6 +263,9 @@ public: auto net_0 = std::dynamic_pointer_cast(blocks["net.0"]); auto net_2 = std::dynamic_pointer_cast(blocks["net.2"]); + if (sd_backend_is(ctx->backend, "Vulkan")) { + net_2->set_force_prec_f32(true); + } x = net_0->forward(ctx, x); // [ne3, ne2, ne1, inner_dim] x = net_2->forward(ctx, x); // [ne3, ne2, ne1, dim_out] diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index cfb38c8d..8b748194 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -24,32 +24,12 @@ #include "ggml-alloc.h" #include "ggml-backend.h" -#include "ggml-cpu.h" #include "ggml.h" +#include "ggml_extend_backend.hpp" #include "model.h" #include "tensor.hpp" -#ifdef SD_USE_CUDA -#include "ggml-cuda.h" -#endif - -#ifdef SD_USE_METAL -#include "ggml-metal.h" -#endif - -#ifdef SD_USE_VULKAN -#include "ggml-vulkan.h" -#endif - -#ifdef SD_USE_OPENCL -#include "ggml-opencl.h" -#endif - -#ifdef SD_USE_SYCL -#include "ggml-sycl.h" -#endif - #include "rng.hpp" #include "tensor_ggml.hpp" #include "util.h" @@ -91,6 +71,48 @@ 
__STATIC_INLINE__ void ggml_log_callback_default(ggml_log_level level, const cha } } +__STATIC_INLINE__ bool backend_name_exists(std::string name) { + ggml_backend_load_all_once(); + const size_t device_count = ggml_backend_dev_count(); + for (size_t i = 0; i < device_count; ++i) { + if (name == ggml_backend_dev_name(ggml_backend_dev_get(i))) { + return true; + } + } + return false; +} + +__STATIC_INLINE__ std::string sanitize_backend_name(std::string name) { + if (name == "" || backend_name_exists(name)) { + return name; + } else { + LOG_WARN("Backend %s not found, using default backend", name.c_str()); + return ""; + } +} + +__STATIC_INLINE__ std::string get_default_backend_name() { + ggml_backend_load_all_once(); + // should pick the same backend as ggml_backend_init_best + ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU); + dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU); + dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + if (dev == nullptr) { + return ""; + } + return ggml_backend_dev_name(dev); +} + +__STATIC_INLINE__ ggml_backend_t init_named_backend(std::string name = "") { + ggml_backend_load_all_once(); + LOG_DEBUG("Initializing backend: %s", name.c_str()); + if (name.empty()) { + return ggml_backend_init_best(); + } else { + return ggml_backend_init_by_name(name.c_str(), nullptr); + } +} + static_assert(GGML_MAX_NAME >= 128, "GGML_MAX_NAME must be at least 128"); // n-mode tensor-matrix product @@ -1286,25 +1308,25 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_ones_like(ggml_context* ctx, return ggml_ext_ones(ctx, x->ne[0], x->ne[1], x->ne[2], x->ne[3]); } -__STATIC_INLINE__ ggml_tensor* ggml_ext_cast_f32(ggml_context* ctx, ggml_tensor* a) { -#ifdef SD_USE_VULKAN - auto zero_index = ggml_get_tensor(ctx, "ggml_runner_build_in_tensor:zero_int"); - auto out = ggml_reshape_1d(ctx, a, ggml_nelements(a)); - out = ggml_get_rows(ctx, out, zero_index); - out = ggml_reshape(ctx, out, 
a); - // auto out = ggml_cast(ctx, a, GGML_TYPE_F32); - return out; -#else - auto out = ggml_reshape_2d(ctx, a, 1, ggml_nelements(a)); - ggml_tensor* one = ggml_ext_ones(ctx, 1, 1, 1, 1); // [1,] - if (ggml_is_transposed(out)) { - out = ggml_mul_mat(ctx, one, out); +__STATIC_INLINE__ ggml_tensor* ggml_ext_cast_f32(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* a) { + if (sd_backend_is(backend, "Vulkan")) { + auto zero_index = ggml_get_tensor(ctx, "ggml_runner_build_in_tensor:zero_int"); + auto out = ggml_reshape_1d(ctx, a, ggml_nelements(a)); + out = ggml_get_rows(ctx, out, zero_index); + out = ggml_reshape(ctx, out, a); + // auto out = ggml_cast(ctx, a, GGML_TYPE_F32); + return out; } else { - out = ggml_mul_mat(ctx, out, one); + auto out = ggml_reshape_2d(ctx, a, 1, ggml_nelements(a)); + ggml_tensor* one = ggml_ext_ones(ctx, 1, 1, 1, 1); // [1,] + if (ggml_is_transposed(out)) { + out = ggml_mul_mat(ctx, one, out); + } else { + out = ggml_mul_mat(ctx, out, one); + } + out = ggml_reshape(ctx, out, a); + return out; } - out = ggml_reshape(ctx, out, a); -#endif - return out; } // q: [N, L_q, C(n_head*d_head)] or [N*n_head, L_q, d_head] @@ -1496,16 +1518,14 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_group_norm(ggml_context* ctx, } __STATIC_INLINE__ void ggml_ext_backend_tensor_get_and_sync(ggml_backend_t backend, const ggml_tensor* tensor, void* data, size_t offset, size_t size) { -#if defined(SD_USE_CUDA) || defined(SD_USE_SYCL) - if (!ggml_backend_is_cpu(backend)) { + if ((sd_backend_is(backend, "ROCm") || sd_backend_is(backend, "CUDA") || sd_backend_is(backend, "SYCL")) && + !ggml_backend_is_cpu(backend)) { ggml_backend_tensor_get_async(backend, tensor, data, offset, size); ggml_backend_synchronize(backend); - } else { - ggml_backend_tensor_get(tensor, data, offset, size); + return; } -#else + ggml_backend_tensor_get(tensor, data, offset, size); -#endif } __STATIC_INLINE__ float ggml_ext_backend_tensor_get_f32(ggml_tensor* tensor) { @@ -1664,14 +1684,15 @@ 
struct WeightAdapter { float scale = 1.f; } conv2d; }; - virtual ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name) = 0; + virtual ggml_tensor* patch_weight(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* weight, const std::string& weight_name) = 0; virtual ggml_tensor* forward_with_lora(ggml_context* ctx, + ggml_backend_t backend, ggml_tensor* x, ggml_tensor* w, ggml_tensor* b, const std::string& prefix, - ForwardParams forward_params) = 0; - virtual size_t get_extra_graph_size() = 0; + ForwardParams forward_params) = 0; + virtual size_t get_extra_graph_size() = 0; }; struct GGMLRunnerContext { @@ -2192,6 +2213,14 @@ public: void set_weight_adapter(const std::shared_ptr& adapter) { weight_adapter = adapter; } + + ggml_backend_t get_runtime_backend() { + return runtime_backend; + } + + ggml_backend_t get_params_backend() { + return params_backend; + } }; class GGMLBlock { @@ -2336,6 +2365,14 @@ public: force_prec_f32(force_prec_f32), scale(scale) {} + void set_scale(float scale_) { + scale = scale_; + } + + void set_force_prec_f32(bool force_prec_f32_) { + force_prec_f32 = force_prec_f32_; + } + ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) { ggml_tensor* w = params["weight"]; ggml_tensor* b = nullptr; @@ -2347,7 +2384,7 @@ public: forward_params.op_type = WeightAdapter::ForwardParams::op_type_t::OP_LINEAR; forward_params.linear.force_prec_f32 = force_prec_f32; forward_params.linear.scale = scale; - return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, x, w, b, prefix, forward_params); + return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, ctx->backend, x, w, b, prefix, forward_params); } return ggml_ext_linear(ctx->ggml_ctx, x, w, b, force_prec_f32, scale); } @@ -2463,7 +2500,7 @@ public: forward_params.conv2d.circular_x = ctx->circular_x_enabled; forward_params.conv2d.circular_y = ctx->circular_y_enabled; forward_params.conv2d.scale = scale; - return 
ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, x, w, b, prefix, forward_params); + return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, ctx->backend, x, w, b, prefix, forward_params); } return ggml_ext_conv_2d(ctx->ggml_ctx, x, @@ -2527,7 +2564,7 @@ public: ggml_tensor* w = params["weight"]; ggml_tensor* b = nullptr; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, w, prefix + "weight"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, w, prefix + "weight"); if (w->type != GGML_TYPE_F16) { w = ggml_cast(ctx->ggml_ctx, w, GGML_TYPE_F16); } @@ -2535,7 +2572,7 @@ public: if (bias) { b = params["bias"]; if (ctx->weight_adapter) { - b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, b, prefix + "bias"); + b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, b, prefix + "bias"); } } return ggml_ext_conv_3d(ctx->ggml_ctx, x, w, b, in_channels, @@ -2582,12 +2619,12 @@ public: if (elementwise_affine) { w = params["weight"]; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, w, prefix + "weight"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, w, prefix + "weight"); } if (bias) { b = params["bias"]; if (ctx->weight_adapter) { - b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, b, prefix + "bias"); + b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, b, prefix + "bias"); } } } @@ -2630,8 +2667,8 @@ public: w = params["weight"]; b = params["bias"]; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, w, prefix + "weight"); - b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, b, prefix + "bias"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, w, prefix + "weight"); + b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, b, prefix + "bias"); } } return ggml_ext_group_norm(ctx->ggml_ctx, x, w, b, num_groups); @@ -2665,7 +2702,7 @@ public: ggml_tensor* 
forward(GGMLRunnerContext* ctx, ggml_tensor* x) { ggml_tensor* w = params["weight"]; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, w, prefix + "weight"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, w, prefix + "weight"); } x = ggml_rms_norm(ctx->ggml_ctx, x, eps); x = ggml_mul_inplace(ctx->ggml_ctx, x, w); @@ -2748,6 +2785,7 @@ public: __STATIC_INLINE__ ggml_tensor* ggml_ext_lokr_forward( ggml_context* ctx, + ggml_backend_t backend, ggml_tensor* h, // Input: [q, batch] or [W, H, q, batch] ggml_tensor* w1, // Outer C (Full rank) ggml_tensor* w1a, // Outer A (Low rank part 1) @@ -2768,7 +2806,7 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_lokr_forward( int vq = q_actual / uq; int vp = (w2 != nullptr) ? (is_conv ? (int)w2->ne[3] : (int)w2->ne[1]) - : (int)w2a->ne[1]; + : (int)w2a->ne[1]; GGML_ASSERT(q_actual == (uq * vq) && "Input dimension mismatch for LoKR split"); ggml_tensor* hb; @@ -2778,29 +2816,29 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_lokr_forward( int merge_batch_uq = batch; int merge_batch_vp = batch; -#if SD_USE_VULKAN - if (batch > 1) { - // no access to backend here, worst case is slightly worse perfs for other backends when built alongside Vulkan backend - int max_batch = 65535; - int max_batch_uq = max_batch / uq; - merge_batch_uq = 1; - for (int i = max_batch_uq; i > 0; i--) { - if (batch % i == 0) { - merge_batch_uq = i; - break; + if (sd_backend_is(backend, "Vulkan")) { + if (batch > 1) { + // backend is known at runtime now, so only Vulkan takes this path: cap merged dims at 65535 (presumably a Vulkan dispatch limit) using the largest divisor of batch that fits + int max_batch = 65535; + int max_batch_uq = max_batch / uq; + merge_batch_uq = 1; + for (int i = max_batch_uq; i > 0; i--) { + if (batch % i == 0) { + merge_batch_uq = i; + break; + } } - } - int max_batch_vp = max_batch / vp; - merge_batch_vp = 1; - for (int i = max_batch_vp; i > 0; i--) { - if (batch % i == 0) { - merge_batch_vp = i; - break; + int max_batch_vp = max_batch / vp; + 
merge_batch_vp = 1; + for (int i = max_batch_vp; i > 0; i--) { + if (batch % i == 0) { + merge_batch_vp = i; + break; + } } } } -#endif ggml_tensor* h_split = ggml_reshape_3d(ctx, h, vq, uq * merge_batch_uq, batch / merge_batch_uq); if (w2 != nullptr) { diff --git a/src/ggml_extend_backend.hpp b/src/ggml_extend_backend.hpp new file mode 100644 index 00000000..50158c88 --- /dev/null +++ b/src/ggml_extend_backend.hpp @@ -0,0 +1,298 @@ +#ifndef __GGML_EXTEND_BACKEND_HPP__ +#define __GGML_EXTEND_BACKEND_HPP__ + +#include +#include + +#include "ggml-backend.h" +#include "ggml.h" + +#ifndef __STATIC_INLINE__ +#define __STATIC_INLINE__ static inline +#endif + +inline void ggml_backend_load_all_once() { + // If the registry already has devices and the CPU backend is present, + // assume either static registration or explicit host-side preloading has + // completed and avoid rescanning the default paths. + if (ggml_backend_dev_count() > 0 && ggml_backend_reg_by_name("CPU") != nullptr) { + return; + } + // In dynamic-backend mode the backend modules are discovered at runtime, + // so we must load them before asking for the CPU backend or its proc table. + // If the host preloaded only a subset of backends, allow one default-path + // scan so missing modules can still be discovered. + static std::once_flag once; + std::call_once(once, []() { + if (ggml_backend_dev_count() > 0 && ggml_backend_reg_by_name("CPU") != nullptr) { + return; + } + ggml_backend_load_all(); + }); +} + +// Do not gate this branch on GGML_CPU or GGML_CPU_ALL_VARIANTS: +// those are CMake options used to configure ggml itself, but they are not +// exported as PUBLIC compile definitions to stable-diffusion in backend-DL mode. +// In practice, this target can reliably see GGML_BACKEND_DL, but not whether +// the CPU backend was compiled as a loadable module. We therefore use runtime +// backend discovery instead of compile-time assumptions. 
+ +__STATIC_INLINE__ ggml_backend_reg_t ggml_backend_cpu_reg() { + ggml_backend_reg_t reg = ggml_backend_reg_by_name("CPU"); + if (reg != nullptr) { + return reg; + } + + ggml_backend_load_all_once(); + return ggml_backend_reg_by_name("CPU"); +} + +__STATIC_INLINE__ ggml_backend_reg_t ggml_backend_reg_from_backend(ggml_backend_t backend) { + if (backend != nullptr) { + ggml_backend_dev_t device = ggml_backend_get_device(backend); + if (device != nullptr) { + return ggml_backend_dev_backend_reg(device); + } + } + + return ggml_backend_cpu_reg(); +} + +__STATIC_INLINE__ ggml_backend_t ggml_backend_cpu_init() { + ggml_backend_t backend = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); + if (backend != nullptr) { + return backend; + } + + ggml_backend_load_all_once(); + return ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); +} + +__STATIC_INLINE__ bool ggml_backend_is_cpu(ggml_backend_t backend) { + if (backend == nullptr) { + return false; + } + + ggml_backend_dev_t device = ggml_backend_get_device(backend); + if (device != nullptr) { + return ggml_backend_dev_type(device) == GGML_BACKEND_DEVICE_TYPE_CPU; + } + + const char* backend_name = ggml_backend_name(backend); + return backend_name != nullptr && std::strcmp(backend_name, "CPU") == 0; +} + +__STATIC_INLINE__ void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) { + ggml_backend_reg_t reg = ggml_backend_reg_from_backend(backend_cpu); + if (reg == nullptr) { + return; + } + + auto fn = reinterpret_cast(ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads")); + if (fn != nullptr) { + fn(backend_cpu, n_threads); + } +} + +using __ggml_backend_cpu_set_threadpool_t = void (*)(ggml_backend_t backend_cpu, ggml_threadpool_t threadpool); + +__STATIC_INLINE__ void ggml_backend_cpu_set_threadpool(ggml_backend_t backend_cpu, ggml_threadpool_t threadpool) { + ggml_backend_reg_t reg = ggml_backend_reg_from_backend(backend_cpu); + if (reg == nullptr) { + 
return; + } + + auto fn = reinterpret_cast<__ggml_backend_cpu_set_threadpool_t>(ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_set_threadpool")); + if (fn != nullptr) { + fn(backend_cpu, threadpool); + } +} + +__STATIC_INLINE__ void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void* abort_callback_data) { + ggml_backend_reg_t reg = ggml_backend_reg_from_backend(backend_cpu); + if (reg == nullptr) { + return; + } + + auto fn = reinterpret_cast(ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback")); + if (fn != nullptr) { + fn(backend_cpu, abort_callback, abort_callback_data); + } +} + +__STATIC_INLINE__ ggml_backend_buffer_t ggml_backend_tensor_buffer(const struct ggml_tensor* tensor) { + if (tensor == nullptr) { + return nullptr; + } + + return tensor->view_src ? tensor->view_src->buffer : tensor->buffer; +} + +__STATIC_INLINE__ bool ggml_backend_tensor_is_host_accessible(const struct ggml_tensor* tensor) { + if (tensor == nullptr || tensor->data == nullptr) { + return false; + } + + ggml_backend_buffer_t buffer = ggml_backend_tensor_buffer(tensor); + return buffer == nullptr || ggml_backend_buffer_is_host(buffer); +} + +__STATIC_INLINE__ size_t ggml_backend_tensor_offset(const struct ggml_tensor* tensor, int64_t i0, int64_t i1, int64_t i2, int64_t i3) { + return (size_t)(i0 * tensor->nb[0] + i1 * tensor->nb[1] + i2 * tensor->nb[2] + i3 * tensor->nb[3]); +} + +template +__STATIC_INLINE__ void ggml_backend_tensor_write_scalar(const struct ggml_tensor* tensor, int64_t i0, int64_t i1, int64_t i2, int64_t i3, T value) { + const size_t offset = ggml_backend_tensor_offset(tensor, i0, i1, i2, i3); + + if (ggml_backend_tensor_is_host_accessible(tensor)) { + auto* dst = reinterpret_cast(reinterpret_cast(tensor->data) + offset); + *dst = value; + return; + } + + ggml_backend_tensor_set(const_cast(tensor), &value, offset, sizeof(T)); +} + +__STATIC_INLINE__ void ggml_set_f32_nd(const struct 
ggml_tensor* tensor, int64_t i0, int64_t i1, int64_t i2, int64_t i3, float value) { + switch (tensor->type) { + case GGML_TYPE_I8: + ggml_backend_tensor_write_scalar(tensor, i0, i1, i2, i3, static_cast(value)); + break; + case GGML_TYPE_I16: + ggml_backend_tensor_write_scalar(tensor, i0, i1, i2, i3, static_cast(value)); + break; + case GGML_TYPE_I32: + ggml_backend_tensor_write_scalar(tensor, i0, i1, i2, i3, static_cast(value)); + break; + case GGML_TYPE_F16: + ggml_backend_tensor_write_scalar(tensor, i0, i1, i2, i3, ggml_fp32_to_fp16(value)); + break; + case GGML_TYPE_BF16: + ggml_backend_tensor_write_scalar(tensor, i0, i1, i2, i3, ggml_fp32_to_bf16(value)); + break; + case GGML_TYPE_F32: + ggml_backend_tensor_write_scalar(tensor, i0, i1, i2, i3, value); + break; + default: + GGML_ABORT("fatal error"); + } +} + +__STATIC_INLINE__ void ggml_set_f32_1d(const struct ggml_tensor* tensor, int i, float value) { + if (!ggml_is_contiguous(tensor)) { + int64_t id[4] = {0, 0, 0, 0}; + ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); + ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value); + return; + } + + switch (tensor->type) { + case GGML_TYPE_I8: + ggml_backend_tensor_write_scalar(tensor, i, 0, 0, 0, static_cast(value)); + break; + case GGML_TYPE_I16: + ggml_backend_tensor_write_scalar(tensor, i, 0, 0, 0, static_cast(value)); + break; + case GGML_TYPE_I32: + ggml_backend_tensor_write_scalar(tensor, i, 0, 0, 0, static_cast(value)); + break; + case GGML_TYPE_F16: + ggml_backend_tensor_write_scalar(tensor, i, 0, 0, 0, ggml_fp32_to_fp16(value)); + break; + case GGML_TYPE_BF16: + ggml_backend_tensor_write_scalar(tensor, i, 0, 0, 0, ggml_fp32_to_bf16(value)); + break; + case GGML_TYPE_F32: + ggml_backend_tensor_write_scalar(tensor, i, 0, 0, 0, value); + break; + default: + GGML_ABORT("fatal error"); + } +} + +__STATIC_INLINE__ enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context* ctx, struct ggml_cgraph* cgraph, int n_threads) { + (void)ctx; + + 
// The legacy ggml_graph_compute_with_ctx() symbol lives in ggml-cpu, but + // the backend proc table does not expose it in GGML_BACKEND_DL mode. + // Recreate the old behavior by initializing the CPU backend explicitly and + // executing the graph through the generic backend API. + ggml_backend_t backend = ggml_backend_cpu_init(); + if (backend == nullptr) { + return GGML_STATUS_ALLOC_FAILED; + } + + ggml_backend_cpu_set_n_threads(backend, n_threads); + + const enum ggml_status status = ggml_backend_graph_compute(backend, cgraph); + ggml_backend_free(backend); + + return status; +} + +__STATIC_INLINE__ ggml_tensor* ggml_set_f32(struct ggml_tensor* tensor, float value) { + GGML_ASSERT(tensor != nullptr); + + if (ggml_backend_tensor_is_host_accessible(tensor) && ggml_is_contiguous(tensor)) { + const int64_t nelements = ggml_nelements(tensor); + + switch (tensor->type) { + case GGML_TYPE_I8: { + auto* data = reinterpret_cast(tensor->data); + const int8_t v = static_cast(value); + for (int64_t i = 0; i < nelements; ++i) { + data[i] = v; + } + } break; + case GGML_TYPE_I16: { + auto* data = reinterpret_cast(tensor->data); + const int16_t v = static_cast(value); + for (int64_t i = 0; i < nelements; ++i) { + data[i] = v; + } + } break; + case GGML_TYPE_I32: { + auto* data = reinterpret_cast(tensor->data); + const int32_t v = static_cast(value); + for (int64_t i = 0; i < nelements; ++i) { + data[i] = v; + } + } break; + case GGML_TYPE_F16: { + auto* data = reinterpret_cast(tensor->data); + const ggml_fp16_t v = ggml_fp32_to_fp16(value); + for (int64_t i = 0; i < nelements; ++i) { + data[i] = v; + } + } break; + case GGML_TYPE_BF16: { + auto* data = reinterpret_cast(tensor->data); + const ggml_bf16_t v = ggml_fp32_to_bf16(value); + for (int64_t i = 0; i < nelements; ++i) { + data[i] = v; + } + } break; + case GGML_TYPE_F32: { + auto* data = reinterpret_cast(tensor->data); + for (int64_t i = 0; i < nelements; ++i) { + data[i] = value; + } + } break; + default: + 
GGML_ABORT("fatal error"); + } + + return tensor; + } + + const int64_t nelements = ggml_nelements(tensor); + for (int64_t i = 0; i < nelements; ++i) { + ggml_set_f32_1d(tensor, static_cast(i), value); + } + + return tensor; +} + +#endif diff --git a/src/lora.hpp b/src/lora.hpp index d4a749ef..b57bc422 100644 --- a/src/lora.hpp +++ b/src/lora.hpp @@ -129,7 +129,7 @@ struct LoraModel : public GGMLRunner { } } - ggml_tensor* get_lora_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) { + ggml_tensor* get_lora_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) { ggml_tensor* updown = nullptr; int index = 0; while (true) { @@ -152,17 +152,17 @@ struct LoraModel : public GGMLRunner { auto iter = lora_tensors.find(lora_up_name); if (iter != lora_tensors.end()) { - lora_up = ggml_ext_cast_f32(ctx, iter->second); + lora_up = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lora_mid_name); if (iter != lora_tensors.end()) { - lora_mid = ggml_ext_cast_f32(ctx, iter->second); + lora_mid = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lora_down_name); if (iter != lora_tensors.end()) { - lora_down = ggml_ext_cast_f32(ctx, iter->second); + lora_down = ggml_ext_cast_f32(ctx, backend, iter->second); } if (lora_up == nullptr || lora_down == nullptr) { @@ -208,7 +208,7 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_raw_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) { + ggml_tensor* get_raw_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) { ggml_tensor* updown = nullptr; int index = 0; while (true) { @@ -225,7 +225,7 @@ struct LoraModel : public GGMLRunner { auto iter = lora_tensors.find(diff_name); if (iter != lora_tensors.end()) { - curr_updown = ggml_ext_cast_f32(ctx, iter->second); + curr_updown = ggml_ext_cast_f32(ctx, backend, iter->second); } else { break; } @@ -248,7 
+248,7 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_loha_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) { + ggml_tensor* get_loha_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) { ggml_tensor* updown = nullptr; int index = 0; while (true) { @@ -276,33 +276,33 @@ struct LoraModel : public GGMLRunner { auto iter = lora_tensors.find(hada_1_down_name); if (iter != lora_tensors.end()) { - hada_1_down = ggml_ext_cast_f32(ctx, iter->second); + hada_1_down = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(hada_1_up_name); if (iter != lora_tensors.end()) { - hada_1_up = ggml_ext_cast_f32(ctx, iter->second); + hada_1_up = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(hada_1_mid_name); if (iter != lora_tensors.end()) { - hada_1_mid = ggml_ext_cast_f32(ctx, iter->second); + hada_1_mid = ggml_ext_cast_f32(ctx, backend, iter->second); hada_1_up = ggml_cont(ctx, ggml_transpose(ctx, hada_1_up)); } iter = lora_tensors.find(hada_2_down_name); if (iter != lora_tensors.end()) { - hada_2_down = ggml_ext_cast_f32(ctx, iter->second); + hada_2_down = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(hada_2_up_name); if (iter != lora_tensors.end()) { - hada_2_up = ggml_ext_cast_f32(ctx, iter->second); + hada_2_up = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(hada_2_mid_name); if (iter != lora_tensors.end()) { - hada_2_mid = ggml_ext_cast_f32(ctx, iter->second); + hada_2_mid = ggml_ext_cast_f32(ctx, backend, iter->second); hada_2_up = ggml_cont(ctx, ggml_transpose(ctx, hada_2_up)); } @@ -351,7 +351,7 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_lokr_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) { + ggml_tensor* get_lokr_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) { ggml_tensor* 
updown = nullptr; int index = 0; while (true) { @@ -378,24 +378,24 @@ struct LoraModel : public GGMLRunner { auto iter = lora_tensors.find(lokr_w1_name); if (iter != lora_tensors.end()) { - lokr_w1 = ggml_ext_cast_f32(ctx, iter->second); + lokr_w1 = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lokr_w2_name); if (iter != lora_tensors.end()) { - lokr_w2 = ggml_ext_cast_f32(ctx, iter->second); + lokr_w2 = ggml_ext_cast_f32(ctx, backend, iter->second); } int64_t rank = 1; if (lokr_w1 == nullptr) { iter = lora_tensors.find(lokr_w1_a_name); if (iter != lora_tensors.end()) { - lokr_w1_a = ggml_ext_cast_f32(ctx, iter->second); + lokr_w1_a = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lokr_w1_b_name); if (iter != lora_tensors.end()) { - lokr_w1_b = ggml_ext_cast_f32(ctx, iter->second); + lokr_w1_b = ggml_ext_cast_f32(ctx, backend, iter->second); } if (lokr_w1_a == nullptr || lokr_w1_b == nullptr) { @@ -410,12 +410,12 @@ struct LoraModel : public GGMLRunner { if (lokr_w2 == nullptr) { iter = lora_tensors.find(lokr_w2_a_name); if (iter != lora_tensors.end()) { - lokr_w2_a = ggml_ext_cast_f32(ctx, iter->second); + lokr_w2_a = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lokr_w2_b_name); if (iter != lora_tensors.end()) { - lokr_w2_b = ggml_ext_cast_f32(ctx, iter->second); + lokr_w2_b = ggml_ext_cast_f32(ctx, backend, iter->second); } if (lokr_w2_a == nullptr || lokr_w2_b == nullptr) { @@ -468,23 +468,23 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_tensor* model_tensor, bool with_lora_and_lokr = true) { + ggml_tensor* get_weight_diff(const std::string& model_tensor_name, ggml_backend_t backend, ggml_context* ctx, ggml_tensor* model_tensor, bool with_lora_and_lokr = true) { // lora ggml_tensor* diff = nullptr; if (with_lora_and_lokr) { - diff = get_lora_weight_diff(model_tensor_name, 
ctx); + diff = get_lora_weight_diff(model_tensor_name, ctx, backend); } // diff if (diff == nullptr) { - diff = get_raw_weight_diff(model_tensor_name, ctx); + diff = get_raw_weight_diff(model_tensor_name, ctx, backend); } // loha if (diff == nullptr) { - diff = get_loha_weight_diff(model_tensor_name, ctx); + diff = get_loha_weight_diff(model_tensor_name, ctx, backend); } // lokr if (diff == nullptr && with_lora_and_lokr) { - diff = get_lokr_weight_diff(model_tensor_name, ctx); + diff = get_lokr_weight_diff(model_tensor_name, ctx, backend); } if (diff != nullptr) { if (ggml_nelements(diff) < ggml_nelements(model_tensor)) { @@ -502,6 +502,7 @@ struct LoraModel : public GGMLRunner { } ggml_tensor* get_out_diff(ggml_context* ctx, + ggml_backend_t backend, ggml_tensor* x, WeightAdapter::ForwardParams forward_params, const std::string& model_tensor_name) { @@ -590,7 +591,7 @@ struct LoraModel : public GGMLRunner { } scale_value *= multiplier; - auto curr_out_diff = ggml_ext_lokr_forward(ctx, x, lokr_w1, lokr_w1_a, lokr_w1_b, lokr_w2, lokr_w2_a, lokr_w2_b, is_conv2d, forward_params.conv2d, scale_value); + auto curr_out_diff = ggml_ext_lokr_forward(ctx, backend, x, lokr_w1, lokr_w1_a, lokr_w1_b, lokr_w2, lokr_w2_a, lokr_w2_b, is_conv2d, forward_params.conv2d, scale_value); if (out_diff == nullptr) { out_diff = curr_out_diff; } else { @@ -761,7 +762,7 @@ struct LoraModel : public GGMLRunner { ggml_tensor* model_tensor = it.second; // lora - ggml_tensor* diff = get_weight_diff(model_tensor_name, compute_ctx, model_tensor); + ggml_tensor* diff = get_weight_diff(model_tensor_name, runtime_backend, compute_ctx, model_tensor); if (diff == nullptr) { continue; } @@ -774,7 +775,7 @@ struct LoraModel : public GGMLRunner { ggml_tensor* final_tensor; if (model_tensor->type != GGML_TYPE_F32 && model_tensor->type != GGML_TYPE_F16) { - final_tensor = ggml_ext_cast_f32(compute_ctx, model_tensor); + final_tensor = ggml_ext_cast_f32(compute_ctx, runtime_backend, model_tensor); final_tensor 
= ggml_add_inplace(compute_ctx, final_tensor, diff); final_tensor = ggml_cpy(compute_ctx, final_tensor, model_tensor); } else { @@ -841,34 +842,35 @@ public: : lora_models(lora_models) { } - ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name, bool with_lora_and_lokr) { + ggml_tensor* patch_weight(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* weight, const std::string& weight_name, bool with_lora_and_lokr) { for (auto& lora_model : lora_models) { - ggml_tensor* diff = lora_model->get_weight_diff(weight_name, ctx, weight, with_lora_and_lokr); + ggml_tensor* diff = lora_model->get_weight_diff(weight_name, backend, ctx, weight, with_lora_and_lokr); if (diff == nullptr) { continue; } if (weight->type != GGML_TYPE_F32 && weight->type != GGML_TYPE_F16) { - weight = ggml_ext_cast_f32(ctx, weight); + weight = ggml_ext_cast_f32(ctx, backend, weight); } weight = ggml_add(ctx, weight, diff); } return weight; } - ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name) override { - return patch_weight(ctx, weight, weight_name, true); + ggml_tensor* patch_weight(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* weight, const std::string& weight_name) override { + return patch_weight(ctx, backend, weight, weight_name, true); } ggml_tensor* forward_with_lora(ggml_context* ctx, + ggml_backend_t backend, ggml_tensor* x, ggml_tensor* w, ggml_tensor* b, const std::string& prefix, WeightAdapter::ForwardParams forward_params) override { - w = patch_weight(ctx, w, prefix + "weight", false); + w = patch_weight(ctx, backend, w, prefix + "weight", false); if (b) { - b = patch_weight(ctx, b, prefix + "bias", false); + b = patch_weight(ctx, backend, b, prefix + "bias", false); } ggml_tensor* out; if (forward_params.op_type == ForwardParams::op_type_t::OP_LINEAR) { @@ -890,7 +892,7 @@ public: forward_params.conv2d.scale); } for (auto& lora_model : lora_models) { - ggml_tensor* out_diff = 
lora_model->get_out_diff(ctx, x, forward_params, prefix + "weight"); + ggml_tensor* out_diff = lora_model->get_out_diff(ctx, backend, x, forward_params, prefix + "weight"); if (out_diff == nullptr) { continue; } diff --git a/src/model.cpp b/src/model.cpp index 3479a0be..8fdde3b7 100644 --- a/src/model.cpp +++ b/src/model.cpp @@ -23,24 +23,11 @@ #include "ggml-alloc.h" #include "ggml-backend.h" -#include "ggml-cpu.h" #include "ggml.h" +#include "ggml_extend_backend.hpp" #include "zip.h" #include "name_conversion.h" -#include "stable-diffusion.h" - -#ifdef SD_USE_METAL -#include "ggml-metal.h" -#endif - -#ifdef SD_USE_VULKAN -#include "ggml-vulkan.h" -#endif - -#ifdef SD_USE_OPENCL -#include "ggml-opencl.h" -#endif /*================================================= Preprocess ==================================================*/ diff --git a/src/qwen_image.hpp b/src/qwen_image.hpp index 83c8cec6..1cbeb71d 100644 --- a/src/qwen_image.hpp +++ b/src/qwen_image.hpp @@ -95,9 +95,7 @@ namespace Qwen { float scale = 1.f / 32.f; bool force_prec_f32 = false; -#ifdef SD_USE_VULKAN - force_prec_f32 = true; -#endif + // The purpose of the scale here is to prevent NaN issues in certain situations. // For example when using CUDA but the weights are k-quants (not all prompts). 
blocks["to_out.0"] = std::shared_ptr(new Linear(inner_dim, out_dim, out_bias, false, force_prec_f32, scale)); @@ -124,6 +122,10 @@ namespace Qwen { auto to_v = std::dynamic_pointer_cast(blocks["to_v"]); auto to_out_0 = std::dynamic_pointer_cast(blocks["to_out.0"]); + if (sd_backend_is(ctx->backend, "Vulkan")) { + to_out_0->set_force_prec_f32(true); + } + auto norm_added_q = std::dynamic_pointer_cast(blocks["norm_added_q"]); auto norm_added_k = std::dynamic_pointer_cast(blocks["norm_added_k"]); diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index f4f8894f..88102ff6 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -172,60 +172,7 @@ public: } void init_backend() { -#ifdef SD_USE_CUDA - LOG_DEBUG("Using CUDA backend"); - backend = ggml_backend_cuda_init(0); -#endif -#ifdef SD_USE_METAL - LOG_DEBUG("Using Metal backend"); - backend = ggml_backend_metal_init(); -#endif -#ifdef SD_USE_VULKAN - LOG_DEBUG("Using Vulkan backend"); - size_t device = 0; - const int device_count = ggml_backend_vk_get_device_count(); - if (device_count) { - const char* SD_VK_DEVICE = getenv("SD_VK_DEVICE"); - if (SD_VK_DEVICE != nullptr) { - std::string sd_vk_device_str = SD_VK_DEVICE; - try { - device = std::stoull(sd_vk_device_str); - } catch (const std::invalid_argument&) { - LOG_WARN("SD_VK_DEVICE environment variable is not a valid integer (%s). Falling back to device 0.", SD_VK_DEVICE); - device = 0; - } catch (const std::out_of_range&) { - LOG_WARN("SD_VK_DEVICE environment variable value is out of range for `unsigned long long` type (%s). Falling back to device 0.", SD_VK_DEVICE); - device = 0; - } - if (device >= device_count) { - LOG_WARN("Cannot find targeted vulkan device (%zu). 
Falling back to device 0.", device); - device = 0; - } - } - LOG_INFO("Vulkan: Using device %zu", device); - backend = ggml_backend_vk_init(device); - } - if (!backend) { - LOG_WARN("Failed to initialize Vulkan backend"); - } -#endif -#ifdef SD_USE_OPENCL - LOG_DEBUG("Using OpenCL backend"); - // ggml_log_set(ggml_log_callback_default, nullptr); // Optional ggml logs - backend = ggml_backend_opencl_init(); - if (!backend) { - LOG_WARN("Failed to initialize OpenCL backend"); - } -#endif -#ifdef SD_USE_SYCL - LOG_DEBUG("Using SYCL backend"); - backend = ggml_backend_sycl_init(0); -#endif - - if (!backend) { - LOG_DEBUG("Using CPU backend"); - backend = ggml_backend_cpu_init(); - } + backend = sd_get_default_backend(); } std::shared_ptr get_rng(rng_type_t rng_type) { diff --git a/src/upscaler.cpp b/src/upscaler.cpp index ed7bb89a..80e68c94 100644 --- a/src/upscaler.cpp +++ b/src/upscaler.cpp @@ -16,26 +16,9 @@ bool UpscalerGGML::load_from_file(const std::string& esrgan_path, bool offload_params_to_cpu, int n_threads) { ggml_log_set(ggml_log_callback_default, nullptr); -#ifdef SD_USE_CUDA - LOG_DEBUG("Using CUDA backend"); - backend = ggml_backend_cuda_init(0); -#endif -#ifdef SD_USE_METAL - LOG_DEBUG("Using Metal backend"); - backend = ggml_backend_metal_init(); -#endif -#ifdef SD_USE_VULKAN - LOG_DEBUG("Using Vulkan backend"); - backend = ggml_backend_vk_init(0); -#endif -#ifdef SD_USE_OPENCL - LOG_DEBUG("Using OpenCL backend"); - backend = ggml_backend_opencl_init(); -#endif -#ifdef SD_USE_SYCL - LOG_DEBUG("Using SYCL backend"); - backend = ggml_backend_sycl_init(0); -#endif + + backend = sd_get_default_backend(); + ModelLoader model_loader; if (!model_loader.init_from_file_and_convert_name(esrgan_path)) { LOG_ERROR("init model loader from file failed: '%s'", esrgan_path.c_str()); diff --git a/src/util.cpp b/src/util.cpp index b28471d7..ebae2a27 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -23,8 +23,9 @@ #include #endif -#include "ggml-cpu.h" +#include 
"ggml-backend.h" #include "ggml.h" +#include "ggml_extend_backend.hpp" #include "stable-diffusion.h" bool ends_with(const std::string& str, const std::string& ending) { @@ -495,26 +496,6 @@ sd_progress_cb_t sd_get_progress_callback() { void* sd_get_progress_callback_data() { return sd_progress_cb_data; } -const char* sd_get_system_info() { - static char buffer[1024]; - std::stringstream ss; - ss << "System Info: \n"; - ss << " SSE3 = " << ggml_cpu_has_sse3() << " | "; - ss << " AVX = " << ggml_cpu_has_avx() << " | "; - ss << " AVX2 = " << ggml_cpu_has_avx2() << " | "; - ss << " AVX512 = " << ggml_cpu_has_avx512() << " | "; - ss << " AVX512_VBMI = " << ggml_cpu_has_avx512_vbmi() << " | "; - ss << " AVX512_VNNI = " << ggml_cpu_has_avx512_vnni() << " | "; - ss << " FMA = " << ggml_cpu_has_fma() << " | "; - ss << " NEON = " << ggml_cpu_has_neon() << " | "; - ss << " ARM_FMA = " << ggml_cpu_has_arm_fma() << " | "; - ss << " F16C = " << ggml_cpu_has_f16c() << " | "; - ss << " FP16_VA = " << ggml_cpu_has_fp16_va() << " | "; - ss << " WASM_SIMD = " << ggml_cpu_has_wasm_simd() << " | "; - ss << " VSX = " << ggml_cpu_has_vsx() << " | "; - snprintf(buffer, sizeof(buffer), "%s", ss.str().c_str()); - return buffer; -} sd_image_t tensor_to_sd_image(const sd::Tensor& tensor, int frame_index) { const auto& shape = tensor.shape(); @@ -718,3 +699,100 @@ std::vector> parse_prompt_attention(const std::str return res; } + +// test if the backend is a specific one, e.g. "CUDA", "ROCm", "Vulkan" etc. 
+bool sd_backend_is(ggml_backend_t backend, const std::string& name) { + if (!backend) { + return false; + } + ggml_backend_dev_t dev = ggml_backend_get_device(backend); + if (!dev) + return false; + std::string dev_name = ggml_backend_dev_name(dev); + return dev_name.find(name) != std::string::npos; +} + +ggml_backend_t sd_get_default_backend() { + ggml_backend_load_all_once(); + static std::once_flag once; + std::call_once(once, []() { + size_t dev_count = ggml_backend_dev_count(); + if (dev_count == 0) { + LOG_ERROR("No devices found!"); + } else { + LOG_DEBUG("Found %zu backend devices:", dev_count); + for (size_t i = 0; i < dev_count; ++i) { + auto dev = ggml_backend_dev_get(i); + LOG_DEBUG("#%zu: %s", i, ggml_backend_dev_name(dev)); + } + } + }); + ggml_backend_t backend = nullptr; + const char* SD_VK_DEVICE = getenv("SD_VK_DEVICE"); + if (SD_VK_DEVICE != nullptr) { + std::string sd_vk_device_str = SD_VK_DEVICE; + try { + unsigned long long device = std::stoull(sd_vk_device_str); + std::string vk_device_name = "Vulkan" + std::to_string(device); + if (backend_name_exists(vk_device_name)) { + LOG_INFO("Selecting %s as main device by env var SD_VK_DEVICE", vk_device_name.c_str()); + backend = init_named_backend(vk_device_name); + if (!backend) { + LOG_WARN("Device %s requested by SD_VK_DEVICE failed to init. Falling back to the default device.", vk_device_name.c_str()); + } + } else { + LOG_WARN("Device %s requested by SD_VK_DEVICE was not found. Falling back to the default device.", vk_device_name.c_str()); + } + } catch (const std::invalid_argument&) { + LOG_WARN("SD_VK_DEVICE environment variable is not a valid integer (%s). Falling back to the default device.", SD_VK_DEVICE); + } catch (const std::out_of_range&) { + LOG_WARN("SD_VK_DEVICE environment variable value is out of range for `unsigned long long` type (%s). 
Falling back to the default device.", SD_VK_DEVICE); + } + } + + if (!backend) { + std::string dev_name = get_default_backend_name(); + backend = init_named_backend(dev_name); + if (!backend && !dev_name.empty()) { + LOG_WARN("device %s failed to init", dev_name.c_str()); + } + } + + if (!backend) { + LOG_WARN("loading CPU backend"); + backend = ggml_backend_cpu_init(); + } + + if (ggml_backend_is_cpu(backend)) { + LOG_DEBUG("Using CPU backend"); + } + + return backend; +} + +// namespace is needed to avoid conflicts with ggml_backend_extend.hpp +namespace ggml_cpu { +#include "ggml-cpu.h" +} + +const char* sd_get_system_info() { + using namespace ggml_cpu; + static char buffer[1024]; + std::stringstream ss; + ss << "System Info: \n"; + ss << " SSE3 = " << ggml_cpu_has_sse3() << " | "; + ss << " AVX = " << ggml_cpu_has_avx() << " | "; + ss << " AVX2 = " << ggml_cpu_has_avx2() << " | "; + ss << " AVX512 = " << ggml_cpu_has_avx512() << " | "; + ss << " AVX512_VBMI = " << ggml_cpu_has_avx512_vbmi() << " | "; + ss << " AVX512_VNNI = " << ggml_cpu_has_avx512_vnni() << " | "; + ss << " FMA = " << ggml_cpu_has_fma() << " | "; + ss << " NEON = " << ggml_cpu_has_neon() << " | "; + ss << " ARM_FMA = " << ggml_cpu_has_arm_fma() << " | "; + ss << " F16C = " << ggml_cpu_has_f16c() << " | "; + ss << " FP16_VA = " << ggml_cpu_has_fp16_va() << " | "; + ss << " WASM_SIMD = " << ggml_cpu_has_wasm_simd() << " | "; + ss << " VSX = " << ggml_cpu_has_vsx() << " | "; + snprintf(buffer, sizeof(buffer), "%s", ss.str().c_str()); + return buffer; +} diff --git a/src/util.h b/src/util.h index 2468cb93..72c8a815 100644 --- a/src/util.h +++ b/src/util.h @@ -6,6 +6,7 @@ #include #include +#include "ggml-backend.h" #include "stable-diffusion.h" #include "tensor.hpp" @@ -82,6 +83,10 @@ int sd_get_preview_interval(); bool sd_should_preview_denoised(); bool sd_should_preview_noisy(); +// test if the backend is a specific one, e.g. "CUDA", "ROCm", "Vulkan" etc. 
+bool sd_backend_is(ggml_backend_t backend, const std::string& name); +ggml_backend_t sd_get_default_backend(); + #define LOG_DEBUG(format, ...) log_printf(SD_LOG_DEBUG, __FILE__, __LINE__, format, ##__VA_ARGS__) #define LOG_INFO(format, ...) log_printf(SD_LOG_INFO, __FILE__, __LINE__, format, ##__VA_ARGS__) #define LOG_WARN(format, ...) log_printf(SD_LOG_WARN, __FILE__, __LINE__, format, ##__VA_ARGS__) diff --git a/src/z_image.hpp b/src/z_image.hpp index 363ce5f4..6bb44b79 100644 --- a/src/z_image.hpp +++ b/src/z_image.hpp @@ -31,10 +31,6 @@ namespace ZImage { : head_dim(head_dim), num_heads(num_heads), num_kv_heads(num_kv_heads), qk_norm(qk_norm) { blocks["qkv"] = std::make_shared(hidden_size, (num_heads + num_kv_heads * 2) * head_dim, false); float scale = 1.f; -#if GGML_USE_HIP - // Prevent NaN issues with certain ROCm setups - scale = 1.f / 16.f; -#endif blocks["out"] = std::make_shared(num_heads * head_dim, hidden_size, false, false, false, scale); if (qk_norm) { blocks["q_norm"] = std::make_shared(head_dim); @@ -52,6 +48,10 @@ namespace ZImage { auto qkv_proj = std::dynamic_pointer_cast(blocks["qkv"]); auto out_proj = std::dynamic_pointer_cast(blocks["out"]); + if (sd_backend_is(ctx->backend, "ROCm")) { + out_proj->set_scale(1.f / 16.f); + } + auto qkv = qkv_proj->forward(ctx, x); // [N, n_token, (num_heads + num_kv_heads*2)*head_dim] qkv = ggml_reshape_4d(ctx->ggml_ctx, qkv, head_dim, num_heads + num_kv_heads * 2, qkv->ne[1], qkv->ne[2]); // [N, n_token, num_heads + num_kv_heads*2, head_dim] @@ -115,9 +115,7 @@ namespace ZImage { bool force_prec_f32 = false; float scale = 1.f / 128.f; -#ifdef SD_USE_VULKAN - force_prec_f32 = true; -#endif + // The purpose of the scale here is to prevent NaN issues in certain situations. // For example, when using CUDA but the weights are k-quants. 
blocks["w2"] = std::make_shared(hidden_dim, dim, false, false, force_prec_f32, scale); @@ -129,6 +127,10 @@ namespace ZImage { auto w2 = std::dynamic_pointer_cast(blocks["w2"]); auto w3 = std::dynamic_pointer_cast(blocks["w3"]); + if (sd_backend_is(ctx->backend, "Vulkan")) { + w2->set_force_prec_f32(true); + } + auto x1 = w1->forward(ctx, x); auto x3 = w3->forward(ctx, x); x = ggml_swiglu_split(ctx->ggml_ctx, x1, x3); From 3d6064b37ef4607917f8acf2ca8c8906d5087413 Mon Sep 17 00:00:00 2001 From: leejet Date: Thu, 30 Apr 2026 01:13:56 +0800 Subject: [PATCH 6/9] perf: speed up tensor_to_sd_image conversion (#1466) --- src/preprocessing.hpp | 84 +++++++++++++++++++++++++++++++++++-------- src/util.cpp | 12 +------ 2 files changed, 71 insertions(+), 25 deletions(-) diff --git a/src/preprocessing.hpp b/src/preprocessing.hpp index 7c83a289..57ab0cec 100644 --- a/src/preprocessing.hpp +++ b/src/preprocessing.hpp @@ -24,6 +24,75 @@ static inline void preprocessing_set_4d(sd::Tensor& tensor, float value, tensor.values()[static_cast(preprocessing_offset_4d(tensor, i0, i1, i2, i3))] = value; } +static inline uint8_t preprocessing_float_to_u8(float value) { + if (value <= 0.0f) { + return 0; + } + if (value >= 1.0f) { + return 255; + } + return static_cast(value * 255.0f + 0.5f); +} + +static inline void preprocessing_tensor_frame_to_sd_image(const sd::Tensor& tensor, int frame_index, uint8_t* image_data) { + const auto& shape = tensor.shape(); + GGML_ASSERT(shape.size() == 4 || shape.size() == 5); + GGML_ASSERT(image_data != nullptr); + + const int width = static_cast(shape[0]); + const int height = static_cast(shape[1]); + const int channel = static_cast(shape[shape.size() == 5 ? 
3 : 2]); + const size_t pixels = static_cast(width) * static_cast(height); + const float* src = tensor.data(); + + if (shape.size() == 4) { + GGML_ASSERT(frame_index >= 0 && frame_index < shape[3]); + const size_t frame_stride = pixels * static_cast(channel); + const float* frame_ptr = src + static_cast(frame_index) * frame_stride; + if (channel == 3) { + const float* c0 = frame_ptr; + const float* c1 = frame_ptr + pixels; + const float* c2 = frame_ptr + pixels * 2; + for (size_t i = 0; i < pixels; ++i) { + image_data[i * 3 + 0] = preprocessing_float_to_u8(c0[i]); + image_data[i * 3 + 1] = preprocessing_float_to_u8(c1[i]); + image_data[i * 3 + 2] = preprocessing_float_to_u8(c2[i]); + } + return; + } + + for (size_t i = 0; i < pixels; ++i) { + for (int c = 0; c < channel; ++c) { + image_data[i * static_cast(channel) + static_cast(c)] = + preprocessing_float_to_u8(frame_ptr[i + pixels * static_cast(c)]); + } + } + return; + } + + GGML_ASSERT(frame_index >= 0 && frame_index < shape[2]); + const size_t channel_stride = pixels * static_cast(shape[2]); + const float* frame_ptr = src + static_cast(frame_index) * pixels; + if (channel == 3) { + const float* c0 = frame_ptr; + const float* c1 = frame_ptr + channel_stride; + const float* c2 = frame_ptr + channel_stride * 2; + for (size_t i = 0; i < pixels; ++i) { + image_data[i * 3 + 0] = preprocessing_float_to_u8(c0[i]); + image_data[i * 3 + 1] = preprocessing_float_to_u8(c1[i]); + image_data[i * 3 + 2] = preprocessing_float_to_u8(c2[i]); + } + return; + } + + for (size_t i = 0; i < pixels; ++i) { + for (int c = 0; c < channel; ++c) { + image_data[i * static_cast(channel) + static_cast(c)] = + preprocessing_float_to_u8(frame_ptr[i + channel_stride * static_cast(c)]); + } + } +} + static inline sd::Tensor sd_image_to_preprocessing_tensor(sd_image_t image) { sd::Tensor tensor({static_cast(image.width), static_cast(image.height), static_cast(image.channel), 1}); for (uint32_t y = 0; y < image.height; ++y) { @@ -39,20 +108,7 @@ 
static inline sd::Tensor sd_image_to_preprocessing_tensor(sd_image_t imag static inline void preprocessing_tensor_to_sd_image(const sd::Tensor& tensor, uint8_t* image_data) { GGML_ASSERT(tensor.dim() == 4); GGML_ASSERT(tensor.shape()[3] == 1); - GGML_ASSERT(image_data != nullptr); - - int width = static_cast(tensor.shape()[0]); - int height = static_cast(tensor.shape()[1]); - int channel = static_cast(tensor.shape()[2]); - for (int y = 0; y < height; ++y) { - for (int x = 0; x < width; ++x) { - for (int c = 0; c < channel; ++c) { - float value = preprocessing_get_4d(tensor, x, y, c, 0); - value = std::min(1.0f, std::max(0.0f, value)); - image_data[(y * width + x) * channel + c] = static_cast(std::round(value * 255.0f)); - } - } - } + preprocessing_tensor_frame_to_sd_image(tensor, 0, image_data); } static inline sd::Tensor gaussian_kernel_tensor(int kernel_size) { diff --git a/src/util.cpp b/src/util.cpp index ebae2a27..0b514bb7 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -505,17 +505,7 @@ sd_image_t tensor_to_sd_image(const sd::Tensor& tensor, int frame_index) int channel = static_cast(shape[shape.size() == 5 ? 3 : 2]); uint8_t* data = (uint8_t*)malloc(static_cast(width * height * channel)); GGML_ASSERT(data != nullptr); - - for (int iw = 0; iw < width; ++iw) { - for (int ih = 0; ih < height; ++ih) { - for (int ic = 0; ic < channel; ++ic) { - float value = shape.size() == 5 ? 
tensor.index(iw, ih, frame_index, ic, 0) - : tensor.index(iw, ih, ic, frame_index); - value = std::clamp(value, 0.0f, 1.0f); - data[(ih * width + iw) * channel + ic] = static_cast(std::round(value * 255.0f)); - } - } - } + preprocessing_tensor_frame_to_sd_image(tensor, frame_index, data); return { static_cast(width), static_cast(height), From 9097ce5211950a98e2fd557c68d0a43bc7c2de0a Mon Sep 17 00:00:00 2001 From: fszontagh <51741446+fszontagh@users.noreply.github.com> Date: Wed, 6 May 2026 15:45:47 +0200 Subject: [PATCH 7/9] fix: skip empty MultiLoraAdapter when no LoRAs target a model (#1469) --- src/stable-diffusion.cpp | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 88102ff6..860cff85 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1104,8 +1104,13 @@ public: cond_stage_lora_models.push_back(lora); } } - auto multi_lora_adapter = std::make_shared(cond_stage_lora_models); - cond_stage_model->set_weight_adapter(multi_lora_adapter); + // Only attach the adapter when there are LoRAs targeting the cond_stage model. + // An empty MultiLoraAdapter still routes every linear/conv through + // forward_with_lora() instead of the direct kernel path — slower for no benefit. 
+ if (!cond_stage_lora_models.empty()) { + auto multi_lora_adapter = std::make_shared(cond_stage_lora_models); + cond_stage_model->set_weight_adapter(multi_lora_adapter); + } } if (diffusion_model) { std::vector> lora_models; @@ -1136,10 +1141,12 @@ public: diffusion_lora_models.push_back(lora); } } - auto multi_lora_adapter = std::make_shared(diffusion_lora_models); - diffusion_model->set_weight_adapter(multi_lora_adapter); - if (high_noise_diffusion_model) { - high_noise_diffusion_model->set_weight_adapter(multi_lora_adapter); + if (!diffusion_lora_models.empty()) { + auto multi_lora_adapter = std::make_shared(diffusion_lora_models); + diffusion_model->set_weight_adapter(multi_lora_adapter); + if (high_noise_diffusion_model) { + high_noise_diffusion_model->set_weight_adapter(multi_lora_adapter); + } } } @@ -1172,8 +1179,10 @@ public: first_stage_lora_models.push_back(lora); } } - auto multi_lora_adapter = std::make_shared(first_stage_lora_models); - first_stage_model->set_weight_adapter(multi_lora_adapter); + if (!first_stage_lora_models.empty()) { + auto multi_lora_adapter = std::make_shared(first_stage_lora_models); + first_stage_model->set_weight_adapter(multi_lora_adapter); + } } } From 586b6f148111ce7614d5d90c8f5c31633d7b4b0a Mon Sep 17 00:00:00 2001 From: Wagner Bruna Date: Wed, 6 May 2026 10:49:06 -0300 Subject: [PATCH 8/9] feat: adapt res samplers for flow models for eta > 0 (#1436) --- src/denoiser.hpp | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/src/denoiser.hpp b/src/denoiser.hpp index a6e81d59..831da258 100644 --- a/src/denoiser.hpp +++ b/src/denoiser.hpp @@ -808,6 +808,18 @@ static std::tuple get_ancestral_step_flow(float sigma_from, return {sigma_down, sigma_up, alpha_scale}; } +static std::tuple get_ancestral_step(float sigma_from, + float sigma_to, + float eta, + bool is_flow_denoiser) { + if (is_flow_denoiser) { + return get_ancestral_step_flow(sigma_from, sigma_to, eta); + } else { + auto 
[sigma_down, sigma_up] = get_ancestral_step(sigma_from, sigma_to, eta); + return {sigma_down, sigma_up, 1.0f}; + } +} + static sd::Tensor sample_euler_ancestral(denoise_cb_t model, sd::Tensor x, const std::vector& sigmas, @@ -1247,6 +1259,7 @@ static sd::Tensor sample_res_multistep(denoise_cb_t model, sd::Tensor x, const std::vector& sigmas, std::shared_ptr rng, + bool is_flow_denoiser, float eta) { sd::Tensor old_denoised = x; bool have_old_sigma = false; @@ -1278,7 +1291,8 @@ static sd::Tensor sample_res_multistep(denoise_cb_t model, float sigma_from = sigmas[i]; float sigma_to = sigmas[i + 1]; - auto [sigma_down, sigma_up] = get_ancestral_step(sigma_from, sigma_to, eta); + + auto [sigma_down, sigma_up, alpha_scale] = get_ancestral_step(sigma_from, sigma_to, eta, is_flow_denoiser); if (sigma_down == 0.0f || !have_old_sigma) { x += ((x - denoised) / sigma_from) * (sigma_down - sigma_from); @@ -1305,7 +1319,10 @@ static sd::Tensor sample_res_multistep(denoise_cb_t model, x = sigma_fn(h) * x + h * (b1 * denoised + b2 * old_denoised); } - if (sigmas[i + 1] > 0 && sigma_up > 0.0f) { + if (sigma_to > 0.0f && sigma_up > 0.0f) { + if (is_flow_denoiser) { + x *= alpha_scale; + } x += sd::Tensor::randn_like(x, rng) * sigma_up; } @@ -1320,6 +1337,7 @@ static sd::Tensor sample_res_2s(denoise_cb_t model, sd::Tensor x, const std::vector& sigmas, std::shared_ptr rng, + bool is_flow_denoiser, float eta) { const float c2 = 0.5f; auto t_fn = [](float sigma) -> float { return -logf(sigma); }; @@ -1348,7 +1366,7 @@ static sd::Tensor sample_res_2s(denoise_cb_t model, } sd::Tensor denoised = std::move(denoised_opt); - auto [sigma_down, sigma_up] = get_ancestral_step(sigma_from, sigma_to, eta); + auto [sigma_down, sigma_up, alpha_scale] = get_ancestral_step(sigma_from, sigma_to, eta, is_flow_denoiser); sd::Tensor x0 = x; if (sigma_down == 0.0f || sigma_from == 0.0f) { @@ -1377,7 +1395,10 @@ static sd::Tensor sample_res_2s(denoise_cb_t model, x = x0 + h * (b1 * eps1 + b2 * eps2); } - if 
(sigmas[i + 1] > 0 && sigma_up > 0.0f) { + if (sigma_to > 0.0f && sigma_up > 0.0f) { + if (is_flow_denoiser) { + x *= alpha_scale; + } x += sd::Tensor::randn_like(x, rng) * sigma_up; } } @@ -1664,9 +1685,9 @@ static sd::Tensor sample_k_diffusion(sample_method_t method, case IPNDM_V_SAMPLE_METHOD: return sample_ipndm_v(model, std::move(x), sigmas); case RES_MULTISTEP_SAMPLE_METHOD: - return sample_res_multistep(model, std::move(x), sigmas, rng, eta); + return sample_res_multistep(model, std::move(x), sigmas, rng, is_flow_denoiser, eta); case RES_2S_SAMPLE_METHOD: - return sample_res_2s(model, std::move(x), sigmas, rng, eta); + return sample_res_2s(model, std::move(x), sigmas, rng, is_flow_denoiser, eta); case ER_SDE_SAMPLE_METHOD: return sample_er_sde(model, std::move(x), sigmas, rng, is_flow_denoiser, eta); case DDIM_TRAILING_SAMPLE_METHOD: From 90e87bc846f17059771efb8aaa31e9ef0cab6f78 Mon Sep 17 00:00:00 2001 From: leejet Date: Wed, 6 May 2026 21:56:02 +0800 Subject: [PATCH 9/9] feat: add max-vram based segmented param offload (#1476) --- examples/cli/README.md | 2 + examples/common/common.cpp | 9 +- examples/common/common.h | 1 + examples/server/README.md | 2 + include/stable-diffusion.h | 1 + src/anima.hpp | 6 + src/auto_encoder_kl.hpp | 10 + src/clip.hpp | 14 +- src/conditioner.hpp | 45 ++- src/diffusion_model.hpp | 33 ++ src/ernie_image.hpp | 5 +- src/esrgan.hpp | 8 +- src/flux.hpp | 6 + src/ggml_extend.hpp | 617 ++++++++++++++++++++++++++++++--- src/ggml_graph_cut.cpp | 676 +++++++++++++++++++++++++++++++++++++ src/ggml_graph_cut.h | 104 ++++++ src/llm.hpp | 8 + src/mmdit.hpp | 7 + src/qwen_image.hpp | 5 + src/stable-diffusion.cpp | 22 +- src/t5.hpp | 9 +- src/unet.hpp | 6 + src/upscaler.cpp | 8 + src/upscaler.h | 6 +- src/wan.hpp | 25 +- src/z_image.hpp | 7 + 26 files changed, 1576 insertions(+), 66 deletions(-) create mode 100644 src/ggml_graph_cut.cpp create mode 100644 src/ggml_graph_cut.h diff --git a/examples/cli/README.md b/examples/cli/README.md index 
7b620fee..b32fe37f 100644 --- a/examples/cli/README.md +++ b/examples/cli/README.md @@ -54,6 +54,8 @@ Context Options: -t, --threads number of threads to use during computation (default: -1). If threads <= 0, then threads will be set to the number of CPU physical cores --chroma-t5-mask-pad t5 mask pad size of chroma + --max-vram maximum VRAM budget in GiB for graph-cut segmented execution. 0 disables + graph splitting --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae --offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed diff --git a/examples/common/common.cpp b/examples/common/common.cpp index 1a5399b8..d4c8a72b 100644 --- a/examples/common/common.cpp +++ b/examples/common/common.cpp @@ -394,7 +394,12 @@ ArgOptions SDContextParams::get_options() { &chroma_t5_mask_pad}, }; - options.float_options = {}; + options.float_options = { + {"", + "--max-vram", + "maximum VRAM budget in GiB for graph-cut segmented execution. 0 disables graph splitting", + &max_vram}, + }; options.bool_options = { {"", @@ -670,6 +675,7 @@ std::string SDContextParams::to_string() const { << " rng_type: " << sd_rng_type_name(rng_type) << ",\n" << " sampler_rng_type: " << sd_rng_type_name(sampler_rng_type) << ",\n" << " offload_params_to_cpu: " << (offload_params_to_cpu ? "true" : "false") << ",\n" + << " max_vram: " << max_vram << ",\n" << " enable_mmap: " << (enable_mmap ? "true" : "false") << ",\n" << " control_net_cpu: " << (control_net_cpu ? "true" : "false") << ",\n" << " clip_on_cpu: " << (clip_on_cpu ? 
"true" : "false") << ",\n" @@ -744,6 +750,7 @@ sd_ctx_params_t SDContextParams::to_sd_ctx_params_t(bool vae_decode_only, bool f chroma_use_t5_mask, chroma_t5_mask_pad, qwen_image_zero_cond_t, + max_vram, }; return sd_ctx_params; } diff --git a/examples/common/common.h b/examples/common/common.h index c4498c35..f87293f3 100644 --- a/examples/common/common.h +++ b/examples/common/common.h @@ -109,6 +109,7 @@ struct SDContextParams { rng_type_t rng_type = CUDA_RNG; rng_type_t sampler_rng_type = RNG_TYPE_COUNT; bool offload_params_to_cpu = false; + float max_vram = 0.f; bool enable_mmap = false; bool control_net_cpu = false; bool clip_on_cpu = false; diff --git a/examples/server/README.md b/examples/server/README.md index 469dd346..23b79c9d 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -156,6 +156,8 @@ Context Options: -t, --threads number of threads to use during computation (default: -1). If threads <= 0, then threads will be set to the number of CPU physical cores --chroma-t5-mask-pad t5 mask pad size of chroma + --max-vram maximum VRAM budget in GiB for graph-cut segmented execution. 
0 disables + graph splitting --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae --offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed diff --git a/include/stable-diffusion.h b/include/stable-diffusion.h index 75027f8f..c4c14949 100644 --- a/include/stable-diffusion.h +++ b/include/stable-diffusion.h @@ -203,6 +203,7 @@ typedef struct { bool chroma_use_t5_mask; int chroma_t5_mask_pad; bool qwen_image_zero_cond_t; + float max_vram; } sd_ctx_params_t; typedef struct { diff --git a/src/anima.hpp b/src/anima.hpp index 5850cc3e..4bfc0474 100644 --- a/src/anima.hpp +++ b/src/anima.hpp @@ -499,9 +499,15 @@ namespace Anima { encoder_hidden_states = adapted_context; } + sd::ggml_graph_cut::mark_graph_cut(x, "anima.prelude", "x"); + sd::ggml_graph_cut::mark_graph_cut(embedded_timestep, "anima.prelude", "embedded_timestep"); + sd::ggml_graph_cut::mark_graph_cut(temb, "anima.prelude", "temb"); + sd::ggml_graph_cut::mark_graph_cut(encoder_hidden_states, "anima.prelude", "context"); + for (int i = 0; i < num_layers; i++) { auto block = std::dynamic_pointer_cast(blocks["blocks." + std::to_string(i)]); x = block->forward(ctx, x, encoder_hidden_states, embedded_timestep, temb, image_pe); + sd::ggml_graph_cut::mark_graph_cut(x, "anima.blocks." 
+ std::to_string(i), "x"); } x = final_layer->forward(ctx, x, embedded_timestep, temb); // [N, h*w, ph*pw*C] diff --git a/src/auto_encoder_kl.hpp b/src/auto_encoder_kl.hpp index 5cf09b88..4fb28a16 100644 --- a/src/auto_encoder_kl.hpp +++ b/src/auto_encoder_kl.hpp @@ -328,6 +328,7 @@ public: auto conv_out = std::dynamic_pointer_cast(blocks["conv_out"]); auto h = conv_in->forward(ctx, x); // [N, ch, h, w] + // sd::ggml_graph_cut::mark_graph_cut(h, "vae.encoder.prelude", "h"); // downsampling size_t num_resolutions = ch_mult.size(); @@ -337,12 +338,14 @@ public: auto down_block = std::dynamic_pointer_cast(blocks[name]); h = down_block->forward(ctx, h); + // sd::ggml_graph_cut::mark_graph_cut(h, "vae.encoder.down." + std::to_string(i) + ".block." + std::to_string(j), "h"); } if (i != num_resolutions - 1) { std::string name = "down." + std::to_string(i) + ".downsample"; auto down_sample = std::dynamic_pointer_cast(blocks[name]); h = down_sample->forward(ctx, h); + // sd::ggml_graph_cut::mark_graph_cut(h, "vae.encoder.down." + std::to_string(i) + ".downsample", "h"); } } @@ -350,6 +353,7 @@ public: h = mid_block_1->forward(ctx, h); h = mid_attn_1->forward(ctx, h); h = mid_block_2->forward(ctx, h); // [N, block_in, h, w] + // sd::ggml_graph_cut::mark_graph_cut(h, "vae.encoder.mid", "h"); // end h = norm_out->forward(ctx, h); @@ -450,6 +454,7 @@ public: // conv_in auto h = conv_in->forward(ctx, z); // [N, block_in, h, w] + // sd::ggml_graph_cut::mark_graph_cut(h, "vae.decoder.prelude", "h"); // middle h = mid_block_1->forward(ctx, h); @@ -457,6 +462,7 @@ public: h = mid_attn_1->forward(ctx, h); h = mid_block_2->forward(ctx, h); // [N, block_in, h, w] + // sd::ggml_graph_cut::mark_graph_cut(h, "vae.decoder.mid", "h"); // upsampling int num_resolutions = static_cast(ch_mult.size()); @@ -466,12 +472,14 @@ public: auto up_block = std::dynamic_pointer_cast(blocks[name]); h = up_block->forward(ctx, h); + // sd::ggml_graph_cut::mark_graph_cut(h, "vae.decoder.up." 
+ std::to_string(i) + ".block." + std::to_string(j), "h"); } if (i != 0) { std::string name = "up." + std::to_string(i) + ".upsample"; auto up_sample = std::dynamic_pointer_cast(blocks[name]); h = up_sample->forward(ctx, h); + // sd::ggml_graph_cut::mark_graph_cut(h, "vae.decoder.up." + std::to_string(i) + ".upsample", "h"); } } @@ -599,6 +607,7 @@ public: if (use_quant) { auto post_quant_conv = std::dynamic_pointer_cast(blocks["post_quant_conv"]); z = post_quant_conv->forward(ctx, z); // [N, z_channels, h, w] + // sd::ggml_graph_cut::mark_graph_cut(z, "vae.decode.prelude", "z"); } auto decoder = std::dynamic_pointer_cast(blocks["decoder"]); @@ -616,6 +625,7 @@ public: if (use_quant) { auto quant_conv = std::dynamic_pointer_cast(blocks["quant_conv"]); z = quant_conv->forward(ctx, z); // [N, 2*embed_dim, h/8, w/8] + // sd::ggml_graph_cut::mark_graph_cut(z, "vae.encode.final", "z"); } if (sd_version_uses_flux2_vae(version)) { z = ggml_ext_chunk(ctx->ggml_ctx, z, 2, 2)[0]; diff --git a/src/clip.hpp b/src/clip.hpp index 8a2070e0..8b2084c4 100644 --- a/src/clip.hpp +++ b/src/clip.hpp @@ -95,8 +95,9 @@ public: ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x, - ggml_tensor* mask = nullptr, - int clip_skip = -1) { + ggml_tensor* mask = nullptr, + int clip_skip = -1, + const std::string& graph_cut_prefix = "") { // x: [N, n_token, d_model] int layer_idx = n_layer - 1; // LOG_DEBUG("clip_skip %d", clip_skip); @@ -112,6 +113,9 @@ public: std::string name = "layers." + std::to_string(i); auto layer = std::dynamic_pointer_cast(blocks[name]); x = layer->forward(ctx, x, mask); // [N, n_token, d_model] + if (!graph_cut_prefix.empty()) { + sd::ggml_graph_cut::mark_graph_cut(x, graph_cut_prefix + ".layers." 
+ std::to_string(i), "x"); + } // LOG_DEBUG("layer %d", i); } return x; @@ -304,7 +308,8 @@ public: auto final_layer_norm = std::dynamic_pointer_cast(blocks["final_layer_norm"]); auto x = embeddings->forward(ctx, input_ids, tkn_embeddings); // [N, n_token, hidden_size] - x = encoder->forward(ctx, x, mask, return_pooled ? -1 : clip_skip); + sd::ggml_graph_cut::mark_graph_cut(x, "clip_text.prelude", "x"); + x = encoder->forward(ctx, x, mask, return_pooled ? -1 : clip_skip, "clip_text"); if (return_pooled || with_final_ln) { x = final_layer_norm->forward(ctx, x); } @@ -368,7 +373,8 @@ public: auto x = embeddings->forward(ctx, pixel_values); // [N, num_positions, embed_dim] x = pre_layernorm->forward(ctx, x); - x = encoder->forward(ctx, x, nullptr, clip_skip); + sd::ggml_graph_cut::mark_graph_cut(x, "clip_vision.prelude", "x"); + x = encoder->forward(ctx, x, nullptr, clip_skip, "clip_vision"); auto last_hidden_state = x; diff --git a/src/conditioner.hpp b/src/conditioner.hpp index 9f4d4552..4907938b 100644 --- a/src/conditioner.hpp +++ b/src/conditioner.hpp @@ -85,7 +85,8 @@ public: virtual void free_params_buffer() = 0; virtual void get_param_tensors(std::map& tensors) = 0; virtual size_t get_params_buffer_size() = 0; - virtual void set_flash_attention_enabled(bool enabled) = 0; + virtual void set_max_graph_vram_bytes(size_t max_vram_bytes) {} + virtual void set_flash_attention_enabled(bool enabled) = 0; virtual void set_weight_adapter(const std::shared_ptr& adapter) {} virtual std::tuple> get_learned_condition_with_trigger(int n_threads, const ConditionerParams& conditioner_params) { @@ -165,6 +166,13 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { return buffer_size; } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + text_model->set_max_graph_vram_bytes(max_vram_bytes); + if (sd_version_is_sdxl(version)) { + text_model2->set_max_graph_vram_bytes(max_vram_bytes); + } + } + void set_flash_attention_enabled(bool enabled) override { 
text_model->set_flash_attention_enabled(enabled); if (sd_version_is_sdxl(version)) { @@ -781,6 +789,18 @@ struct SD3CLIPEmbedder : public Conditioner { return buffer_size; } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + if (clip_l) { + clip_l->set_max_graph_vram_bytes(max_vram_bytes); + } + if (clip_g) { + clip_g->set_max_graph_vram_bytes(max_vram_bytes); + } + if (t5) { + t5->set_max_graph_vram_bytes(max_vram_bytes); + } + } + void set_flash_attention_enabled(bool enabled) override { if (clip_l) { clip_l->set_flash_attention_enabled(enabled); @@ -1124,6 +1144,15 @@ struct FluxCLIPEmbedder : public Conditioner { return buffer_size; } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + if (clip_l) { + clip_l->set_max_graph_vram_bytes(max_vram_bytes); + } + if (t5) { + t5->set_max_graph_vram_bytes(max_vram_bytes); + } + } + void set_flash_attention_enabled(bool enabled) override { if (clip_l) { clip_l->set_flash_attention_enabled(enabled); @@ -1349,6 +1378,12 @@ struct T5CLIPEmbedder : public Conditioner { return buffer_size; } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + if (t5) { + t5->set_max_graph_vram_bytes(max_vram_bytes); + } + } + void set_flash_attention_enabled(bool enabled) override { if (t5) { t5->set_flash_attention_enabled(enabled); @@ -1525,6 +1560,10 @@ struct AnimaConditioner : public Conditioner { return llm->get_params_buffer_size(); } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + llm->set_max_graph_vram_bytes(max_vram_bytes); + } + void set_flash_attention_enabled(bool enabled) override { llm->set_flash_attention_enabled(enabled); } @@ -1657,6 +1696,10 @@ struct LLMEmbedder : public Conditioner { return buffer_size; } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + llm->set_max_graph_vram_bytes(max_vram_bytes); + } + void set_flash_attention_enabled(bool enabled) override { llm->set_flash_attention_enabled(enabled); } diff --git 
a/src/diffusion_model.hpp b/src/diffusion_model.hpp index c0a2a11c..1a202a1a 100644 --- a/src/diffusion_model.hpp +++ b/src/diffusion_model.hpp @@ -49,6 +49,7 @@ struct DiffusionModel { virtual void set_weight_adapter(const std::shared_ptr& adapter){}; virtual int64_t get_adm_in_channels() = 0; virtual void set_flash_attention_enabled(bool enabled) = 0; + virtual void set_max_graph_vram_bytes(size_t max_vram_bytes) = 0; virtual void set_circular_axes(bool circular_x, bool circular_y) = 0; }; @@ -98,6 +99,10 @@ struct UNetModel : public DiffusionModel { unet.set_flash_attention_enabled(enabled); } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + unet.set_max_graph_vram_bytes(max_vram_bytes); + } + void set_circular_axes(bool circular_x, bool circular_y) override { unet.set_circular_axes(circular_x, circular_y); } @@ -164,6 +169,10 @@ struct MMDiTModel : public DiffusionModel { mmdit.set_flash_attention_enabled(enabled); } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + mmdit.set_max_graph_vram_bytes(max_vram_bytes); + } + void set_circular_axes(bool circular_x, bool circular_y) override { mmdit.set_circular_axes(circular_x, circular_y); } @@ -229,6 +238,10 @@ struct FluxModel : public DiffusionModel { flux.set_flash_attention_enabled(enabled); } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + flux.set_max_graph_vram_bytes(max_vram_bytes); + } + void set_circular_axes(bool circular_x, bool circular_y) override { flux.set_circular_axes(circular_x, circular_y); } @@ -299,6 +312,10 @@ struct AnimaModel : public DiffusionModel { anima.set_flash_attention_enabled(enabled); } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + anima.set_max_graph_vram_bytes(max_vram_bytes); + } + void set_circular_axes(bool circular_x, bool circular_y) override { anima.set_circular_axes(circular_x, circular_y); } @@ -364,6 +381,10 @@ struct WanModel : public DiffusionModel { wan.set_flash_attention_enabled(enabled); 
} + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + wan.set_max_graph_vram_bytes(max_vram_bytes); + } + void set_circular_axes(bool circular_x, bool circular_y) override { wan.set_circular_axes(circular_x, circular_y); } @@ -433,6 +454,10 @@ struct QwenImageModel : public DiffusionModel { qwen_image.set_flash_attention_enabled(enabled); } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + qwen_image.set_max_graph_vram_bytes(max_vram_bytes); + } + void set_circular_axes(bool circular_x, bool circular_y) override { qwen_image.set_circular_axes(circular_x, circular_y); } @@ -499,6 +524,10 @@ struct ZImageModel : public DiffusionModel { z_image.set_flash_attention_enabled(enabled); } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + z_image.set_max_graph_vram_bytes(max_vram_bytes); + } + void set_circular_axes(bool circular_x, bool circular_y) override { z_image.set_circular_axes(circular_x, circular_y); } @@ -564,6 +593,10 @@ struct ErnieImageModel : public DiffusionModel { ernie_image.set_flash_attention_enabled(enabled); } + void set_max_graph_vram_bytes(size_t max_vram_bytes) override { + ernie_image.set_max_graph_vram_bytes(max_vram_bytes); + } + void set_circular_axes(bool circular_x, bool circular_y) override { ernie_image.set_circular_axes(circular_x, circular_y); } diff --git a/src/ernie_image.hpp b/src/ernie_image.hpp index d17648d2..931794f1 100644 --- a/src/ernie_image.hpp +++ b/src/ernie_image.hpp @@ -295,7 +295,9 @@ namespace ErnieImage { auto c = time_embedding->forward(ctx, sample); // [N, hidden_size] auto mod_params = adaLN_mod->forward(ctx, ggml_silu(ctx->ggml_ctx, c)); // [N, 6 * hidden_size] - auto chunks = ggml_ext_chunk(ctx->ggml_ctx, mod_params, 6, 0); + sd::ggml_graph_cut::mark_graph_cut(hidden_states, "ernie_image.prelude", "hidden_states"); + // sd::ggml_graph_cut::mark_graph_cut(mod_params, "ernie_image.prelude", "mod_params"); + auto chunks = ggml_ext_chunk(ctx->ggml_ctx, mod_params, 6, 
0); std::vector temb; temb.reserve(6); for (auto chunk : chunks) { @@ -305,6 +307,7 @@ namespace ErnieImage { for (int i = 0; i < params.num_layers; i++) { auto layer = std::dynamic_pointer_cast(blocks["layers." + std::to_string(i)]); hidden_states = layer->forward(ctx, hidden_states, pe, temb); + sd::ggml_graph_cut::mark_graph_cut(hidden_states, "ernie_image.layers." + std::to_string(i), "hidden_states"); } hidden_states = final_norm->forward(ctx, hidden_states, c); diff --git a/src/esrgan.hpp b/src/esrgan.hpp index 26c46f5b..f84b77a2 100644 --- a/src/esrgan.hpp +++ b/src/esrgan.hpp @@ -124,27 +124,33 @@ public: auto conv_hr = std::dynamic_pointer_cast(blocks["conv_hr"]); auto conv_last = std::dynamic_pointer_cast(blocks["conv_last"]); - auto feat = conv_first->forward(ctx, x); + auto feat = conv_first->forward(ctx, x); + sd::ggml_graph_cut::mark_graph_cut(feat, "esrgan.prelude", "feat"); auto body_feat = feat; for (int i = 0; i < num_block; i++) { std::string name = "body." + std::to_string(i); auto block = std::dynamic_pointer_cast(blocks[name]); body_feat = block->forward(ctx, body_feat); + sd::ggml_graph_cut::mark_graph_cut(body_feat, "esrgan.body." 
+ std::to_string(i), "feat"); } body_feat = conv_body->forward(ctx, body_feat); feat = ggml_add(ctx->ggml_ctx, feat, body_feat); + sd::ggml_graph_cut::mark_graph_cut(feat, "esrgan.body.out", "feat"); // upsample if (scale >= 2) { auto conv_up1 = std::dynamic_pointer_cast(blocks["conv_up1"]); feat = lrelu(ctx, conv_up1->forward(ctx, ggml_upscale(ctx->ggml_ctx, feat, 2, GGML_SCALE_MODE_NEAREST))); + sd::ggml_graph_cut::mark_graph_cut(feat, "esrgan.up1", "feat"); if (scale == 4) { auto conv_up2 = std::dynamic_pointer_cast(blocks["conv_up2"]); feat = lrelu(ctx, conv_up2->forward(ctx, ggml_upscale(ctx->ggml_ctx, feat, 2, GGML_SCALE_MODE_NEAREST))); + sd::ggml_graph_cut::mark_graph_cut(feat, "esrgan.up2", "feat"); } } // for all scales auto out = conv_last->forward(ctx, lrelu(ctx, conv_hr->forward(ctx, feat))); + sd::ggml_graph_cut::mark_graph_cut(out, "esrgan.final", "out"); return out; } }; diff --git a/src/flux.hpp b/src/flux.hpp index e6bf002f..732a3719 100644 --- a/src/flux.hpp +++ b/src/flux.hpp @@ -928,6 +928,9 @@ namespace Flux { } txt = txt_in->forward(ctx, txt); + sd::ggml_graph_cut::mark_graph_cut(img, "flux.prelude", "img"); + sd::ggml_graph_cut::mark_graph_cut(txt, "flux.prelude", "txt"); + sd::ggml_graph_cut::mark_graph_cut(vec, "flux.prelude", "vec"); for (int i = 0; i < params.depth; i++) { if (skip_layers.size() > 0 && std::find(skip_layers.begin(), skip_layers.end(), i) != skip_layers.end()) { @@ -939,6 +942,8 @@ namespace Flux { auto img_txt = block->forward(ctx, img, txt, vec, pe, txt_img_mask, ds_img_mods, ds_txt_mods); img = img_txt.first; // [N, n_img_token, hidden_size] txt = img_txt.second; // [N, n_txt_token, hidden_size] + sd::ggml_graph_cut::mark_graph_cut(img, "flux.double_blocks." + std::to_string(i), "img"); + sd::ggml_graph_cut::mark_graph_cut(txt, "flux.double_blocks." 
+ std::to_string(i), "txt"); } auto txt_img = ggml_concat(ctx->ggml_ctx, txt, img, 1); // [N, n_txt_token + n_img_token, hidden_size] @@ -949,6 +954,7 @@ namespace Flux { auto block = std::dynamic_pointer_cast(blocks["single_blocks." + std::to_string(i)]); txt_img = block->forward(ctx, txt_img, vec, pe, txt_img_mask, ss_mods); + sd::ggml_graph_cut::mark_graph_cut(txt_img, "flux.single_blocks." + std::to_string(i), "txt_img"); } img = ggml_view_3d(ctx->ggml_ctx, diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index 8b748194..36230322 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -26,6 +27,7 @@ #include "ggml-backend.h" #include "ggml.h" #include "ggml_extend_backend.hpp" +#include "ggml_graph_cut.h" #include "model.h" #include "tensor.hpp" @@ -1708,6 +1710,8 @@ struct GGMLRunnerContext { struct GGMLRunner { protected: typedef std::function get_graph_cb_t; + using GraphCutSegment = sd::ggml_graph_cut::Segment; + using GraphCutPlan = sd::ggml_graph_cut::Plan; ggml_backend_t params_backend = nullptr; ggml_backend_t runtime_backend = nullptr; @@ -1724,6 +1728,11 @@ protected: ggml_context* compute_ctx = nullptr; ggml_gallocr* compute_allocr = nullptr; + ggml_context* partial_offload_ctx = nullptr; + ggml_backend_buffer_t partial_runtime_params_buffer = nullptr; + std::vector> partial_offload_pairs; + size_t max_graph_vram_bytes = 0; + std::shared_ptr weight_adapter = nullptr; std::vector one_vec = {1.f}; @@ -1741,6 +1750,9 @@ protected: bool circular_x_enabled = false; bool circular_y_enabled = false; + sd::ggml_graph_cut::PlanCache graph_cut_plan_cache_; + std::unordered_set params_tensor_set_; + template static sd::Tensor take_or_empty(std::optional> tensor) { if (!tensor.has_value()) { @@ -1775,6 +1787,7 @@ protected: params_ctx = ggml_init(params); GGML_ASSERT(params_ctx != nullptr); + params_tensor_set_.clear(); if (params_backend != runtime_backend) { offload_ctx 
= ggml_init(params); GGML_ASSERT(offload_ctx != nullptr); @@ -1786,10 +1799,15 @@ protected: ggml_free(params_ctx); params_ctx = nullptr; } + params_tensor_set_.clear(); if (offload_ctx != nullptr) { ggml_free(offload_ctx); offload_ctx = nullptr; } + if (partial_offload_ctx != nullptr) { + ggml_free(partial_offload_ctx); + partial_offload_ctx = nullptr; + } } void alloc_cache_ctx() { @@ -1824,6 +1842,17 @@ protected: ggml_free(compute_ctx); compute_ctx = nullptr; } + backend_tensor_data_map.clear(); + } + + void rebuild_params_tensor_set() { + params_tensor_set_.clear(); + if (params_ctx == nullptr) { + return; + } + for (ggml_tensor* t = ggml_get_first_tensor(params_ctx); t != nullptr; t = ggml_get_next_tensor(params_ctx, t)) { + params_tensor_set_.insert(t); + } } void prepare_build_in_tensor_before() { @@ -1859,13 +1888,25 @@ protected: return gf; } - bool alloc_compute_buffer(get_graph_cb_t get_graph) { + bool prepare_compute_graph(get_graph_cb_t get_graph, + ggml_cgraph** gf_out) { + GGML_ASSERT(gf_out != nullptr); + + reset_compute_ctx(); + ggml_cgraph* gf = get_compute_graph(get_graph); + if (gf == nullptr) { + free_compute_ctx(); + return false; + } + + *gf_out = gf; + return true; + } + + bool alloc_compute_buffer(ggml_cgraph* gf) { if (compute_allocr != nullptr) { return true; } - reset_compute_ctx(); - ggml_cgraph* gf = get_compute_graph(get_graph); - backend_tensor_data_map.clear(); compute_allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(runtime_backend)); if (!ggml_gallocr_reserve(compute_allocr, gf)) { @@ -1891,47 +1932,132 @@ protected: } } - void copy_cache_tensors_to_cache_buffer() { - if (cache_tensor_map.size() == 0) { - return; + bool copy_cache_tensors_to_cache_buffer(const std::unordered_set* cache_keep_names = nullptr) { + ggml_context* old_cache_ctx = cache_ctx; + ggml_backend_buffer_t old_cache_buffer = cache_buffer; + cache_ctx = nullptr; + cache_buffer = nullptr; + std::map merged_cache_sources; + if (old_cache_ctx != 
nullptr) { + for (ggml_tensor* tensor = ggml_get_first_tensor(old_cache_ctx); tensor != nullptr; tensor = ggml_get_next_tensor(old_cache_ctx, tensor)) { + if (cache_keep_names != nullptr && cache_keep_names->find(tensor->name) == cache_keep_names->end()) { + continue; + } + merged_cache_sources[tensor->name] = tensor; + } } - free_cache_ctx_and_buffer(); + for (const auto& kv : cache_tensor_map) { + if (cache_keep_names != nullptr && cache_keep_names->find(kv.first) == cache_keep_names->end()) { + continue; + } + merged_cache_sources[kv.first] = kv.second; + } + cache_tensor_map.clear(); + if (merged_cache_sources.empty()) { + if (old_cache_buffer != nullptr) { + ggml_backend_buffer_free(old_cache_buffer); + } + if (old_cache_ctx != nullptr) { + ggml_free(old_cache_ctx); + } + return true; + } + alloc_cache_ctx(); - GGML_ASSERT(cache_buffer == nullptr); - std::map runtime_tensor_to_cache_tensor; - for (auto kv : cache_tensor_map) { - auto cache_tensor = ggml_dup_tensor(cache_ctx, kv.second); + std::vector> source_to_cache_tensors; + source_to_cache_tensors.reserve(merged_cache_sources.size()); + for (const auto& kv : merged_cache_sources) { + ggml_tensor* source_tensor = sd::ggml_graph_cut::cache_source_tensor(kv.second); + auto cache_tensor = ggml_dup_tensor(cache_ctx, source_tensor); ggml_set_name(cache_tensor, kv.first.c_str()); - runtime_tensor_to_cache_tensor[kv.second] = cache_tensor; + source_to_cache_tensors.push_back({source_tensor, cache_tensor}); } size_t num_tensors = ggml_tensor_num(cache_ctx); cache_buffer = ggml_backend_alloc_ctx_tensors(cache_ctx, runtime_backend); GGML_ASSERT(cache_buffer != nullptr); - for (auto kv : runtime_tensor_to_cache_tensor) { - ggml_backend_tensor_copy(kv.first, kv.second); + for (const auto& kv : source_to_cache_tensors) { + ggml_tensor* src = kv.first; + ggml_tensor* dst = kv.second; + ggml_backend_buffer_t src_buf = sd::ggml_graph_cut::tensor_buffer(src); + ggml_backend_buffer_t dst_buf = 
sd::ggml_graph_cut::tensor_buffer(dst); + if (src_buf == nullptr || dst_buf == nullptr) { + LOG_ERROR("%s cache copy tensor buffer missing: name=%s src_buffer=%p src_view_src=%p src_view_src_buffer=%p dst_buffer=%p", + get_desc().c_str(), + src && src->name[0] != '\0' ? src->name : "", + src ? src->buffer : nullptr, + src ? src->view_src : nullptr, + (src && src->view_src) ? src->view_src->buffer : nullptr, + dst ? dst->buffer : nullptr); + return false; + } + const bool use_staging_copy = src->view_src != nullptr || !ggml_is_contiguous(src) || src->buffer == nullptr; + if (use_staging_copy) { + std::vector host_data(ggml_nbytes(src)); + ggml_backend_tensor_get(src, host_data.data(), 0, host_data.size()); + ggml_backend_tensor_set(dst, host_data.data(), 0, host_data.size()); + } else { + ggml_backend_tensor_copy(src, dst); + } } ggml_backend_synchronize(runtime_backend); - cache_tensor_map.clear(); size_t cache_buffer_size = ggml_backend_buffer_get_size(cache_buffer); LOG_DEBUG("%s cache backend buffer size = % 6.2f MB(%s) (%i tensors)", get_desc().c_str(), cache_buffer_size / (1024.f * 1024.f), ggml_backend_is_cpu(runtime_backend) ? 
"RAM" : "VRAM", num_tensors); + if (old_cache_buffer != nullptr) { + ggml_backend_buffer_free(old_cache_buffer); + } + if (old_cache_ctx != nullptr) { + ggml_free(old_cache_ctx); + } + return true; } - void copy_data_to_backend_tensor() { + void copy_data_to_backend_tensor(ggml_cgraph* gf, bool clear_after_copy = true) { + GGML_ASSERT(gf != nullptr); + std::unordered_set graph_tensor_set; + const int n_leafs = sd::ggml_graph_cut::leaf_count(gf); + const int n_nodes = ggml_graph_n_nodes(gf); + graph_tensor_set.reserve(static_cast(n_leafs + n_nodes)); + for (int i = 0; i < n_leafs; ++i) { + graph_tensor_set.insert(sd::ggml_graph_cut::leaf_tensor(gf, i)); + } + for (int i = 0; i < n_nodes; ++i) { + graph_tensor_set.insert(ggml_graph_node(gf, i)); + } + for (auto& kv : backend_tensor_data_map) { auto tensor = kv.first; auto data = kv.second; + if (graph_tensor_set.find(tensor) == graph_tensor_set.end()) { + continue; + } + + ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; + if (buf == nullptr) { + LOG_WARN("%s graph exec skip tensor copy: name=%s op=%s reason=buffer_not_set data=%p view_src=%p view_src_buffer=%p", + get_desc().c_str(), + tensor && tensor->name[0] != '\0' ? tensor->name : "", + tensor ? ggml_op_name(tensor->op) : "", + data, + tensor ? tensor->view_src : nullptr, + (tensor && tensor->view_src) ? 
tensor->view_src->buffer : nullptr); + continue; + } + ggml_backend_tensor_set(tensor, data, 0, ggml_nbytes(tensor)); } - backend_tensor_data_map.clear(); + if (clear_after_copy) { + backend_tensor_data_map.clear(); + } } - bool offload_params_to_runtime_backend() { + bool offload_all_params() { + restore_partial_params(); if (params_backend == runtime_backend) { return true; } @@ -1958,6 +2084,7 @@ protected: num_tensors); return false; } + ggml_backend_buffer_set_usage(runtime_params_buffer, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); ggml_tensor* t = ggml_get_first_tensor(params_ctx); ggml_tensor* offload_t = ggml_get_first_tensor(offload_ctx); @@ -1987,7 +2114,85 @@ protected: return true; } - void offload_params_to_params_backend() { + bool offload_partial_params(const std::vector& tensors) { + restore_partial_params(); + if (params_backend == runtime_backend) { + return true; + } + if (tensors.empty()) { + return true; + } + GGML_ASSERT(!params_on_runtime_backend); + GGML_ASSERT(partial_runtime_params_buffer == nullptr); + + std::vector unique_tensors; + std::unordered_set seen_tensors; + unique_tensors.reserve(tensors.size()); + seen_tensors.reserve(tensors.size()); + for (ggml_tensor* tensor : tensors) { + if (tensor == nullptr) { + continue; + } + if (seen_tensors.insert(tensor).second) { + unique_tensors.push_back(tensor); + } + } + if (unique_tensors.empty()) { + return true; + } + + ggml_init_params params; + params.mem_size = std::max(1, unique_tensors.size()) * ggml_tensor_overhead(); + params.mem_buffer = nullptr; + params.no_alloc = true; + + partial_offload_ctx = ggml_init(params); + GGML_ASSERT(partial_offload_ctx != nullptr); + + partial_offload_pairs.clear(); + partial_offload_pairs.reserve(unique_tensors.size()); + + for (ggml_tensor* tensor : unique_tensors) { + GGML_ASSERT(tensor->view_src == nullptr); + ggml_tensor* offload_tensor = ggml_dup_tensor(partial_offload_ctx, tensor); + ggml_set_name(offload_tensor, tensor->name); + 
partial_offload_pairs.push_back({tensor, offload_tensor}); + } + + partial_runtime_params_buffer = ggml_backend_alloc_ctx_tensors(partial_offload_ctx, runtime_backend); + if (partial_runtime_params_buffer == nullptr) { + LOG_ERROR("%s alloc partial runtime params backend buffer failed, num_tensors = %zu", + get_desc().c_str(), + partial_offload_pairs.size()); + ggml_free(partial_offload_ctx); + partial_offload_ctx = nullptr; + partial_offload_pairs.clear(); + return false; + } + ggml_backend_buffer_set_usage(partial_runtime_params_buffer, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); + + for (auto& pair : partial_offload_pairs) { + ggml_tensor* tensor = pair.first; + ggml_tensor* offload_tensor = pair.second; + + ggml_backend_tensor_copy(tensor, offload_tensor); + std::swap(tensor->buffer, offload_tensor->buffer); + std::swap(tensor->data, offload_tensor->data); + std::swap(tensor->extra, offload_tensor->extra); + } + + size_t params_buffer_size = ggml_backend_buffer_get_size(partial_runtime_params_buffer); + LOG_DEBUG("%s offload partial params (%6.2f MB, %zu tensors) to runtime backend (%s)", + get_desc().c_str(), + params_buffer_size / (1024.f * 1024.f), + partial_offload_pairs.size(), + ggml_backend_name(runtime_backend)); + + return true; + } + + void restore_all_params() { + restore_partial_params(); if (!params_on_runtime_backend) { return; } @@ -2013,17 +2218,323 @@ protected: params_on_runtime_backend = false; } + void restore_partial_params() { + if (partial_offload_pairs.empty()) { + if (partial_runtime_params_buffer != nullptr) { + ggml_backend_buffer_free(partial_runtime_params_buffer); + partial_runtime_params_buffer = nullptr; + } + if (partial_offload_ctx != nullptr) { + ggml_free(partial_offload_ctx); + partial_offload_ctx = nullptr; + } + return; + } + + for (auto& pair : partial_offload_pairs) { + ggml_tensor* tensor = pair.first; + ggml_tensor* offload_tensor = pair.second; + + tensor->buffer = offload_tensor->buffer; + tensor->data = 
offload_tensor->data; + tensor->extra = offload_tensor->extra; + offload_tensor->buffer = nullptr; + offload_tensor->data = nullptr; + offload_tensor->extra = nullptr; + } + + if (partial_runtime_params_buffer != nullptr) { + ggml_backend_buffer_free(partial_runtime_params_buffer); + partial_runtime_params_buffer = nullptr; + } + partial_offload_pairs.clear(); + + if (partial_offload_ctx != nullptr) { + ggml_free(partial_offload_ctx); + partial_offload_ctx = nullptr; + } + } + + bool should_use_graph_cut_segmented_compute(const GraphCutPlan& plan) { + return plan.has_cuts && + plan.valid && + max_graph_vram_bytes > 0 && + plan.segments.size() > 1 && + params_backend != runtime_backend && + !ggml_backend_is_cpu(runtime_backend); + } + + bool can_attempt_graph_cut_segmented_compute() const { + return max_graph_vram_bytes > 0 && + params_backend != runtime_backend && + !ggml_backend_is_cpu(runtime_backend); + } + + bool resolve_graph_cut_plan(ggml_cgraph* gf, + GraphCutPlan* plan_out) { + GGML_ASSERT(plan_out != nullptr); + GGML_ASSERT(gf != nullptr); + *plan_out = sd::ggml_graph_cut::resolve_plan(runtime_backend, + gf, + &graph_cut_plan_cache_, + max_graph_vram_bytes, + params_tensor_set_, + get_desc().c_str()); + return true; + } + + void reset_segment_runtime_tensors(const GraphCutSegment& segment, + ggml_cgraph* gf) { + GGML_ASSERT(gf != nullptr); + + for (const auto& input : segment.input_refs) { + ggml_tensor* input_tensor = sd::ggml_graph_cut::input_tensor(gf, input); + if (input_tensor == nullptr) { + continue; + } + switch (input.type) { + case GraphCutSegment::INPUT_PREVIOUS_CUT: + case GraphCutSegment::INPUT_EXTERNAL: + input_tensor->buffer = nullptr; + input_tensor->data = nullptr; + input_tensor->extra = nullptr; + break; + case GraphCutSegment::INPUT_PARAM: + break; + } + } + + for (int node_idx : segment.internal_node_indices) { + ggml_tensor* node = ggml_graph_node(gf, node_idx); + if (node == nullptr) { + continue; + } + node->buffer = nullptr; + 
node->data = nullptr; + node->extra = nullptr; + } + } + + bool bind_segment_cached_inputs(ggml_cgraph* gf, const GraphCutSegment& segment) { + GGML_ASSERT(gf != nullptr); + for (const auto& input : segment.input_refs) { + ggml_tensor* input_tensor = sd::ggml_graph_cut::input_tensor(gf, input); + if (input_tensor == nullptr) { + continue; + } + switch (input.type) { + case GraphCutSegment::INPUT_PREVIOUS_CUT: { + ggml_tensor* cache_tensor = get_cache_tensor_by_name(input.display_name); + if (cache_tensor == nullptr) { + LOG_ERROR("%s missing graph cut cache tensor: %s", + get_desc().c_str(), + input.display_name.c_str()); + return false; + } + if (input_tensor->view_src != nullptr) { + input_tensor->view_src = cache_tensor; + input_tensor->buffer = nullptr; + input_tensor->data = cache_tensor->data == nullptr + ? nullptr + : static_cast(static_cast(cache_tensor->data) + input_tensor->view_offs); + input_tensor->extra = cache_tensor->extra; + } else { + input_tensor->buffer = cache_tensor->buffer; + input_tensor->data = cache_tensor->data; + input_tensor->extra = cache_tensor->extra; + } + for (int src_idx = 0; src_idx < GGML_MAX_SRC; ++src_idx) { + input_tensor->src[src_idx] = nullptr; + } + input_tensor->op = GGML_OP_NONE; + break; + } + case GraphCutSegment::INPUT_EXTERNAL: + case GraphCutSegment::INPUT_PARAM: + break; + } + } + return true; + } + + template + std::optional> execute_graph(ggml_cgraph* gf, + int n_threads, + bool free_compute_buffer_immediately, + const std::vector& runtime_param_tensors, + bool preserve_backend_tensor_data_map, + bool no_return = false, + const std::unordered_set* cache_keep_names = nullptr) { + int64_t t_execute_begin = ggml_time_ms(); + const bool use_partial_param_offload = !runtime_param_tensors.empty(); + int64_t t_offload_begin = ggml_time_ms(); + if (use_partial_param_offload) { + if (!offload_partial_params(runtime_param_tensors)) { + LOG_ERROR("%s offload partial params to runtime backend failed", get_desc().c_str()); + 
return std::nullopt; + } + } else { + if (!offload_all_params()) { + LOG_ERROR("%s offload params to runtime backend failed", get_desc().c_str()); + return std::nullopt; + } + } + int64_t t_offload_end = ggml_time_ms(); + + int64_t t_alloc_begin = ggml_time_ms(); + if (!alloc_compute_buffer(gf)) { + LOG_ERROR("%s alloc compute buffer failed", get_desc().c_str()); + if (use_partial_param_offload) { + restore_partial_params(); + } + return std::nullopt; + } + + if (!ggml_gallocr_alloc_graph(compute_allocr, gf)) { + LOG_ERROR("%s alloc compute graph failed", get_desc().c_str()); + if (free_compute_buffer_immediately) { + free_compute_buffer(); + } else if (use_partial_param_offload) { + restore_partial_params(); + } + return std::nullopt; + } + int64_t t_alloc_end = ggml_time_ms(); + + int64_t t_copy_begin = ggml_time_ms(); + copy_data_to_backend_tensor(gf, !preserve_backend_tensor_data_map); + int64_t t_copy_end = ggml_time_ms(); + if (ggml_backend_is_cpu(runtime_backend)) { + ggml_backend_cpu_set_n_threads(runtime_backend, n_threads); + } + + int64_t t_compute_begin = ggml_time_ms(); + ggml_status status = ggml_backend_graph_compute(runtime_backend, gf); + int64_t t_compute_end = ggml_time_ms(); + if (status != GGML_STATUS_SUCCESS) { + LOG_ERROR("%s compute failed: %s", get_desc().c_str(), ggml_status_to_string(status)); + if (free_compute_buffer_immediately) { + free_compute_buffer(); + } else if (use_partial_param_offload) { + restore_partial_params(); + } + return std::nullopt; + } + + int64_t t_cache_begin = ggml_time_ms(); + if (!copy_cache_tensors_to_cache_buffer(cache_keep_names)) { + if (free_compute_buffer_immediately) { + free_compute_buffer(); + } else if (use_partial_param_offload) { + restore_partial_params(); + } + return std::nullopt; + } + int64_t t_cache_end = ggml_time_ms(); + auto result = ggml_get_tensor(compute_ctx, final_result_name.c_str()); + std::optional> output; + if (!no_return) { + output = sd::make_sd_tensor_from_ggml(result); + } else 
{ + output = sd::Tensor(); + } + + if (free_compute_buffer_immediately) { + free_compute_buffer(); + } else if (use_partial_param_offload) { + restore_partial_params(); + } + if (use_partial_param_offload) { + LOG_DEBUG("%s execute_graph timing: offload=%lld ms alloc=%lld ms copy_in=%lld ms compute=%lld ms cache=%lld ms total=%lld ms", + get_desc().c_str(), + t_offload_end - t_offload_begin, + t_alloc_end - t_alloc_begin, + t_copy_end - t_copy_begin, + t_compute_end - t_compute_begin, + t_cache_end - t_cache_begin, + ggml_time_ms() - t_execute_begin); + } + return output; + } + + template + std::optional> compute_with_graph_cuts(ggml_cgraph* gf, + const GraphCutPlan& plan, + int n_threads, + bool free_compute_buffer_immediately, + bool no_return = false) { + GGML_ASSERT(gf != nullptr); + + free_compute_buffer(); + free_cache_ctx_and_buffer(); + + std::optional> output = sd::Tensor(); + for (size_t seg_idx = 0; seg_idx < plan.segments.size(); ++seg_idx) { + int64_t t_segment_begin = ggml_time_ms(); + const auto& segment = plan.segments[seg_idx]; + auto future_cut_names = sd::ggml_graph_cut::collect_future_input_names(gf, plan, seg_idx); + LOG_DEBUG("%s graph cut executing segment %zu/%zu: %s", + get_desc().c_str(), + seg_idx + 1, + plan.segments.size(), + segment.group_name.c_str()); + + reset_segment_runtime_tensors(segment, gf); + if (!bind_segment_cached_inputs(gf, segment)) { + free_cache_ctx_and_buffer(); + free_compute_buffer(); + free_compute_ctx(); + return std::nullopt; + } + + const bool is_last_segment = seg_idx + 1 == plan.segments.size(); + if (!is_last_segment) { + for (size_t output_idx = 0; output_idx < segment.output_node_indices.size(); ++output_idx) { + ggml_tensor* output_tensor = sd::ggml_graph_cut::output_tensor(gf, segment, output_idx); + if (output_tensor != nullptr && + sd::ggml_graph_cut::is_graph_cut_tensor(output_tensor) && + future_cut_names.find(output_tensor->name) != future_cut_names.end()) { + cache(output_tensor->name, 
output_tensor); + } + } + } + + ggml_context* segment_graph_ctx = nullptr; + ggml_cgraph* segment_graph = sd::ggml_graph_cut::build_segment_graph(gf, segment, &segment_graph_ctx); + auto segment_output = execute_graph(segment_graph, + n_threads, + true, + sd::ggml_graph_cut::runtime_param_tensors(gf, segment, get_desc().c_str()), + true, + !is_last_segment || no_return, + &future_cut_names); + ggml_free(segment_graph_ctx); + if (!segment_output.has_value()) { + free_cache_ctx_and_buffer(); + free_compute_buffer(); + free_compute_ctx(); + return std::nullopt; + } + output = std::move(segment_output); + } + + backend_tensor_data_map.clear(); + free_cache_ctx_and_buffer(); + free_compute_ctx(); + return output; + } + public: virtual std::string get_desc() = 0; GGMLRunner(ggml_backend_t backend, bool offload_params_to_cpu = false) : runtime_backend(backend) { - alloc_params_ctx(); if (!ggml_backend_is_cpu(runtime_backend) && offload_params_to_cpu) { params_backend = ggml_backend_cpu_init(); } else { params_backend = runtime_backend; } + alloc_params_ctx(); } virtual ~GGMLRunner() { @@ -2063,6 +2574,8 @@ public: num_tensors); return false; } + rebuild_params_tensor_set(); + ggml_backend_buffer_set_usage(params_buffer, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); size_t params_buffer_size = ggml_backend_buffer_get_size(params_buffer); LOG_DEBUG("%s params backend buffer size = % 6.2f MB(%s) (%i tensors)", get_desc().c_str(), @@ -2096,7 +2609,8 @@ public: ggml_gallocr_free(compute_allocr); compute_allocr = nullptr; } - offload_params_to_params_backend(); + restore_partial_params(); + restore_all_params(); } // do copy after alloc graph @@ -2160,41 +2674,36 @@ public: int n_threads, bool free_compute_buffer_immediately, bool no_return = false) { - if (!offload_params_to_runtime_backend()) { - LOG_ERROR("%s offload params to runtime backend failed", get_desc().c_str()); + ggml_cgraph* gf = nullptr; + if (!prepare_compute_graph(get_graph, &gf)) { return std::nullopt; } - if 
(!alloc_compute_buffer(get_graph)) { + GGML_ASSERT(gf != nullptr); + + if (can_attempt_graph_cut_segmented_compute()) { + GraphCutPlan plan; + if (!resolve_graph_cut_plan(gf, &plan)) { + free_compute_ctx(); + return std::nullopt; + } + if (should_use_graph_cut_segmented_compute(plan)) { + return compute_with_graph_cuts(gf, + plan, + n_threads, + free_compute_buffer_immediately, + no_return); + } + } + if (!alloc_compute_buffer(gf)) { LOG_ERROR("%s alloc compute buffer failed", get_desc().c_str()); return std::nullopt; } - reset_compute_ctx(); - ggml_cgraph* gf = get_compute_graph(get_graph); - if (!ggml_gallocr_alloc_graph(compute_allocr, gf)) { - LOG_ERROR("%s alloc compute graph failed", get_desc().c_str()); - return std::nullopt; - } - copy_data_to_backend_tensor(); - if (ggml_backend_is_cpu(runtime_backend)) { - ggml_backend_cpu_set_n_threads(runtime_backend, n_threads); - } - - ggml_status status = ggml_backend_graph_compute(runtime_backend, gf); - if (status != GGML_STATUS_SUCCESS) { - LOG_ERROR("%s compute failed: %s", get_desc().c_str(), ggml_status_to_string(status)); - return std::nullopt; - } - copy_cache_tensors_to_cache_buffer(); - auto result = ggml_get_tensor(compute_ctx, final_result_name.c_str()); - std::optional> output; - if (!no_return) { - output = sd::make_sd_tensor_from_ggml(result); - } - - if (free_compute_buffer_immediately) { - free_compute_buffer(); - } - return output; + return execute_graph(gf, + n_threads, + free_compute_buffer_immediately, + {}, + false, + no_return); } void set_flash_attention_enabled(bool enabled) { @@ -2214,6 +2723,10 @@ public: weight_adapter = adapter; } + void set_max_graph_vram_bytes(size_t max_vram_bytes) { + max_graph_vram_bytes = max_vram_bytes; + } + ggml_backend_t get_runtime_backend() { return runtime_backend; } diff --git a/src/ggml_graph_cut.cpp b/src/ggml_graph_cut.cpp new file mode 100644 index 00000000..f206f2d2 --- /dev/null +++ b/src/ggml_graph_cut.cpp @@ -0,0 +1,676 @@ +#include 
"ggml_graph_cut.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "ggml-alloc.h" +#include "ggml-backend.h" +#include "util.h" + +#include "../ggml/src/ggml-impl.h" + +namespace sd::ggml_graph_cut { + + static std::string graph_cut_tensor_display_name(const ggml_tensor* tensor) { + if (tensor == nullptr) { + return ""; + } + if (tensor->name[0] != '\0') { + return tensor->name; + } + return sd_format("", (const void*)tensor); + } + + static int graph_leaf_index(ggml_cgraph* gf, const ggml_tensor* tensor) { + GGML_ASSERT(gf != nullptr); + GGML_ASSERT(tensor != nullptr); + for (int i = 0; i < gf->n_leafs; ++i) { + if (gf->leafs[i] == tensor) { + return i; + } + } + return -1; + } + + static bool is_params_tensor(const std::unordered_set& params_tensor_set, + const ggml_tensor* tensor) { + if (tensor == nullptr) { + return false; + } + return params_tensor_set.find(tensor) != params_tensor_set.end(); + } + + static Plan::InputShape input_shape(const ggml_tensor* tensor) { + Plan::InputShape shape; + if (tensor == nullptr) { + return shape; + } + shape.type = tensor->type; + for (int i = 0; i < GGML_MAX_DIMS; ++i) { + shape.ne[static_cast(i)] = tensor->ne[i]; + } + return shape; + } + + static size_t graph_cut_segment_vram_bytes(const Segment& segment) { + return segment.compute_buffer_size + + segment.input_param_bytes + + segment.input_previous_cut_bytes + + segment.output_bytes; + } + + static Segment make_segment_seed(const Plan& plan, + size_t start_segment_index, + size_t end_segment_index) { + GGML_ASSERT(start_segment_index < plan.segments.size()); + GGML_ASSERT(end_segment_index < plan.segments.size()); + GGML_ASSERT(start_segment_index <= end_segment_index); + + Segment seed; + const auto& start_segment = plan.segments[start_segment_index]; + const auto& target_segment = plan.segments[end_segment_index]; + std::unordered_set seen_output_node_indices; + for (size_t seg_idx = start_segment_index; seg_idx <= 
end_segment_index; ++seg_idx) { + for (int output_node_index : plan.segments[seg_idx].output_node_indices) { + if (seen_output_node_indices.insert(output_node_index).second) { + seed.output_node_indices.push_back(output_node_index); + } + } + } + if (start_segment_index == end_segment_index) { + seed.group_name = target_segment.group_name; + } else { + seed.group_name = sd_format("%s..%s", + start_segment.group_name.c_str(), + target_segment.group_name.c_str()); + } + return seed; + } + + static void build_segment(ggml_cgraph* gf, + Plan& plan, + Segment& segment, + const std::unordered_map& producer_index, + std::unordered_set& available_cut_output_node_indices, + ggml_backend_t backend, + const std::unordered_set& params_tensor_set, + const char* log_desc) { + std::set internal_nodes; + std::unordered_set input_seen; + std::vector input_refs; + + std::stack work_stack; + for (int output_node_index : segment.output_node_indices) { + ggml_tensor* output = ggml_graph_node(gf, output_node_index); + if (output != nullptr) { + work_stack.push(output); + } + } + + while (!work_stack.empty()) { + ggml_tensor* tensor = work_stack.top(); + work_stack.pop(); + + if (tensor == nullptr) { + continue; + } + + auto producer_it = producer_index.find(tensor); + if (producer_it == producer_index.end()) { + if (input_seen.insert(tensor).second) { + Segment::InputRef input_ref; + input_ref.type = is_params_tensor(params_tensor_set, tensor) ? 
Segment::INPUT_PARAM : Segment::INPUT_EXTERNAL; + input_ref.display_name = graph_cut_tensor_display_name(tensor); + input_ref.leaf_index = graph_leaf_index(gf, tensor); + input_refs.push_back(std::move(input_ref)); + } + continue; + } + + int node_idx = producer_it->second; + if (available_cut_output_node_indices.find(node_idx) != available_cut_output_node_indices.end()) { + if (input_seen.insert(tensor).second) { + Segment::InputRef input_ref; + input_ref.type = Segment::INPUT_PREVIOUS_CUT; + input_ref.display_name = graph_cut_tensor_display_name(tensor); + input_ref.node_index = node_idx; + input_refs.push_back(std::move(input_ref)); + } + continue; + } + + if (!internal_nodes.insert(node_idx).second) { + continue; + } + + ggml_tensor* node = ggml_graph_node(gf, node_idx); + for (int src_idx = 0; src_idx < GGML_MAX_SRC; ++src_idx) { + if (node->src[src_idx] != nullptr) { + work_stack.push(node->src[src_idx]); + } + } + } + + if (!internal_nodes.empty()) { + segment.internal_node_indices.assign(internal_nodes.begin(), internal_nodes.end()); + } + + std::sort(input_refs.begin(), + input_refs.end(), + [](const Segment::InputRef& a, const Segment::InputRef& b) { + if (a.type != b.type) { + return a.type < b.type; + } + return a.display_name < b.display_name; + }); + segment.input_refs = input_refs; + for (const auto& input : input_refs) { + ggml_tensor* current_input = input_tensor(gf, input); + size_t tensor_bytes = current_input == nullptr + ? 0 + : (input.type == Segment::INPUT_PREVIOUS_CUT + ? 
cache_tensor_bytes(current_input) + : ggml_nbytes(current_input)); + switch (input.type) { + case Segment::INPUT_PREVIOUS_CUT: + segment.input_previous_cut_bytes += tensor_bytes; + break; + case Segment::INPUT_PARAM: + segment.input_param_bytes += tensor_bytes; + break; + case Segment::INPUT_EXTERNAL: + default: + segment.input_external_bytes += tensor_bytes; + break; + } + } + for (int output_node_index : segment.output_node_indices) { + ggml_tensor* output = ggml_graph_node(gf, output_node_index); + segment.output_bytes += cache_tensor_bytes(output); + } + segment.compute_buffer_size = measure_segment_compute_buffer(backend, gf, segment, log_desc); + + for (int output_node_index : segment.output_node_indices) { + available_cut_output_node_indices.insert(output_node_index); + } + plan.segments.push_back(std::move(segment)); + } + + bool is_graph_cut_tensor(const ggml_tensor* tensor) { + if (tensor == nullptr || tensor->name[0] == '\0') { + return false; + } + return std::strncmp(tensor->name, GGML_RUNNER_CUT_PREFIX, std::strlen(GGML_RUNNER_CUT_PREFIX)) == 0; + } + + std::string make_graph_cut_name(const std::string& group, const std::string& output) { + return std::string(GGML_RUNNER_CUT_PREFIX) + group + "|" + output; + } + + void mark_graph_cut(ggml_tensor* tensor, const std::string& group, const std::string& output) { + if (tensor == nullptr) { + return; + } + auto name = make_graph_cut_name(group, output); + ggml_set_name(tensor, name.c_str()); + } + + int leaf_count(ggml_cgraph* gf) { + GGML_ASSERT(gf != nullptr); + return gf->n_leafs; + } + + ggml_tensor* leaf_tensor(ggml_cgraph* gf, int leaf_index) { + GGML_ASSERT(gf != nullptr); + if (leaf_index < 0 || leaf_index >= gf->n_leafs) { + return nullptr; + } + return gf->leafs[leaf_index]; + } + + ggml_backend_buffer_t tensor_buffer(const ggml_tensor* tensor) { + if (tensor == nullptr) { + return nullptr; + } + return tensor->view_src ? 
tensor->view_src->buffer : tensor->buffer; + } + + ggml_tensor* cache_source_tensor(ggml_tensor* tensor) { + if (tensor == nullptr) { + return nullptr; + } + return tensor->view_src ? tensor->view_src : tensor; + } + + size_t cache_tensor_bytes(const ggml_tensor* tensor) { + if (tensor == nullptr) { + return 0; + } + const ggml_tensor* cache_src = tensor->view_src ? tensor->view_src : tensor; + return ggml_nbytes(cache_src); + } + + bool plan_matches_graph(ggml_cgraph* gf, const Plan& plan) { + GGML_ASSERT(gf != nullptr); + if (ggml_graph_n_nodes(gf) != plan.n_nodes || gf->n_leafs != plan.n_leafs) { + return false; + } + for (const auto& input_shape_ref : plan.input_shapes) { + if (input_shape_ref.leaf_index < 0 || input_shape_ref.leaf_index >= gf->n_leafs) { + return false; + } + ggml_tensor* leaf = gf->leafs[input_shape_ref.leaf_index]; + if (leaf == nullptr || input_shape_ref.type != leaf->type) { + return false; + } + for (int d = 0; d < GGML_MAX_DIMS; ++d) { + if (input_shape_ref.ne[static_cast(d)] != leaf->ne[d]) { + return false; + } + } + } + return true; + } + + ggml_tensor* output_tensor(ggml_cgraph* gf, const Segment& segment, size_t output_index) { + GGML_ASSERT(gf != nullptr); + if (output_index >= segment.output_node_indices.size()) { + return nullptr; + } + int node_index = segment.output_node_indices[output_index]; + if (node_index < 0 || node_index >= ggml_graph_n_nodes(gf)) { + return nullptr; + } + return ggml_graph_node(gf, node_index); + } + + ggml_tensor* input_tensor(ggml_cgraph* gf, const Segment::InputRef& input_ref) { + GGML_ASSERT(gf != nullptr); + if (input_ref.type == Segment::INPUT_PREVIOUS_CUT) { + if (input_ref.node_index < 0 || input_ref.node_index >= ggml_graph_n_nodes(gf)) { + return nullptr; + } + return ggml_graph_node(gf, input_ref.node_index); + } + if (input_ref.leaf_index < 0 || input_ref.leaf_index >= gf->n_leafs) { + return nullptr; + } + return leaf_tensor(gf, input_ref.leaf_index); + } + + std::vector 
param_tensors(ggml_cgraph* gf, const Segment& segment) { + GGML_ASSERT(gf != nullptr); + std::vector tensors; + std::unordered_set seen_tensors; + tensors.reserve(segment.input_refs.size()); + seen_tensors.reserve(segment.input_refs.size()); + for (const auto& input_ref : segment.input_refs) { + if (input_ref.type != Segment::INPUT_PARAM) { + continue; + } + ggml_tensor* tensor = input_tensor(gf, input_ref); + if (tensor == nullptr) { + continue; + } + if (seen_tensors.insert(tensor).second) { + tensors.push_back(tensor); + } + } + return tensors; + } + + std::vector runtime_param_tensors(ggml_cgraph* gf, const Segment& segment, const char* log_desc) { + std::vector tensors = param_tensors(gf, segment); + std::vector filtered_tensors; + filtered_tensors.reserve(tensors.size()); + for (ggml_tensor* tensor : tensors) { + if (tensor_buffer(tensor) == nullptr) { + LOG_WARN("%s graph cut skipping param input without buffer: segment=%s tensor=%s", + log_desc == nullptr ? "unknown" : log_desc, + segment.group_name.c_str(), + tensor->name); + continue; + } + filtered_tensors.push_back(tensor); + } + return filtered_tensors; + } + + std::unordered_set collect_future_input_names(ggml_cgraph* gf, + const Plan& plan, + size_t current_segment_index) { + GGML_ASSERT(gf != nullptr); + std::unordered_set future_input_names; + for (size_t seg_idx = current_segment_index + 1; seg_idx < plan.segments.size(); ++seg_idx) { + const auto& segment = plan.segments[seg_idx]; + for (const auto& input_ref : segment.input_refs) { + if (input_ref.type != Segment::INPUT_PREVIOUS_CUT) { + continue; + } + ggml_tensor* current_input = input_tensor(gf, input_ref); + if (current_input != nullptr && current_input->name[0] != '\0') { + future_input_names.insert(current_input->name); + } + } + } + return future_input_names; + } + + ggml_cgraph* build_segment_graph(ggml_cgraph* gf, + const Segment& segment, + ggml_context** graph_ctx_out) { + GGML_ASSERT(gf != nullptr); + GGML_ASSERT(graph_ctx_out != 
nullptr); + + const size_t graph_size = segment.internal_node_indices.size() + segment.input_refs.size() + 8; + ggml_init_params params = { + /*.mem_size =*/ggml_graph_overhead_custom(graph_size, false) + 1024, + /*.mem_buffer =*/nullptr, + /*.no_alloc =*/true, + }; + ggml_context* graph_ctx = ggml_init(params); + GGML_ASSERT(graph_ctx != nullptr); + ggml_cgraph* segment_graph = ggml_new_graph_custom(graph_ctx, graph_size, false); + GGML_ASSERT(segment_graph != nullptr); + + for (const auto& input : segment.input_refs) { + ggml_tensor* current_input = input_tensor(gf, input); + if (current_input == nullptr) { + continue; + } + GGML_ASSERT(segment_graph->n_leafs < segment_graph->size); + segment_graph->leafs[segment_graph->n_leafs++] = current_input; + } + + for (int output_node_index : segment.output_node_indices) { + ggml_tensor* output = ggml_graph_node(gf, output_node_index); + if (output == nullptr) { + continue; + } + ggml_set_output(output); + } + for (int node_idx : segment.internal_node_indices) { + ggml_graph_add_node(segment_graph, ggml_graph_node(gf, node_idx)); + } + *graph_ctx_out = graph_ctx; + return segment_graph; + } + + size_t measure_segment_compute_buffer(ggml_backend_t backend, + ggml_cgraph* gf, + const Segment& segment, + const char* log_desc) { + GGML_ASSERT(backend != nullptr); + GGML_ASSERT(gf != nullptr); + if (segment.internal_node_indices.empty()) { + return 0; + } + + ggml_context* graph_ctx = nullptr; + ggml_cgraph* segment_graph = build_segment_graph(gf, segment, &graph_ctx); + ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend)); + + size_t sizes[1] = {0}; + ggml_gallocr_reserve_n_size( + allocr, + segment_graph, + nullptr, + nullptr, + sizes); + size_t buffer_size = sizes[0]; + + ggml_gallocr_free(allocr); + ggml_free(graph_ctx); + return buffer_size; + } + + Plan build_plan(ggml_backend_t backend, + ggml_cgraph* gf, + const std::unordered_set& params_tensor_set, + const char* log_desc) { + 
GGML_ASSERT(backend != nullptr); + GGML_ASSERT(gf != nullptr); + Plan plan; + plan.available = true; + const int n_nodes = ggml_graph_n_nodes(gf); + if (n_nodes <= 0) { + return plan; + } + plan.n_nodes = n_nodes; + plan.n_leafs = gf->n_leafs; + for (int i = 0; i < gf->n_leafs; ++i) { + ggml_tensor* leaf = gf->leafs[i]; + if (is_params_tensor(params_tensor_set, leaf)) { + continue; + } + auto shape = input_shape(leaf); + shape.leaf_index = i; + plan.input_shapes.push_back(shape); + } + + std::unordered_map producer_index; + producer_index.reserve(static_cast(n_nodes)); + for (int i = 0; i < n_nodes; ++i) { + producer_index[ggml_graph_node(gf, i)] = i; + } + + std::vector grouped_segments; + std::unordered_map group_to_segment; + for (int i = 0; i < n_nodes; ++i) { + ggml_tensor* node = ggml_graph_node(gf, i); + if (!is_graph_cut_tensor(node)) { + continue; + } + + plan.has_cuts = true; + std::string full_name(node->name); + std::string payload = full_name.substr(std::strlen(GGML_RUNNER_CUT_PREFIX)); + size_t sep = payload.find('|'); + std::string group = sep == std::string::npos ? 
payload : payload.substr(0, sep); + + auto it = group_to_segment.find(group); + if (it == group_to_segment.end()) { + Segment segment; + segment.group_name = group; + segment.output_node_indices.push_back(i); + group_to_segment[group] = grouped_segments.size(); + grouped_segments.push_back(std::move(segment)); + } else { + auto& segment = grouped_segments[it->second]; + segment.output_node_indices.push_back(i); + } + } + + if (!plan.has_cuts) { + return plan; + } + + std::unordered_set available_cut_output_node_indices; + available_cut_output_node_indices.reserve(static_cast(n_nodes)); + for (auto& segment : grouped_segments) { + build_segment(gf, + plan, + segment, + producer_index, + available_cut_output_node_indices, + backend, + params_tensor_set, + log_desc); + } + + ggml_tensor* final_output = ggml_graph_node(gf, -1); + if (final_output != nullptr && available_cut_output_node_indices.find(n_nodes - 1) == available_cut_output_node_indices.end()) { + Segment final_segment; + final_segment.group_name = "ggml_runner.final"; + final_segment.output_node_indices.push_back(n_nodes - 1); + build_segment(gf, + plan, + final_segment, + producer_index, + available_cut_output_node_indices, + backend, + params_tensor_set, + log_desc); + } + + return plan; + } + + Plan apply_max_vram_budget(ggml_cgraph* gf, + const Plan& base_plan, + size_t max_graph_vram_bytes, + ggml_backend_t backend, + const std::unordered_set& params_tensor_set, + const char* log_desc) { + GGML_ASSERT(backend != nullptr); + GGML_ASSERT(gf != nullptr); + int64_t t_budget_begin = ggml_time_ms(); + if (max_graph_vram_bytes == 0 || !base_plan.has_cuts || base_plan.segments.size() <= 1) { + return base_plan; + } + + const int n_nodes = ggml_graph_n_nodes(gf); + std::unordered_map producer_index; + producer_index.reserve(static_cast(n_nodes)); + for (int i = 0; i < n_nodes; ++i) { + producer_index[ggml_graph_node(gf, i)] = i; + } + + Plan merged_plan; + merged_plan.available = true; + merged_plan.has_cuts = 
base_plan.has_cuts; + merged_plan.valid = base_plan.valid; + merged_plan.n_nodes = base_plan.n_nodes; + merged_plan.n_leafs = base_plan.n_leafs; + + std::unordered_set available_cut_output_node_indices; + available_cut_output_node_indices.reserve(static_cast(n_nodes)); + + size_t start_segment_index = 0; + while (start_segment_index < base_plan.segments.size()) { + Plan single_plan; + auto single_available_cut_output_node_indices = available_cut_output_node_indices; + auto single_seed = make_segment_seed(base_plan, + start_segment_index, + start_segment_index); + build_segment(gf, + single_plan, + single_seed, + producer_index, + single_available_cut_output_node_indices, + backend, + params_tensor_set, + log_desc); + GGML_ASSERT(!single_plan.segments.empty()); + + size_t best_end_segment_index = start_segment_index; + bool can_merge_next_segment = graph_cut_segment_vram_bytes(single_plan.segments.back()) <= max_graph_vram_bytes; + + while (can_merge_next_segment && best_end_segment_index + 1 < base_plan.segments.size()) { + const size_t next_end_segment_index = best_end_segment_index + 1; + Plan candidate_plan; + auto candidate_available_cut_output_node_indices = available_cut_output_node_indices; + auto candidate_seed = make_segment_seed(base_plan, + start_segment_index, + next_end_segment_index); + build_segment(gf, + candidate_plan, + candidate_seed, + producer_index, + candidate_available_cut_output_node_indices, + backend, + params_tensor_set, + log_desc); + GGML_ASSERT(!candidate_plan.segments.empty()); + + const auto& candidate_segment = candidate_plan.segments.back(); + if (graph_cut_segment_vram_bytes(candidate_segment) > max_graph_vram_bytes) { + break; + } + + best_end_segment_index = next_end_segment_index; + } + + auto best_seed = make_segment_seed(base_plan, + start_segment_index, + best_end_segment_index); + build_segment(gf, + merged_plan, + best_seed, + producer_index, + available_cut_output_node_indices, + backend, + params_tensor_set, + 
log_desc); + start_segment_index = best_end_segment_index + 1; + } + + if (log_desc != nullptr && merged_plan.segments.size() != base_plan.segments.size()) { + LOG_INFO("%s graph cut max_vram=%.2f MB merged %zu segments -> %zu segments", + log_desc, + max_graph_vram_bytes / 1024.0 / 1024.0, + base_plan.segments.size(), + merged_plan.segments.size()); + } + + if (log_desc != nullptr) { + LOG_INFO("%s graph cut max_vram budget merge took %lld ms", + log_desc, + ggml_time_ms() - t_budget_begin); + } + + return merged_plan; + } + + Plan resolve_plan(ggml_backend_t backend, + ggml_cgraph* gf, + PlanCache* cache, + size_t max_graph_vram_bytes, + const std::unordered_set& params_tensor_set, + const char* log_desc) { + GGML_ASSERT(backend != nullptr); + GGML_ASSERT(gf != nullptr); + GGML_ASSERT(cache != nullptr); + + int64_t t_prepare_begin = ggml_time_ms(); + Plan base_plan; + int64_t t_plan_begin = ggml_time_ms(); + if (cache->graph_cut_plan.available && plan_matches_graph(gf, cache->graph_cut_plan)) { + base_plan = cache->graph_cut_plan; + } else { + base_plan = build_plan(backend, gf, params_tensor_set, log_desc); + cache->graph_cut_plan = base_plan; + cache->graph_cut_plan.available = true; + cache->budgeted_graph_cut_plan.available = false; + if (log_desc != nullptr) { + LOG_INFO("%s build cached graph cut plan done (taking %lld ms)", log_desc, ggml_time_ms() - t_plan_begin); + } + } + + Plan resolved_plan = base_plan; + if (max_graph_vram_bytes > 0 && base_plan.has_cuts) { + if (cache->budgeted_graph_cut_plan.available && + cache->budgeted_graph_cut_plan_max_vram_bytes == max_graph_vram_bytes && + plan_matches_graph(gf, cache->budgeted_graph_cut_plan)) { + resolved_plan = cache->budgeted_graph_cut_plan; + } else { + resolved_plan = apply_max_vram_budget(gf, + base_plan, + max_graph_vram_bytes, + backend, + params_tensor_set, + log_desc); + cache->budgeted_graph_cut_plan = resolved_plan; + cache->budgeted_graph_cut_plan.available = true; + 
cache->budgeted_graph_cut_plan_max_vram_bytes = max_graph_vram_bytes; + } + } + return resolved_plan; + } + +} // namespace sd::ggml_graph_cut diff --git a/src/ggml_graph_cut.h b/src/ggml_graph_cut.h new file mode 100644 index 00000000..e42859c5 --- /dev/null +++ b/src/ggml_graph_cut.h @@ -0,0 +1,104 @@ +#ifndef __SD_GGML_GRAPH_CUT_H__ +#define __SD_GGML_GRAPH_CUT_H__ + +#include +#include +#include +#include + +#include "ggml-backend.h" +#include "ggml.h" + +namespace sd::ggml_graph_cut { + + struct Segment { + enum InputType { + INPUT_EXTERNAL = 0, + INPUT_PREVIOUS_CUT, + INPUT_PARAM, + }; + + struct InputRef { + InputType type = INPUT_EXTERNAL; + std::string display_name; + int leaf_index = -1; + int node_index = -1; + }; + + size_t compute_buffer_size = 0; + size_t output_bytes = 0; + size_t input_external_bytes = 0; + size_t input_previous_cut_bytes = 0; + size_t input_param_bytes = 0; + std::string group_name; + std::vector internal_node_indices; + std::vector output_node_indices; + std::vector input_refs; + }; + + struct Plan { + struct InputShape { + int leaf_index = -1; + ggml_type type = GGML_TYPE_COUNT; + std::array ne = {0, 0, 0, 0}; + }; + + bool available = false; + bool has_cuts = false; + bool valid = true; + int n_nodes = 0; + int n_leafs = 0; + std::vector input_shapes; + std::vector segments; + }; + + struct PlanCache { + Plan graph_cut_plan; + Plan budgeted_graph_cut_plan; + size_t budgeted_graph_cut_plan_max_vram_bytes = 0; + }; + + static constexpr const char* GGML_RUNNER_CUT_PREFIX = "ggml_runner_cut:"; + + bool is_graph_cut_tensor(const ggml_tensor* tensor); + std::string make_graph_cut_name(const std::string& group, const std::string& output); + void mark_graph_cut(ggml_tensor* tensor, const std::string& group, const std::string& output); + int leaf_count(ggml_cgraph* gf); + ggml_tensor* leaf_tensor(ggml_cgraph* gf, int leaf_index); + ggml_backend_buffer_t tensor_buffer(const ggml_tensor* tensor); + ggml_tensor* 
cache_source_tensor(ggml_tensor* tensor); + size_t cache_tensor_bytes(const ggml_tensor* tensor); + bool plan_matches_graph(ggml_cgraph* gf, const Plan& plan); + ggml_tensor* output_tensor(ggml_cgraph* gf, const Segment& segment, size_t output_index); + ggml_tensor* input_tensor(ggml_cgraph* gf, const Segment::InputRef& input_ref); + std::vector param_tensors(ggml_cgraph* gf, const Segment& segment); + std::vector runtime_param_tensors(ggml_cgraph* gf, const Segment& segment, const char* log_desc); + std::unordered_set collect_future_input_names(ggml_cgraph* gf, + const Plan& plan, + size_t current_segment_index); + ggml_cgraph* build_segment_graph(ggml_cgraph* gf, + const Segment& segment, + ggml_context** graph_ctx_out); + size_t measure_segment_compute_buffer(ggml_backend_t backend, + ggml_cgraph* gf, + const Segment& segment, + const char* log_desc); + Plan build_plan(ggml_backend_t backend, + ggml_cgraph* gf, + const std::unordered_set& params_tensor_set, + const char* log_desc); + Plan apply_max_vram_budget(ggml_cgraph* gf, + const Plan& base_plan, + size_t max_graph_vram_bytes, + ggml_backend_t backend, + const std::unordered_set& params_tensor_set, + const char* log_desc); + Plan resolve_plan(ggml_backend_t backend, + ggml_cgraph* gf, + PlanCache* cache, + size_t max_graph_vram_bytes, + const std::unordered_set& params_tensor_set, + const char* log_desc); +} // namespace sd::ggml_graph_cut + +#endif diff --git a/src/llm.hpp b/src/llm.hpp index 4afaa3ba..a67b4ebf 100644 --- a/src/llm.hpp +++ b/src/llm.hpp @@ -346,6 +346,7 @@ namespace LLM { auto merger = std::dynamic_pointer_cast(blocks["merger"]); auto x = patch_embed->forward(ctx, pixel_values); + sd::ggml_graph_cut::mark_graph_cut(x, "llm.vision.prelude", "x"); x = ggml_reshape_4d(ctx->ggml_ctx, x, x->ne[0] * spatial_merge_size * spatial_merge_size, x->ne[1] / spatial_merge_size / spatial_merge_size, x->ne[2], x->ne[3]); x = ggml_get_rows(ctx->ggml_ctx, x, window_index); @@ -359,9 +360,11 @@ namespace LLM 
{ mask = nullptr; } x = block->forward(ctx, x, pe, mask); + sd::ggml_graph_cut::mark_graph_cut(x, "llm.vision.blocks." + std::to_string(i), "x"); } x = merger->forward(ctx, x); + sd::ggml_graph_cut::mark_graph_cut(x, "llm.vision.final", "x"); x = ggml_get_rows(ctx->ggml_ctx, x, window_inverse_index); @@ -506,6 +509,7 @@ namespace LLM { auto norm = std::dynamic_pointer_cast(blocks["norm"]); auto x = embed_tokens->forward(ctx, input_ids); + sd::ggml_graph_cut::mark_graph_cut(x, "llm.text.prelude", "x"); std::vector intermediate_outputs; @@ -552,6 +556,10 @@ namespace LLM { auto block = std::dynamic_pointer_cast(blocks["layers." + std::to_string(i)]); x = block->forward(ctx, x, input_pos, attention_mask); + if (out_layers.size() > 1) { + x = ggml_cont(ctx->ggml_ctx, x); + } + sd::ggml_graph_cut::mark_graph_cut(x, "llm.text.layers." + std::to_string(i), "x"); if (out_layers.find(i + 1) != out_layers.end()) { intermediate_outputs.push_back(x); } diff --git a/src/mmdit.hpp b/src/mmdit.hpp index e75736c5..e57041dc 100644 --- a/src/mmdit.hpp +++ b/src/mmdit.hpp @@ -767,6 +767,8 @@ public: auto context_x = block->forward(ctx, context, x, c_mod); context = context_x.first; x = context_x.second; + sd::ggml_graph_cut::mark_graph_cut(context, "mmdit.joint_blocks." + std::to_string(i), "context"); + sd::ggml_graph_cut::mark_graph_cut(x, "mmdit.joint_blocks." 
+ std::to_string(i), "x"); } x = final_layer->forward(ctx, x, c_mod); // (N, T, patch_size ** 2 * out_channels) @@ -809,6 +811,11 @@ public: context = context_embedder->forward(ctx, context); // [N, L, D] aka [N, L, 1536] } + sd::ggml_graph_cut::mark_graph_cut(x, "mmdit.prelude", "x"); + sd::ggml_graph_cut::mark_graph_cut(c, "mmdit.prelude", "c"); + if (context != nullptr) { + sd::ggml_graph_cut::mark_graph_cut(context, "mmdit.prelude", "context"); + } x = forward_core_with_concat(ctx, x, c, context, skip_layers); // (N, H*W, patch_size ** 2 * out_channels) diff --git a/src/qwen_image.hpp b/src/qwen_image.hpp index 1cbeb71d..35d32109 100644 --- a/src/qwen_image.hpp +++ b/src/qwen_image.hpp @@ -412,6 +412,9 @@ namespace Qwen { auto img = img_in->forward(ctx, x); auto txt = txt_norm->forward(ctx, context); txt = txt_in->forward(ctx, txt); + sd::ggml_graph_cut::mark_graph_cut(img, "qwen_image.prelude", "img"); + sd::ggml_graph_cut::mark_graph_cut(txt, "qwen_image.prelude", "txt"); + // sd::ggml_graph_cut::mark_graph_cut(t_emb, "qwen_image.prelude", "t_emb"); for (int i = 0; i < params.num_layers; i++) { auto block = std::dynamic_pointer_cast(blocks["transformer_blocks." + std::to_string(i)]); @@ -419,6 +422,8 @@ namespace Qwen { auto result = block->forward(ctx, img, txt, t_emb, pe, modulate_index); img = result.first; txt = result.second; + sd::ggml_graph_cut::mark_graph_cut(img, "qwen_image.transformer_blocks." + std::to_string(i), "img"); + sd::ggml_graph_cut::mark_graph_cut(txt, "qwen_image.transformer_blocks." 
+ std::to_string(i), "txt"); } if (params.zero_cond_t) { diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 860cff85..fd439ff1 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -144,6 +144,7 @@ public: std::string taesd_path; sd_tiling_params_t vae_tiling_params = {false, 0, 0, 0.5f, 0, 0}; bool offload_params_to_cpu = false; + float max_vram = 0.f; bool use_pmid = false; bool is_using_v_parameterization = false; @@ -190,6 +191,7 @@ public: vae_decode_only = sd_ctx_params->vae_decode_only; free_params_immediately = sd_ctx_params->free_params_immediately; offload_params_to_cpu = sd_ctx_params->offload_params_to_cpu; + max_vram = sd_ctx_params->max_vram; bool use_tae = false; @@ -375,6 +377,10 @@ public: bool clip_on_cpu = sd_ctx_params->keep_clip_on_cpu; + const size_t max_graph_vram_bytes = max_vram <= 0.f + ? 0 + : static_cast(static_cast(max_vram) * 1024.0 * 1024.0 * 1024.0); + { clip_backend = backend; if (clip_on_cpu && !ggml_backend_is_cpu(backend)) { @@ -464,6 +470,7 @@ public: clip_vision = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map); + clip_vision->set_max_graph_vram_bytes(max_graph_vram_bytes); clip_vision->alloc_params_buffer(); clip_vision->get_param_tensors(tensors); } @@ -540,9 +547,11 @@ public: } } + cond_stage_model->set_max_graph_vram_bytes(max_graph_vram_bytes); cond_stage_model->alloc_params_buffer(); cond_stage_model->get_param_tensors(tensors); + diffusion_model->set_max_graph_vram_bytes(max_graph_vram_bytes); diffusion_model->alloc_params_buffer(); diffusion_model->get_param_tensors(tensors); @@ -551,6 +560,7 @@ public: } if (high_noise_diffusion_model) { + high_noise_diffusion_model->set_max_graph_vram_bytes(max_graph_vram_bytes); high_noise_diffusion_model->alloc_params_buffer(); high_noise_diffusion_model->get_param_tensors(tensors); } @@ -623,16 +633,19 @@ public: } else if (use_tae && !tae_preview_only) { LOG_INFO("using TAE for encoding / decoding"); first_stage_model = 
create_tae(); + first_stage_model->set_max_graph_vram_bytes(max_graph_vram_bytes); first_stage_model->alloc_params_buffer(); first_stage_model->get_param_tensors(tensors, "tae"); } else { LOG_INFO("using VAE for encoding / decoding"); first_stage_model = create_vae(); + first_stage_model->set_max_graph_vram_bytes(max_graph_vram_bytes); first_stage_model->alloc_params_buffer(); first_stage_model->get_param_tensors(tensors, "first_stage_model"); if (use_tae && tae_preview_only) { LOG_INFO("using TAE for preview"); preview_vae = create_tae(); + preview_vae->set_max_graph_vram_bytes(max_graph_vram_bytes); preview_vae->alloc_params_buffer(); preview_vae->get_param_tensors(tensors, "tae"); } @@ -2151,6 +2164,7 @@ void sd_ctx_params_init(sd_ctx_params_t* sd_ctx_params) { sd_ctx_params->prediction = PREDICTION_COUNT; sd_ctx_params->lora_apply_mode = LORA_APPLY_AUTO; sd_ctx_params->offload_params_to_cpu = false; + sd_ctx_params->max_vram = 0.f; sd_ctx_params->enable_mmap = false; sd_ctx_params->keep_clip_on_cpu = false; sd_ctx_params->keep_control_net_on_cpu = false; @@ -2192,6 +2206,7 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { "sampler_rng_type: %s\n" "prediction: %s\n" "offload_params_to_cpu: %s\n" + "max_vram: %.3f\n" "keep_clip_on_cpu: %s\n" "keep_control_net_on_cpu: %s\n" "keep_vae_on_cpu: %s\n" @@ -2224,6 +2239,7 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { sd_rng_type_name(sd_ctx_params->sampler_rng_type), sd_prediction_name(sd_ctx_params->prediction), BOOL_STR(sd_ctx_params->offload_params_to_cpu), + sd_ctx_params->max_vram, BOOL_STR(sd_ctx_params->keep_clip_on_cpu), BOOL_STR(sd_ctx_params->keep_control_net_on_cpu), BOOL_STR(sd_ctx_params->keep_vae_on_cpu), @@ -3441,9 +3457,13 @@ SD_API sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* s std::unique_ptr hires_upscaler; if (request.hires.upscaler == SD_HIRES_UPSCALER_MODEL) { LOG_INFO("hires fix: loading model upscaler from '%s'", 
request.hires.model_path); - hires_upscaler = std::make_unique(sd_ctx->sd->n_threads, + hires_upscaler = std::make_unique(sd_ctx->sd->n_threads, false, request.hires.upscale_tile_size); + const size_t max_graph_vram_bytes = sd_ctx->sd->max_vram <= 0.f + ? 0 + : static_cast(static_cast(sd_ctx->sd->max_vram) * 1024.0 * 1024.0 * 1024.0); + hires_upscaler->set_max_graph_vram_bytes(max_graph_vram_bytes); if (!hires_upscaler->load_from_file(request.hires.model_path, sd_ctx->sd->offload_params_to_cpu, sd_ctx->sd->n_threads)) { diff --git a/src/t5.hpp b/src/t5.hpp index bbd13e49..71545e52 100644 --- a/src/t5.hpp +++ b/src/t5.hpp @@ -251,7 +251,8 @@ public: ggml_tensor* x, ggml_tensor* past_bias = nullptr, ggml_tensor* attention_mask = nullptr, - ggml_tensor* relative_position_bucket = nullptr) { + ggml_tensor* relative_position_bucket = nullptr, + const std::string& graph_cut_prefix = "") { // x: [N, n_token, model_dim] for (int i = 0; i < num_layers; i++) { auto block = std::dynamic_pointer_cast(blocks["block." + std::to_string(i)]); @@ -259,6 +260,9 @@ public: auto ret = block->forward(ctx, x, past_bias, attention_mask, relative_position_bucket); x = ret.first; past_bias = ret.second; + if (!graph_cut_prefix.empty()) { + sd::ggml_graph_cut::mark_graph_cut(x, graph_cut_prefix + ".block." 
+ std::to_string(i), "x"); + } } auto final_layer_norm = std::dynamic_pointer_cast(blocks["final_layer_norm"]); @@ -305,7 +309,8 @@ public: auto encoder = std::dynamic_pointer_cast(blocks["encoder"]); auto x = shared->forward(ctx, input_ids); - x = encoder->forward(ctx, x, past_bias, attention_mask, relative_position_bucket); + sd::ggml_graph_cut::mark_graph_cut(x, "t5.prelude", "x"); + x = encoder->forward(ctx, x, past_bias, attention_mask, relative_position_bucket, "t5"); return x; } }; diff --git a/src/unet.hpp b/src/unet.hpp index 2a24f14e..d7ea8c3f 100644 --- a/src/unet.hpp +++ b/src/unet.hpp @@ -482,12 +482,14 @@ public: emb = ggml_add(ctx->ggml_ctx, emb, label_emb); // [N, time_embed_dim] } + // sd::ggml_graph_cut::mark_graph_cut(emb, "unet.prelude", "emb"); // input_blocks std::vector hs; // input block 0 auto h = input_blocks_0_0->forward(ctx, x); + sd::ggml_graph_cut::mark_graph_cut(h, "unet.input_blocks.0", "h"); ggml_set_name(h, "bench-start"); hs.push_back(h); @@ -505,6 +507,7 @@ public: std::string name = "input_blocks." + std::to_string(input_block_idx) + ".1"; h = attention_layer_forward(name, ctx, h, context, num_video_frames); // [N, mult*model_channels, h, w] } + sd::ggml_graph_cut::mark_graph_cut(h, "unet.input_blocks." + std::to_string(input_block_idx), "h"); hs.push_back(h); } if (tiny_unet) { @@ -518,6 +521,7 @@ public: auto block = std::dynamic_pointer_cast(blocks[name]); h = block->forward(ctx, h); // [N, mult*model_channels, h/(2^(i+1)), w/(2^(i+1))] + // sd::ggml_graph_cut::mark_graph_cut(h, "unet.input_blocks." 
+ std::to_string(input_block_idx), "h"); hs.push_back(h); } } @@ -531,6 +535,7 @@ public: h = resblock_forward("middle_block.2", ctx, h, emb, num_video_frames); // [N, 4*model_channels, h/8, w/8] } } + sd::ggml_graph_cut::mark_graph_cut(h, "unet.middle_block", "h"); if (controls.size() > 0) { auto cs = ggml_ext_scale(ctx->ggml_ctx, controls[controls.size() - 1], control_strength, true); h = ggml_add(ctx->ggml_ctx, h, cs); // middle control @@ -581,6 +586,7 @@ public: } output_block_idx += 1; + sd::ggml_graph_cut::mark_graph_cut(h, "unet.output_blocks." + std::to_string(output_block_idx - 1), "h"); } } diff --git a/src/upscaler.cpp b/src/upscaler.cpp index 80e68c94..25fc0c5d 100644 --- a/src/upscaler.cpp +++ b/src/upscaler.cpp @@ -12,6 +12,13 @@ UpscalerGGML::UpscalerGGML(int n_threads, tile_size(tile_size) { } +void UpscalerGGML::set_max_graph_vram_bytes(size_t max_vram_bytes) { + max_graph_vram_bytes = max_vram_bytes; + if (esrgan_upscaler) { + esrgan_upscaler->set_max_graph_vram_bytes(max_vram_bytes); + } +} + bool UpscalerGGML::load_from_file(const std::string& esrgan_path, bool offload_params_to_cpu, int n_threads) { @@ -30,6 +37,7 @@ bool UpscalerGGML::load_from_file(const std::string& esrgan_path, } LOG_INFO("Upscaler weight type: %s", ggml_type_name(model_data_type)); esrgan_upscaler = std::make_shared(backend, offload_params_to_cpu, tile_size, model_loader.get_tensor_storage_map()); + esrgan_upscaler->set_max_graph_vram_bytes(max_graph_vram_bytes); if (direct) { esrgan_upscaler->set_conv2d_direct_enabled(true); } diff --git a/src/upscaler.h b/src/upscaler.h index b11f004a..d667a6f1 100644 --- a/src/upscaler.h +++ b/src/upscaler.h @@ -14,8 +14,9 @@ struct UpscalerGGML { std::shared_ptr esrgan_upscaler; std::string esrgan_path; int n_threads; - bool direct = false; - int tile_size = 128; + bool direct = false; + int tile_size = 128; + size_t max_graph_vram_bytes = 0; UpscalerGGML(int n_threads, bool direct = false, @@ -24,6 +25,7 @@ struct UpscalerGGML { bool 
load_from_file(const std::string& esrgan_path, bool offload_params_to_cpu, int n_threads); + void set_max_graph_vram_bytes(size_t max_vram_bytes); sd::Tensor upscale_tensor(const sd::Tensor& input_tensor); sd_image_t upscale(sd_image_t input_image, uint32_t upscale_factor); }; diff --git a/src/wan.hpp b/src/wan.hpp index 6860262c..26145330 100644 --- a/src/wan.hpp +++ b/src/wan.hpp @@ -692,6 +692,7 @@ namespace WAN { } else { x = conv1->forward(ctx, x); } + // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.encoder.prelude", "x"); // downsamples std::vector dims = {dim}; @@ -717,12 +718,14 @@ namespace WAN { x = layer->forward(ctx, x, b, feat_cache, feat_idx, chunk_idx); } } + // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.encoder.down." + std::to_string(i), "x"); } // middle x = middle_0->forward(ctx, x, b, feat_cache, feat_idx); x = middle_1->forward(ctx, x, b); x = middle_2->forward(ctx, x, b, feat_cache, feat_idx); + // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.encoder.mid", "x"); // head x = head_0->forward(ctx, x); @@ -863,11 +866,13 @@ namespace WAN { } else { x = conv1->forward(ctx, x); } + // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.decoder.prelude", "x"); // middle x = middle_0->forward(ctx, x, b, feat_cache, feat_idx); x = middle_1->forward(ctx, x, b); x = middle_2->forward(ctx, x, b, feat_cache, feat_idx); + // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.decoder.mid", "x"); // upsamples std::vector dims = {dim_mult[dim_mult.size() - 1] * dim}; @@ -893,6 +898,7 @@ namespace WAN { x = layer->forward(ctx, x, b, feat_cache, feat_idx, chunk_idx); } } + // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.decoder.up." 
+ std::to_string(i), "x"); } // head @@ -1031,6 +1037,7 @@ namespace WAN { if (wan2_2) { x = patchify(ctx->ggml_ctx, x, 2, b); } + // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.encode.prelude", "x"); auto encoder = std::dynamic_pointer_cast(blocks["encoder"]); auto conv1 = std::dynamic_pointer_cast(blocks["conv1"]); @@ -1051,6 +1058,7 @@ namespace WAN { } out = conv1->forward(ctx, out); auto mu = ggml_ext_chunk(ctx->ggml_ctx, out, 2, 3)[0]; + // sd::ggml_graph_cut::mark_graph_cut(mu, "wan_vae.encode.final", "mu"); clear_cache(); return mu; } @@ -1068,6 +1076,7 @@ namespace WAN { int64_t iter_ = z->ne[2]; auto x = conv2->forward(ctx, z); + // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.decode.prelude", "x"); ggml_tensor* out; for (int i = 0; i < iter_; i++) { _conv_idx = 0; @@ -1083,6 +1092,7 @@ namespace WAN { if (wan2_2) { out = unpatchify(ctx->ggml_ctx, out, 2, b); } + // sd::ggml_graph_cut::mark_graph_cut(out, "wan_vae.decode.final", "out"); clear_cache(); return out; } @@ -1097,13 +1107,15 @@ namespace WAN { auto decoder = std::dynamic_pointer_cast(blocks["decoder"]); auto conv2 = std::dynamic_pointer_cast(blocks["conv2"]); - auto x = conv2->forward(ctx, z); + auto x = conv2->forward(ctx, z); + // sd::ggml_graph_cut::mark_graph_cut(x, "wan_vae.decode_partial.prelude", "x"); auto in = ggml_ext_slice(ctx->ggml_ctx, x, 2, i, i + 1); // [b*c, 1, h, w] _conv_idx = 0; auto out = decoder->forward(ctx, in, b, _feat_map, _conv_idx, i); if (wan2_2) { out = unpatchify(ctx->ggml_ctx, out, 2, b); } + // sd::ggml_graph_cut::mark_graph_cut(out, "wan_vae.decode_partial.final", "out"); return out; } }; @@ -1984,6 +1996,13 @@ namespace WAN { c = ggml_reshape_3d(ctx->ggml_ctx, c, c->ne[0] * c->ne[1] * c->ne[2], c->ne[3] / N, N); // [N, dim, t_len*h_len*w_len] c = ggml_ext_cont(ctx->ggml_ctx, ggml_ext_torch_permute(ctx->ggml_ctx, c, 1, 0, 2, 3)); // [N, t_len*h_len*w_len, dim] } + sd::ggml_graph_cut::mark_graph_cut(x, "wan.prelude", "x"); + // 
sd::ggml_graph_cut::mark_graph_cut(e, "wan.prelude", "e"); + // sd::ggml_graph_cut::mark_graph_cut(e0, "wan.prelude", "e0"); + // sd::ggml_graph_cut::mark_graph_cut(context, "wan.prelude", "context"); + if (c != nullptr) { + sd::ggml_graph_cut::mark_graph_cut(c, "wan.prelude", "c"); + } auto x_orig = x; @@ -2004,6 +2023,10 @@ namespace WAN { c_skip = ggml_ext_scale(ctx->ggml_ctx, c_skip, vace_strength); x = ggml_add(ctx->ggml_ctx, x, c_skip); } + sd::ggml_graph_cut::mark_graph_cut(x, "wan.blocks." + std::to_string(i), "x"); + if (c != nullptr) { + sd::ggml_graph_cut::mark_graph_cut(c, "wan.blocks." + std::to_string(i), "c"); + } } x = head->forward(ctx, x, e); // [N, t_len*h_len*w_len, pt*ph*pw*out_dim] diff --git a/src/z_image.hpp b/src/z_image.hpp index 6bb44b79..00b69c26 100644 --- a/src/z_image.hpp +++ b/src/z_image.hpp @@ -371,6 +371,9 @@ namespace ZImage { auto txt = cap_embedder_1->forward(ctx, cap_embedder_0->forward(ctx, context)); // [N, n_txt_token, hidden_size] auto img = x_embedder->forward(ctx, x); // [N, n_img_token, hidden_size] + sd::ggml_graph_cut::mark_graph_cut(txt, "z_image.prelude", "txt"); + sd::ggml_graph_cut::mark_graph_cut(img, "z_image.prelude", "img"); + sd::ggml_graph_cut::mark_graph_cut(t_emb, "z_image.prelude", "t_emb"); int64_t n_txt_pad_token = Rope::bound_mod(static_cast(n_txt_token), SEQ_MULTI_OF); if (n_txt_pad_token > 0) { @@ -393,20 +396,24 @@ namespace ZImage { auto block = std::dynamic_pointer_cast(blocks["context_refiner." + std::to_string(i)]); txt = block->forward(ctx, txt, txt_pe, nullptr, nullptr); + sd::ggml_graph_cut::mark_graph_cut(txt, "z_image.context_refiner." + std::to_string(i), "txt"); } for (int i = 0; i < z_image_params.num_refiner_layers; i++) { auto block = std::dynamic_pointer_cast(blocks["noise_refiner." + std::to_string(i)]); img = block->forward(ctx, img, img_pe, nullptr, t_emb); + sd::ggml_graph_cut::mark_graph_cut(img, "z_image.noise_refiner." 
+ std::to_string(i), "img"); } auto txt_img = ggml_concat(ctx->ggml_ctx, txt, img, 1); // [N, n_txt_token + n_txt_pad_token + n_img_token + n_img_pad_token, hidden_size] + sd::ggml_graph_cut::mark_graph_cut(txt_img, "z_image.prelude", "txt_img"); for (int i = 0; i < z_image_params.num_layers; i++) { auto block = std::dynamic_pointer_cast(blocks["layers." + std::to_string(i)]); txt_img = block->forward(ctx, txt_img, pe, nullptr, t_emb); + sd::ggml_graph_cut::mark_graph_cut(txt_img, "z_image.layers." + std::to_string(i), "txt_img"); } txt_img = final_layer->forward(ctx, txt_img, t_emb); // [N, n_txt_token + n_txt_pad_token + n_img_token + n_img_pad_token, ph*pw*C]