From 9097ce5211950a98e2fd557c68d0a43bc7c2de0a Mon Sep 17 00:00:00 2001
From: fszontagh <51741446+fszontagh@users.noreply.github.com>
Date: Wed, 6 May 2026 15:45:47 +0200
Subject: [PATCH] fix: skip empty MultiLoraAdapter when no LoRAs target a model
 (#1469)

---
 src/stable-diffusion.cpp | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp
index 88102ff6..860cff85 100644
--- a/src/stable-diffusion.cpp
+++ b/src/stable-diffusion.cpp
@@ -1104,8 +1104,13 @@ public:
                     cond_stage_lora_models.push_back(lora);
                 }
             }
-            auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(cond_stage_lora_models);
-            cond_stage_model->set_weight_adapter(multi_lora_adapter);
+            // Only attach the adapter when there are LoRAs targeting the cond_stage model.
+            // An empty MultiLoraAdapter still routes every linear/conv through
+            // forward_with_lora() instead of the direct kernel path -- slower for no benefit.
+            if (!cond_stage_lora_models.empty()) {
+                auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(cond_stage_lora_models);
+                cond_stage_model->set_weight_adapter(multi_lora_adapter);
+            }
         }
         if (diffusion_model) {
             std::vector<std::shared_ptr<LoraModel>> lora_models;
@@ -1136,10 +1141,12 @@ public:
                     diffusion_lora_models.push_back(lora);
                 }
             }
-            auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(diffusion_lora_models);
-            diffusion_model->set_weight_adapter(multi_lora_adapter);
-            if (high_noise_diffusion_model) {
-                high_noise_diffusion_model->set_weight_adapter(multi_lora_adapter);
+            if (!diffusion_lora_models.empty()) {
+                auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(diffusion_lora_models);
+                diffusion_model->set_weight_adapter(multi_lora_adapter);
+                if (high_noise_diffusion_model) {
+                    high_noise_diffusion_model->set_weight_adapter(multi_lora_adapter);
+                }
             }
         }

@@ -1172,8 +1179,10 @@ public:
                     first_stage_lora_models.push_back(lora);
                 }
             }
-            auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(first_stage_lora_models);
-            first_stage_model->set_weight_adapter(multi_lora_adapter);
+            if (!first_stage_lora_models.empty()) {
+                auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(first_stage_lora_models);
+                first_stage_model->set_weight_adapter(multi_lora_adapter);
+            }
         }
     }
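Note (not part of the patch): the new comment's rationale is that setting any
weight adapter, even one wrapping zero LoRAs, flips every layer onto the
adapter code path. Below is a minimal self-contained sketch of that behavior;
LoraModel, MultiLoraAdapter, Linear, and forward_with_lora are simplified
stand-ins here, not the project's real classes.

#include <cstdio>
#include <memory>
#include <vector>

struct LoraModel {};  // stand-in for the real LoRA weights

struct MultiLoraAdapter {
    std::vector<std::shared_ptr<LoraModel>> loras;
    explicit MultiLoraAdapter(std::vector<std::shared_ptr<LoraModel>> l)
        : loras(std::move(l)) {}
};

struct Linear {
    std::shared_ptr<MultiLoraAdapter> adapter;  // null => direct kernel path

    void set_weight_adapter(std::shared_ptr<MultiLoraAdapter> a) {
        adapter = std::move(a);
    }

    void forward() {
        if (adapter) {
            // forward_with_lora(): per-layer adapter bookkeeping happens here
            // even when adapter->loras is empty.
            std::printf("forward_with_lora (%zu LoRAs)\n", adapter->loras.size());
        } else {
            std::printf("direct kernel path\n");
        }
    }
};

int main() {
    Linear layer;
    layer.forward();  // direct kernel path

    // Before the patch: an empty adapter was attached unconditionally.
    layer.set_weight_adapter(std::make_shared<MultiLoraAdapter>(
        std::vector<std::shared_ptr<LoraModel>>{}));
    layer.forward();  // forward_with_lora (0 LoRAs) -- overhead for nothing

    // After the patch: attach only when the LoRA list is non-empty.
    layer.adapter = nullptr;
    std::vector<std::shared_ptr<LoraModel>> loras;  // no LoRAs target this model
    if (!loras.empty()) {
        layer.set_weight_adapter(std::make_shared<MultiLoraAdapter>(std::move(loras)));
    }
    layer.forward();  // direct kernel path again
}

Guarding on empty() preserves the null-adapter fast path, which is exactly
what the three hunks above do for cond_stage_model, diffusion_model (plus
high_noise_diffusion_model), and first_stage_model.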