fix: skip empty MultiLoraAdapter when no LoRAs target a model (#1469)

This commit is contained in:
fszontagh 2026-05-06 15:45:47 +02:00 committed by GitHub
parent 3d6064b37e
commit 9097ce5211
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -1104,9 +1104,14 @@ public:
            cond_stage_lora_models.push_back(lora);
        }
    }
// Only attach the adapter when there are LoRAs targeting the cond_stage model.
// An empty MultiLoraAdapter still routes every linear/conv through
// forward_with_lora() instead of the direct kernel path — slower for no benefit.
if (!cond_stage_lora_models.empty()) {
        auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(cond_stage_lora_models);
        cond_stage_model->set_weight_adapter(multi_lora_adapter);
    }
}
    if (diffusion_model) {
        std::vector<std::shared_ptr<LoraModel>> lora_models;
        auto lora_state_diff = lora_state;
@@ -1136,12 +1141,14 @@ public:
            diffusion_lora_models.push_back(lora);
        }
    }
if (!diffusion_lora_models.empty()) {
        auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(diffusion_lora_models);
        diffusion_model->set_weight_adapter(multi_lora_adapter);
        if (high_noise_diffusion_model) {
            high_noise_diffusion_model->set_weight_adapter(multi_lora_adapter);
        }
    }
}
    if (first_stage_model) {
        std::vector<std::shared_ptr<LoraModel>> lora_models;
@@ -1172,10 +1179,12 @@ public:
            first_stage_lora_models.push_back(lora);
        }
    }
if (!first_stage_lora_models.empty()) {
        auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(first_stage_lora_models);
        first_stage_model->set_weight_adapter(multi_lora_adapter);
    }
}
}
void lora_stat() {
    if (!cond_stage_lora_models.empty()) {