fix: skip empty MultiLoraAdapter when no LoRAs target a model (#1469)

fszontagh 2026-05-06 15:45:47 +02:00 committed by GitHub
parent 3d6064b37e
commit 9097ce5211

@@ -1104,8 +1104,13 @@ public:
                 cond_stage_lora_models.push_back(lora);
             }
         }
-        auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(cond_stage_lora_models);
-        cond_stage_model->set_weight_adapter(multi_lora_adapter);
+        // Only attach the adapter when there are LoRAs targeting the cond_stage model.
+        // An empty MultiLoraAdapter still routes every linear/conv through
+        // forward_with_lora() instead of the direct kernel path, slower for no benefit.
+        if (!cond_stage_lora_models.empty()) {
+            auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(cond_stage_lora_models);
+            cond_stage_model->set_weight_adapter(multi_lora_adapter);
+        }
     }
     if (diffusion_model) {
         std::vector<std::shared_ptr<LoraModel>> lora_models;
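
For context on the comment in the hunk above: even a LoRA-less adapter keeps every wrapped layer on the adapter code path. A minimal sketch of that dispatch cost, assuming a hypothetical adapter interface (Tensor, LoraModel::apply, and the shape of forward_with_lora are illustrative stand-ins, not the project's real types):

    #include <memory>
    #include <vector>

    // Illustrative stand-ins only; the real Tensor/LoraModel types live elsewhere.
    struct Tensor {};

    struct LoraModel {
        // Hypothetical: applies this LoRA's low-rank delta on top of a base result.
        Tensor apply(const Tensor& base) const { return base; }
    };

    struct MultiLoraAdapter {
        explicit MultiLoraAdapter(std::vector<std::shared_ptr<LoraModel>> loras)
            : loras(std::move(loras)) {}

        // Every wrapped linear/conv pays this indirection on each call.
        // With an empty list the loop is a no-op, so the direct kernel path
        // would have been strictly cheaper; hence the new empty() guard.
        Tensor forward_with_lora(const Tensor& base_out) const {
            Tensor out = base_out;
            for (const auto& lora : loras) {
                out = lora->apply(out);
            }
            return out;
        }

        std::vector<std::shared_ptr<LoraModel>> loras;
    };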
@@ -1136,10 +1141,12 @@ public:
                 diffusion_lora_models.push_back(lora);
             }
         }
-        auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(diffusion_lora_models);
-        diffusion_model->set_weight_adapter(multi_lora_adapter);
-        if (high_noise_diffusion_model) {
-            high_noise_diffusion_model->set_weight_adapter(multi_lora_adapter);
+        if (!diffusion_lora_models.empty()) {
+            auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(diffusion_lora_models);
+            diffusion_model->set_weight_adapter(multi_lora_adapter);
+            if (high_noise_diffusion_model) {
+                high_noise_diffusion_model->set_weight_adapter(multi_lora_adapter);
+            }
         }
     }
@@ -1172,8 +1179,10 @@ public:
                 first_stage_lora_models.push_back(lora);
             }
         }
-        auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(first_stage_lora_models);
-        first_stage_model->set_weight_adapter(multi_lora_adapter);
+        if (!first_stage_lora_models.empty()) {
+            auto multi_lora_adapter = std::make_shared<MultiLoraAdapter>(first_stage_lora_models);
+            first_stage_model->set_weight_adapter(multi_lora_adapter);
+        }
     }
 }
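
The same empty-check now repeats for the cond_stage, diffusion, and first_stage branches. A small helper could factor the guard out; a sketch under the assumption that each model exposes set_weight_adapter() as shown in the diff (attach_loras_if_any is a hypothetical name, reusing the illustrative types from the sketch above):

    // Hypothetical helper; attaches a MultiLoraAdapter only when it has work to do.
    template <typename Model>
    void attach_loras_if_any(const std::shared_ptr<Model>& model,
                             const std::vector<std::shared_ptr<LoraModel>>& loras) {
        // Skip missing models and empty LoRA sets, keeping the direct kernel path.
        if (!model || loras.empty()) {
            return;
        }
        model->set_weight_adapter(std::make_shared<MultiLoraAdapter>(loras));
    }

Usage would collapse each branch to a single call, e.g. attach_loras_if_any(first_stage_model, first_stage_lora_models), with the high_noise_diffusion_model sharing the diffusion branch's adapter as before.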