Fix token length

This commit is contained in:
Author: Stéphane du Hamel — 2025-12-08 01:36:17 +01:00
parent a907fe2851
commit fc8d85e133

View File

@ -1698,7 +1698,7 @@ struct LLMEmbedder : public Conditioner {
std::vector<std::pair<int, ggml_tensor*>> image_embeds;
std::pair<int, int> prompt_attn_range;
int prompt_template_encode_start_idx = 34;
int max_length = 0;
std::set<int> out_layers;
if (llm->enable_vision && conditioner_params.ref_images.size() > 0) {
    LOG_INFO("QwenImageEditPlusPipeline");
@ -1810,6 +1810,7 @@ struct LLMEmbedder : public Conditioner {
} else if (sd_version_is_longcat(version)) {
    prompt_template_encode_start_idx = 36;
    // prompt_template_encode_end_idx = 5;
    max_length = 512;
    prompt = "<|im_start|>system\nAs an image captioning expert, generate a descriptive text prompt based on an image content, suitable for input to a text-to-image model.<|im_end|>\n<|im_start|>user\n";