From 7519e2f11ac36f85819e305ded1f0dcca260e015 Mon Sep 17 00:00:00 2001
From: leejet
Date: Sun, 12 Oct 2025 17:27:51 +0800
Subject: [PATCH] to_add_out precision fix

---
 qwen_image.hpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/qwen_image.hpp b/qwen_image.hpp
index ab16b82..90357af 100644
--- a/qwen_image.hpp
+++ b/qwen_image.hpp
@@ -97,7 +97,10 @@ namespace Qwen {
 
             blocks["to_out.0"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, out_dim, out_bias));
             // to_out.1 is nn.Dropout
-            blocks["to_add_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, out_context_dim, out_bias));
+            float scale = 1.f / 32.f;
+            // The purpose of the scale here is to prevent NaN issues in certain situations.
+            // For example when using CUDA but the weights are k-quants (not all prompts).
+            blocks["to_add_out"] = std::shared_ptr<GGMLBlock>(new Linear(inner_dim, out_context_dim, out_bias, false, false, scale));
         }
 
         std::pair<struct ggml_tensor*, struct ggml_tensor*> forward(struct ggml_context* ctx,