From a3a88fc9b2e3bc5df63e4ca16040952e809d8354 Mon Sep 17 00:00:00 2001
From: Wagner Bruna
Date: Fri, 12 Dec 2025 11:36:54 -0300
Subject: [PATCH] fix: avoid crash loading LoRAs with bf16 weights (#1077)

---
 ggml_extend.hpp | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/ggml_extend.hpp b/ggml_extend.hpp
index 5024eb9..07b9bfb 100644
--- a/ggml_extend.hpp
+++ b/ggml_extend.hpp
@@ -1400,10 +1400,14 @@ __STATIC_INLINE__ void ggml_ext_backend_tensor_get_and_sync(ggml_backend_t backe
 }
 
 __STATIC_INLINE__ float ggml_ext_backend_tensor_get_f32(ggml_tensor* tensor) {
-    GGML_ASSERT(tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_I32);
+    GGML_ASSERT(tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_I32 || tensor->type == GGML_TYPE_BF16);
     float value;
     if (tensor->type == GGML_TYPE_F32) {
         ggml_backend_tensor_get(tensor, &value, 0, sizeof(value));
+    } else if (tensor->type == GGML_TYPE_BF16) {
+        ggml_bf16_t bf16_value;
+        ggml_backend_tensor_get(tensor, &bf16_value, 0, sizeof(bf16_value));
+        value = ggml_bf16_to_fp32(bf16_value);
     } else if (tensor->type == GGML_TYPE_F16) {
         ggml_fp16_t f16_value;
         ggml_backend_tensor_get(tensor, &f16_value, 0, sizeof(f16_value));
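
For context on the new branch: bf16 stores the sign bit, the full 8-bit exponent, and the top 7 mantissa bits of an IEEE-754 float32, i.e. the upper 16 bits of the f32 bit pattern. Widening bf16 back to f32 is therefore just a 16-bit left shift, which is what `ggml_bf16_to_fp32` performs. A minimal standalone sketch of that conversion (illustrative only, not ggml's code; the helper name `bf16_bits_to_f32` is made up here):

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

// Widen a raw bf16 bit pattern to f32. Because bf16 is the upper
// half of an f32, shifting the bits left by 16 reproduces the exact
// f32 value (the low 16 mantissa bits are simply zero).
static float bf16_bits_to_f32(uint16_t bits) {
    uint32_t u32 = static_cast<uint32_t>(bits) << 16;
    float f;
    std::memcpy(&f, &u32, sizeof(f));  // type-pun safely via memcpy
    return f;
}

int main() {
    // 0x3FC0 is 1.5 in bf16: sign 0, exponent 0x7F (bias 127 -> 2^0),
    // mantissa 0b1000000 -> 1.5 * 2^0.
    std::printf("%f\n", bf16_bits_to_f32(0x3FC0));  // prints 1.500000
    return 0;
}
```

This also explains why the previous code crashed rather than silently misreading the value: the function asserted on the tensor type before reading, so a LoRA shipped with bf16 weights tripped `GGML_ASSERT` instead of falling through with garbage data.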