Mirror of https://github.com/leejet/stable-diffusion.cpp.git, synced 2025-12-12 21:38:58 +00:00
fix: avoid crash loading LoRAs with bf16 weights (#1077)
parent 8823dc48bc
commit a3a88fc9b2
@@ -1400,10 +1400,14 @@ __STATIC_INLINE__ void ggml_ext_backend_tensor_get_and_sync(ggml_backend_t backe
 }
 
 __STATIC_INLINE__ float ggml_ext_backend_tensor_get_f32(ggml_tensor* tensor) {
-    GGML_ASSERT(tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_I32);
+    GGML_ASSERT(tensor->type == GGML_TYPE_F32 || tensor->type == GGML_TYPE_F16 || tensor->type == GGML_TYPE_I32 || tensor->type == GGML_TYPE_BF16);
     float value;
     if (tensor->type == GGML_TYPE_F32) {
         ggml_backend_tensor_get(tensor, &value, 0, sizeof(value));
+    } else if (tensor->type == GGML_TYPE_BF16) {
+        ggml_bf16_t bf16_value;
+        ggml_backend_tensor_get(tensor, &bf16_value, 0, sizeof(bf16_value));
+        value = ggml_bf16_to_fp32(bf16_value);
     } else if (tensor->type == GGML_TYPE_F16) {
         ggml_fp16_t f16_value;
         ggml_backend_tensor_get(tensor, &f16_value, 0, sizeof(f16_value));
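For context, here is a minimal standalone sketch of what the bf16-to-f32 widening used above does. This is not the ggml implementation; the type bf16_t and the helpers bf16_to_f32 / f32_to_bf16 below are hypothetical stand-ins for ggml_bf16_t and ggml_bf16_to_fp32. bfloat16 keeps the sign bit, all 8 exponent bits, and the top 7 mantissa bits of an IEEE-754 binary32 value, so widening it back to f32 is a shift into the high 16 bits of a 32-bit word followed by a bit-cast.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical stand-in for ggml_bf16_t: a raw 16-bit bit pattern. */
typedef struct { uint16_t bits; } bf16_t;

/* bf16 -> f32: place the 16 stored bits in the high half of a 32-bit word. */
static float bf16_to_f32(bf16_t h) {
    uint32_t u = (uint32_t)h.bits << 16;
    float f;
    memcpy(&f, &u, sizeof(f));
    return f;
}

/* f32 -> bf16 by truncation (drops the low 16 mantissa bits, no rounding). */
static bf16_t f32_to_bf16(float f) {
    uint32_t u;
    memcpy(&u, &f, sizeof(u));
    bf16_t h = { (uint16_t)(u >> 16) };
    return h;
}

int main(void) {
    bf16_t h = f32_to_bf16(1.5f);    /* 1.5 is exactly representable in bf16 */
    printf("%f\n", bf16_to_f32(h));  /* prints 1.500000 */
    return 0;
}

Reading the bf16 scalar as two raw bytes via ggml_backend_tensor_get and widening it this way is what the patched ggml_ext_backend_tensor_get_f32 does, so it no longer trips the GGML_ASSERT (the crash in the commit title) when a LoRA stores its weights in bf16.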