automatically make the parameters of some unary ops contiguous

This commit is contained in:
leejet 2026-01-25 13:52:24 +08:00
parent e2600bd442
commit 10fe4b094a
15 changed files with 90 additions and 49 deletions

View File

@ -479,9 +479,9 @@ public:
x = fc1->forward(ctx, x);
if (use_gelu) {
x = ggml_gelu_inplace(ctx->ggml_ctx, x);
x = ggml_ext_gelu(ctx->ggml_ctx, x, true);
} else {
x = ggml_gelu_quick_inplace(ctx->ggml_ctx, x);
x = ggml_ext_gelu_quick(ctx->ggml_ctx, x, true);
}
x = fc2->forward(ctx, x);
return x;

View File

@ -200,7 +200,7 @@ public:
gate = ggml_cont(ctx->ggml_ctx, gate);
gate = ggml_gelu_inplace(ctx->ggml_ctx, gate);
gate = ggml_ext_gelu(ctx->ggml_ctx, gate, true);
x = ggml_mul(ctx->ggml_ctx, x, gate); // [ne3, ne2, ne1, dim_out]
@ -220,7 +220,7 @@ public:
auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
x = proj->forward(ctx, x);
x = ggml_gelu_inplace(ctx->ggml_ctx, x);
x = ggml_ext_gelu(ctx->ggml_ctx, x, true);
return x;
}
};
@ -536,8 +536,8 @@ public:
// image_only_indicator is always tensor([0.])
float alpha = get_alpha();
auto x = ggml_add(ctx->ggml_ctx,
ggml_scale(ctx->ggml_ctx, x_spatial, alpha),
ggml_scale(ctx->ggml_ctx, x_temporal, 1.0f - alpha));
ggml_ext_scale(ctx->ggml_ctx, x_spatial, alpha),
ggml_ext_scale(ctx->ggml_ctx, x_temporal, 1.0f - alpha));
return x;
}
};

View File

@ -51,7 +51,7 @@ public:
x_cat = ggml_concat(ctx->ggml_ctx, x_cat, x4, 2);
auto x5 = conv5->forward(ctx, x_cat);
x5 = ggml_add(ctx->ggml_ctx, ggml_scale(ctx->ggml_ctx, x5, 0.2f), x);
x5 = ggml_add(ctx->ggml_ctx, ggml_ext_scale(ctx->ggml_ctx, x5, 0.2f), x);
return x5;
}
};
@ -76,7 +76,7 @@ public:
out = rdb2->forward(ctx, out);
out = rdb3->forward(ctx, out);
out = ggml_add(ctx->ggml_ctx, ggml_scale(ctx->ggml_ctx, out, 0.2f), x);
out = ggml_add(ctx->ggml_ctx, ggml_ext_scale(ctx->ggml_ctx, out, 0.2f), x);
return out;
}
};

View File

@ -153,7 +153,7 @@ namespace Flux {
if (use_mlp_silu_act) {
x = ggml_ext_silu_act(ctx->ggml_ctx, x);
} else {
x = ggml_gelu_inplace(ctx->ggml_ctx, x);
x = ggml_ext_gelu(ctx->ggml_ctx, x, true);
}
x = mlp_2->forward(ctx, x);
return x;
@ -511,7 +511,7 @@ namespace Flux {
} else if (use_mlp_silu_act) {
mlp = ggml_ext_silu_act(ctx->ggml_ctx, mlp);
} else {
mlp = ggml_gelu_inplace(ctx->ggml_ctx, mlp);
mlp = ggml_ext_gelu(ctx->ggml_ctx, mlp, true);
}
auto attn_mlp = ggml_concat(ctx->ggml_ctx, attn, mlp, 0); // [N, n_token, hidden_size + mlp_hidden_dim]
auto output = linear2->forward(ctx, attn_mlp); // [N, n_token, hidden_size]

View File

@ -949,6 +949,49 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_group_norm_32(struct ggml_context
return ggml_group_norm(ctx, a, 32, eps);
}
__STATIC_INLINE__ struct ggml_tensor* ggml_ext_scale(struct ggml_context* ctx,
                                                     struct ggml_tensor* x,
                                                     float factor,
                                                     bool inplace = false) {
    // Multiply every element of x by factor.
    // ggml's scale op assumes a contiguous layout, so materialize a
    // contiguous copy first when x is a strided view.
    struct ggml_tensor* t = ggml_is_contiguous(x) ? x : ggml_cont(ctx, x);
    return inplace ? ggml_scale_inplace(ctx, t, factor)
                   : ggml_scale(ctx, t, factor);
}
__STATIC_INLINE__ struct ggml_tensor* ggml_ext_gelu(struct ggml_context* ctx,
                                                    struct ggml_tensor* x,
                                                    bool inplace = false) {
    // Apply the GELU activation element-wise.
    // The unary op expects contiguous data, so flatten any strided view
    // before dispatching to the (optionally in-place) kernel.
    struct ggml_tensor* t = ggml_is_contiguous(x) ? x : ggml_cont(ctx, x);
    return inplace ? ggml_gelu_inplace(ctx, t)
                   : ggml_gelu(ctx, t);
}
__STATIC_INLINE__ struct ggml_tensor* ggml_ext_gelu_quick(struct ggml_context* ctx,
                                                          struct ggml_tensor* x,
                                                          bool inplace = false) {
    // Apply the fast (sigmoid-approximation) GELU activation element-wise.
    // As with the other unary wrappers, guarantee contiguity first so the
    // kernel sees a densely packed tensor.
    struct ggml_tensor* t = ggml_is_contiguous(x) ? x : ggml_cont(ctx, x);
    return inplace ? ggml_gelu_quick_inplace(ctx, t)
                   : ggml_gelu_quick(ctx, t);
}
__STATIC_INLINE__ struct ggml_tensor* ggml_ext_linear(struct ggml_context* ctx,
struct ggml_tensor* x,
struct ggml_tensor* w,
@ -956,10 +999,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_linear(struct ggml_context* ctx,
bool force_prec_f32 = false,
float scale = 1.f) {
if (scale != 1.f) {
if (!ggml_is_contiguous(x)) {
x = ggml_cont(ctx, x);
}
x = ggml_scale(ctx, x, scale);
x = ggml_ext_scale(ctx, x, scale);
}
if (x->ne[2] * x->ne[3] > 1024) {
// workaround: avoid ggml cuda error
@ -978,7 +1018,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_linear(struct ggml_context* ctx,
}
}
if (scale != 1.f) {
x = ggml_scale(ctx, x, 1.f / scale);
x = ggml_ext_scale(ctx, x, 1.f / scale);
}
if (b != nullptr) {
x = ggml_add_inplace(ctx, x, b);
@ -1047,7 +1087,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_conv_2d(struct ggml_context* ctx,
bool circular_y = false,
float scale = 1.f) {
if (scale != 1.f) {
x = ggml_scale(ctx, x, scale);
x = ggml_ext_scale(ctx, x, scale);
}
if (w->ne[2] != x->ne[2] && ggml_n_dims(w) == 2) {
w = ggml_reshape_4d(ctx, w, 1, 1, w->ne[0], w->ne[1]);
@ -1065,7 +1105,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_conv_2d(struct ggml_context* ctx,
x = ggml_conv_2d(ctx, w, x, s0, s1, p0, p1, d0, d1);
}
if (scale != 1.f) {
x = ggml_scale(ctx, x, 1.f / scale);
x = ggml_ext_scale(ctx, x, 1.f / scale);
}
if (b != nullptr) {
b = ggml_reshape_4d(ctx, b, 1, 1, b->ne[0], 1);
@ -1163,7 +1203,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_full(struct ggml_context* ctx,
int64_t ne2,
int64_t ne3) {
auto one = ggml_get_tensor(ctx, "ggml_runner_build_in_tensor:one");
auto t = ggml_scale(ctx, one, value); // [1,]
auto t = ggml_ext_scale(ctx, one, value); // [1,]
t = ggml_repeat_4d(ctx, t, ne0, ne1, ne2, ne3); // [ne0, ne1, ne2, ne3]
return t;
}
@ -1263,7 +1303,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_attention_ext(struct ggml_context
k_in = ggml_pad(ctx, k_in, 0, kv_pad, 0, 0);
}
if (kv_scale != 1.0f) {
k_in = ggml_scale(ctx, k_in, kv_scale);
k_in = ggml_ext_scale(ctx, k_in, kv_scale);
}
k_in = ggml_cast(ctx, k_in, GGML_TYPE_F16);
@ -1273,7 +1313,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_attention_ext(struct ggml_context
v_in = ggml_pad(ctx, v_in, 0, kv_pad, 0, 0);
}
if (kv_scale != 1.0f) {
v_in = ggml_scale(ctx, v_in, kv_scale);
v_in = ggml_ext_scale(ctx, v_in, kv_scale);
}
v_in = ggml_cast(ctx, v_in, GGML_TYPE_F16);
@ -1305,7 +1345,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_attention_ext(struct ggml_context
auto out = ggml_flash_attn_ext(ctx, q_in, k_in, v_in, mask_in, scale / kv_scale, 0, 0);
ggml_flash_attn_ext_set_prec(out, GGML_PREC_F32);
if (kv_scale != 1.0f) {
out = ggml_scale(ctx, out, 1.0f / kv_scale);
out = ggml_ext_scale(ctx, out, 1.0f / kv_scale);
}
return out;
};
@ -1515,7 +1555,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_ext_timestep_embedding(
int dim,
int max_period = 10000,
float time_factor = 1.0f) {
timesteps = ggml_scale(ctx, timesteps, time_factor);
timesteps = ggml_ext_scale(ctx, timesteps, time_factor);
return ggml_timestep_embedding(ctx, timesteps, dim, max_period);
}

View File

@ -638,7 +638,7 @@ namespace LLM {
x = ln_q->forward(ctx, x);
x = ggml_reshape_2d(ctx->ggml_ctx, x, hidden_size, ggml_nelements(x) / hidden_size);
x = mlp_0->forward(ctx, x);
x = ggml_gelu(ctx->ggml_ctx, x);
x = ggml_ext_gelu(ctx->ggml_ctx, x);
x = mlp_2->forward(ctx, x);
return x;
}

View File

@ -195,7 +195,7 @@ struct LoraModel : public GGMLRunner {
scale_value *= multiplier;
auto curr_updown = ggml_ext_merge_lora(ctx, lora_down, lora_up, lora_mid);
curr_updown = ggml_scale_inplace(ctx, curr_updown, scale_value);
curr_updown = ggml_ext_scale(ctx, curr_updown, scale_value, true);
if (updown == nullptr) {
updown = curr_updown;
@ -235,7 +235,7 @@ struct LoraModel : public GGMLRunner {
float scale_value = 1.0f;
scale_value *= multiplier;
curr_updown = ggml_scale_inplace(ctx, curr_updown, scale_value);
curr_updown = ggml_ext_scale(ctx, curr_updown, scale_value, true);
if (updown == nullptr) {
updown = curr_updown;
@ -340,7 +340,7 @@ struct LoraModel : public GGMLRunner {
struct ggml_tensor* updown_1 = ggml_ext_merge_lora(ctx, hada_1_down, hada_1_up, hada_1_mid);
struct ggml_tensor* updown_2 = ggml_ext_merge_lora(ctx, hada_2_down, hada_2_up, hada_2_mid);
auto curr_updown = ggml_mul_inplace(ctx, updown_1, updown_2);
curr_updown = ggml_scale_inplace(ctx, curr_updown, scale_value);
curr_updown = ggml_ext_scale(ctx, curr_updown, scale_value, true);
if (updown == nullptr) {
updown = curr_updown;
} else {
@ -456,7 +456,7 @@ struct LoraModel : public GGMLRunner {
scale_value *= multiplier;
auto curr_updown = ggml_ext_kronecker(ctx, lokr_w1, lokr_w2);
curr_updown = ggml_scale_inplace(ctx, curr_updown, scale_value);
curr_updown = ggml_ext_scale(ctx, curr_updown, scale_value, true);
if (updown == nullptr) {
updown = curr_updown;
@ -634,7 +634,7 @@ struct LoraModel : public GGMLRunner {
forward_params.conv2d.scale);
}
auto curr_out_diff = ggml_scale_inplace(ctx, lx, scale_value);
auto curr_out_diff = ggml_ext_scale(ctx, lx, scale_value, true);
if (out_diff == nullptr) {
out_diff = curr_out_diff;

View File

@ -33,7 +33,7 @@ public:
auto fc2 = std::dynamic_pointer_cast<Linear>(blocks["fc2"]);
x = fc1->forward(ctx, x);
x = ggml_gelu_inplace(ctx->ggml_ctx, x);
x = ggml_ext_gelu(ctx->ggml_ctx, x, true);
x = fc2->forward(ctx, x);
return x;
}

View File

@ -33,7 +33,7 @@ public:
x = layer_norm->forward(ctx, x);
// x = ggml_add(ctx, ggml_mul_mat(ctx, fc1_w, x), fc1_b);
x = fc1->forward(ctx, x);
x = ggml_gelu_inplace(ctx->ggml_ctx, x);
x = ggml_ext_gelu(ctx->ggml_ctx, x, true);
x = fc2->forward(ctx, x);
// x = ggml_add(ctx, ggml_mul_mat(ctx, fc2_w, x), fc2_b);
if (use_residue)
@ -129,8 +129,8 @@ public:
k = reshape_tensor(ctx->ggml_ctx, k, heads);
v = reshape_tensor(ctx->ggml_ctx, v, heads);
scale = 1.f / sqrt(sqrt((float)dim_head));
k = ggml_scale_inplace(ctx->ggml_ctx, k, scale);
q = ggml_scale_inplace(ctx->ggml_ctx, q, scale);
k = ggml_ext_scale(ctx->ggml_ctx, k, scale, true);
q = ggml_ext_scale(ctx->ggml_ctx, q, scale, true);
// auto weight = ggml_mul_mat(ctx, q, k);
auto weight = ggml_mul_mat(ctx->ggml_ctx, k, q); // NOTE order of mul is opposite to pytorch

4
t5.hpp
View File

@ -515,7 +515,7 @@ public:
auto wi_1 = std::dynamic_pointer_cast<Linear>(blocks["wi_1"]);
auto wo = std::dynamic_pointer_cast<Linear>(blocks["wo"]);
auto hidden_gelu = ggml_gelu_inplace(ctx->ggml_ctx, wi_0->forward(ctx, x));
auto hidden_gelu = ggml_ext_gelu(ctx->ggml_ctx, wi_0->forward(ctx, x), true);
auto hidden_linear = wi_1->forward(ctx, x);
x = ggml_mul_inplace(ctx->ggml_ctx, hidden_gelu, hidden_linear);
x = wo->forward(ctx, x);
@ -608,7 +608,7 @@ public:
}
}
k = ggml_scale_inplace(ctx->ggml_ctx, k, ::sqrtf(static_cast<float>(d_head)));
k = ggml_ext_scale(ctx->ggml_ctx, k, ::sqrtf(static_cast<float>(d_head)), true);
x = ggml_ext_attention_ext(ctx->ggml_ctx, ctx->backend, q, k, v, num_heads, mask); // [N, n_token, d_head * n_head]

13
tae.hpp
View File

@ -161,9 +161,9 @@ public:
// z: [n, z_channels, h, w]
// return: [n, out_channels, h*8, w*8]
auto h = ggml_scale(ctx->ggml_ctx, z, 1.0f / 3.0f);
auto h = ggml_ext_scale(ctx->ggml_ctx, z, 1.0f / 3.0f);
h = ggml_tanh_inplace(ctx->ggml_ctx, h);
h = ggml_scale(ctx->ggml_ctx, h, 3.0f);
h = ggml_ext_scale(ctx->ggml_ctx, h, 3.0f);
for (int i = 0; i < num_blocks * 3 + 10; i++) {
if (blocks.find(std::to_string(i)) == blocks.end()) {
@ -400,10 +400,11 @@ public:
auto first_conv = std::dynamic_pointer_cast<Conv2d>(blocks["1"]);
// Clamp()
auto h = ggml_scale_inplace(ctx->ggml_ctx,
ggml_tanh_inplace(ctx->ggml_ctx,
ggml_scale(ctx->ggml_ctx, z, 1.0f / 3.0f)),
3.0f);
auto h = ggml_ext_scale(ctx->ggml_ctx,
ggml_tanh_inplace(ctx->ggml_ctx,
ggml_ext_scale(ctx->ggml_ctx, z, 1.0f / 3.0f)),
3.0f,
true);
h = first_conv->forward(ctx, h);
h = ggml_relu_inplace(ctx->ggml_ctx, h);

View File

@ -529,7 +529,7 @@ public:
}
}
if (controls.size() > 0) {
auto cs = ggml_scale_inplace(ctx->ggml_ctx, controls[controls.size() - 1], control_strength);
auto cs = ggml_ext_scale(ctx->ggml_ctx, controls[controls.size() - 1], control_strength, true);
h = ggml_add(ctx->ggml_ctx, h, cs); // middle control
}
int control_offset = static_cast<int>(controls.size() - 2);
@ -542,7 +542,7 @@ public:
hs.pop_back();
if (controls.size() > 0) {
auto cs = ggml_scale_inplace(ctx->ggml_ctx, controls[control_offset], control_strength);
auto cs = ggml_ext_scale(ctx->ggml_ctx, controls[control_offset], control_strength, true);
h_skip = ggml_add(ctx->ggml_ctx, h_skip, cs); // control net condition
control_offset--;
}

View File

@ -253,8 +253,8 @@ public:
float alpha = get_alpha();
x = ggml_add(ctx->ggml_ctx,
ggml_scale(ctx->ggml_ctx, x, alpha),
ggml_scale(ctx->ggml_ctx, x_mix, 1.0f - alpha));
ggml_ext_scale(ctx->ggml_ctx, x, alpha),
ggml_ext_scale(ctx->ggml_ctx, x_mix, 1.0f - alpha));
x = ggml_cont(ctx->ggml_ctx, ggml_permute(ctx->ggml_ctx, x, 0, 2, 1, 3)); // b c t (h w) -> b t c (h w)
x = ggml_reshape_4d(ctx->ggml_ctx, x, W, H, C, T * B); // b t c (h w) -> (b t) c h w

View File

@ -1576,7 +1576,7 @@ namespace WAN {
y = modulate_add(ctx->ggml_ctx, y, es[3]);
y = ffn_0->forward(ctx, y);
y = ggml_gelu_inplace(ctx->ggml_ctx, y);
y = ggml_ext_gelu(ctx->ggml_ctx, y, true);
y = ffn_2->forward(ctx, y);
x = ggml_add(ctx->ggml_ctx, x, modulate_mul(ctx->ggml_ctx, y, es[5]));
@ -1723,7 +1723,7 @@ namespace WAN {
auto x = proj_0->forward(ctx, image_embeds);
x = proj_1->forward(ctx, x);
x = ggml_gelu_inplace(ctx->ggml_ctx, x);
x = ggml_ext_gelu(ctx->ggml_ctx, x, true);
x = proj_3->forward(ctx, x);
x = proj_4->forward(ctx, x);
@ -1910,7 +1910,7 @@ namespace WAN {
e0 = ggml_reshape_4d(ctx->ggml_ctx, e0, e0->ne[0] / 6, 6, e0->ne[1], e0->ne[2]); // [N, 6, dim] or [N, T, 6, dim]
context = text_embedding_0->forward(ctx, context);
context = ggml_gelu(ctx->ggml_ctx, context);
context = ggml_ext_gelu(ctx->ggml_ctx, context);
context = text_embedding_2->forward(ctx, context); // [N, context_txt_len, dim]
int64_t context_img_len = 0;
@ -1949,7 +1949,7 @@ namespace WAN {
auto result = vace_block->forward(ctx, c, x_orig, e0, pe, context, context_img_len);
auto c_skip = result.first;
c = result.second;
c_skip = ggml_scale(ctx->ggml_ctx, c_skip, vace_strength);
c_skip = ggml_ext_scale(ctx->ggml_ctx, c_skip, vace_strength);
x = ggml_add(ctx->ggml_ctx, x, c_skip);
}
}

View File

@ -517,7 +517,7 @@ namespace ZImage {
out = ggml_ext_slice(ctx->ggml_ctx, out, 1, 0, H); // [N, C, H, W + pad_w]
out = ggml_ext_slice(ctx->ggml_ctx, out, 0, 0, W); // [N, C, H, W]
out = ggml_scale(ctx->ggml_ctx, out, -1.f);
out = ggml_ext_scale(ctx->ggml_ctx, out, -1.f);
return out;
}