mirror of https://github.com/leejet/stable-diffusion.cpp.git (synced 2025-12-12 13:28:37 +00:00)
feat: add VAE encoding tiling support and adaptive overlap (#484)
* implement tiling vae encode support
* Tiling (vae/upscale): adaptive overlap
* Tiling: fix edge case
* Tiling: fix crash when less than 2 tiles per dim
* remove extra dot
* Tiling: fix edge cases for adaptive overlap
* tiling: fix edge case
* set vae tile size via env var
* vae tiling: refactor again, base on smaller buffer for alignment
* Use bigger tiles for encode (to match compute buffer size)
* Fix edge case when tile is bigger than latent
* non-square VAE tiling (#3)
* refactor tile number calculation
* support non-square tiles
* add env var to change tile overlap
* add safeguards and better error messages for SD_TILE_OVERLAP
* add safeguards and include overlapping factor for SD_TILE_SIZE
* avoid rounding issues when specifying SD_TILE_SIZE as a factor
* lower SD_TILE_OVERLAP limit
* zero-init empty output buffer
* Fix decode latent size
* fix encode
* tile size params instead of env
* Tiled vae parameter validation (#6)
* avoid crash with invalid tile sizes, use 0 for default
* refactor default tile size, limit overlap factor
* remove explicit parameter for relative tile size
* limit encoding tile to latent size
* unify code style and format code
* update docs
* fix get_tile_sizes in decode_first_stage

---------

Co-authored-by: Wagner Bruna <wbruna@users.noreply.github.com>
Co-authored-by: leejet <leejet714@gmail.com>
This commit is contained in:
parent 288e2d63c0
commit 2c9b1e2594
@@ -356,6 +356,9 @@ arguments:
--clip-skip N ignore last_dot_pos layers of CLIP network; 1 ignores none, 2 ignores one layer (default: -1)
<= 0 represents unspecified, will be 1 for SD1.x, 2 for SD2.x
--vae-tiling process vae in tiles to reduce memory usage
--vae-tile-size [X]x[Y] tile size for vae tiling (default: 32x32)
--vae-relative-tile-size [X]x[Y] relative tile size for vae tiling, in fraction of image size if < 1, in number of tiles per dim if >=1 (overrides --vae-tile-size)
--vae-tile-overlap OVERLAP tile overlap for vae tiling, in fraction of tile size (default: 0.5)
--vae-on-cpu keep vae in cpu (for low vram)
--clip-on-cpu keep clip in cpu (for low vram)
--diffusion-fa use flash attention in the diffusion model (for low vram)
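
To illustrate how the new flags map onto the tiling parameters, a hypothetical example (the field names come from the sd_tiling_params_t struct and parsing code added later in this commit; the values are made up):

    // Illustrative only, not part of the patch.
    sd_tiling_params_t tiling = {false, 0, 0, 0.5f, 0.0f, 0.0f};  // library defaults
    tiling.enabled        = true;    // --vae-tiling
    tiling.tile_size_x    = 64;      // --vae-tile-size 64x32, interpreted against the latent resolution
    tiling.tile_size_y    = 32;
    tiling.target_overlap = 0.25f;   // --vae-tile-overlap 0.25 (fraction of the tile size)
    // --vae-relative-tile-size 0.5 would instead set rel_size_x = rel_size_y = 0.5f
    // (half of each dimension per tile); values >= 1 are read as tiles per dimension,
    // and rel_size_* overrides tile_size_* whenever it is non-zero.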
@@ -101,7 +101,6 @@ struct SDParams {
rng_type_t rng_type = CUDA_RNG;
int64_t seed = 42;
bool verbose = false;
bool vae_tiling = false;
bool offload_params_to_cpu = false;
bool control_net_cpu = false;
bool normalize_input = false;

@@ -119,6 +118,8 @@ struct SDParams {
int chroma_t5_mask_pad = 1;
float flow_shift = INFINITY;

sd_tiling_params_t vae_tiling_params = {false, 0, 0, 0.5f, 0.0f, 0.0f};

SDParams() {
sd_sample_params_init(&sample_params);
sd_sample_params_init(&high_noise_sample_params);

@@ -180,7 +181,7 @@ void print_params(SDParams params) {
printf(" rng: %s\n", sd_rng_type_name(params.rng_type));
printf(" seed: %ld\n", params.seed);
printf(" batch_count: %d\n", params.batch_count);
printf(" vae_tiling: %s\n", params.vae_tiling ? "true" : "false");
printf(" vae_tiling: %s\n", params.vae_tiling_params.enabled ? "true" : "false");
printf(" upscale_repeats: %d\n", params.upscale_repeats);
printf(" chroma_use_dit_mask: %s\n", params.chroma_use_dit_mask ? "true" : "false");
printf(" chroma_use_t5_mask: %s\n", params.chroma_use_t5_mask ? "true" : "false");

@@ -268,6 +269,9 @@ void print_usage(int argc, const char* argv[]) {
printf(" --clip-skip N ignore last_dot_pos layers of CLIP network; 1 ignores none, 2 ignores one layer (default: -1)\n");
printf(" <= 0 represents unspecified, will be 1 for SD1.x, 2 for SD2.x\n");
printf(" --vae-tiling process vae in tiles to reduce memory usage\n");
printf(" --vae-tile-size [X]x[Y] tile size for vae tiling (default: 32x32)\n");
printf(" --vae-relative-tile-size [X]x[Y] relative tile size for vae tiling, in fraction of image size if < 1, in number of tiles per dim if >=1 (overrides --vae-tile-size)\n");
printf(" --vae-tile-overlap OVERLAP tile overlap for vae tiling, in fraction of tile size (default: 0.5)\n");
printf(" --vae-on-cpu keep vae in cpu (for low vram)\n");
printf(" --clip-on-cpu keep clip in cpu (for low vram)\n");
printf(" --diffusion-fa use flash attention in the diffusion model (for low vram)\n");

@@ -485,7 +489,6 @@ void parse_args(int argc, const char** argv, SDParams& params) {
{"-o", "--output", "", &params.output_path},
{"-p", "--prompt", "", &params.prompt},
{"-n", "--negative-prompt", "", &params.negative_prompt},

{"", "--upscale-model", "", &params.esrgan_path},
};

@@ -523,10 +526,11 @@ void parse_args(int argc, const char** argv, SDParams& params) {
{"", "--control-strength", "", &params.control_strength},
{"", "--moe-boundary", "", &params.moe_boundary},
{"", "--flow-shift", "", &params.flow_shift},
{"", "--vae-tile-overlap", "", &params.vae_tiling_params.target_overlap},
};

options.bool_options = {
{"", "--vae-tiling", "", true, &params.vae_tiling},
{"", "--vae-tiling", "", true, &params.vae_tiling_params.enabled},
{"", "--offload-to-cpu", "", true, &params.offload_params_to_cpu},
{"", "--control-net-cpu", "", true, &params.control_net_cpu},
{"", "--normalize-input", "", true, &params.normalize_input},

@@ -726,6 +730,52 @@ void parse_args(int argc, const char** argv, SDParams& params) {
return 1;
};

auto on_tile_size_arg = [&](int argc, const char** argv, int index) {
if (++index >= argc) {
return -1;
}
std::string tile_size_str = argv[index];
size_t x_pos = tile_size_str.find('x');
try {
if (x_pos != std::string::npos) {
std::string tile_x_str = tile_size_str.substr(0, x_pos);
std::string tile_y_str = tile_size_str.substr(x_pos + 1);
params.vae_tiling_params.tile_size_x = std::stoi(tile_x_str);
params.vae_tiling_params.tile_size_y = std::stoi(tile_y_str);
} else {
params.vae_tiling_params.tile_size_x = params.vae_tiling_params.tile_size_y = std::stoi(tile_size_str);
}
} catch (const std::invalid_argument& e) {
return -1;
} catch (const std::out_of_range& e) {
return -1;
}
return 1;
};

auto on_relative_tile_size_arg = [&](int argc, const char** argv, int index) {
if (++index >= argc) {
return -1;
}
std::string rel_size_str = argv[index];
size_t x_pos = rel_size_str.find('x');
try {
if (x_pos != std::string::npos) {
std::string rel_x_str = rel_size_str.substr(0, x_pos);
std::string rel_y_str = rel_size_str.substr(x_pos + 1);
params.vae_tiling_params.rel_size_x = std::stof(rel_x_str);
params.vae_tiling_params.rel_size_y = std::stof(rel_y_str);
} else {
params.vae_tiling_params.rel_size_x = params.vae_tiling_params.rel_size_y = std::stof(rel_size_str);
}
} catch (const std::invalid_argument& e) {
return -1;
} catch (const std::out_of_range& e) {
return -1;
}
return 1;
};

options.manual_options = {
{"-M", "--mode", "", on_mode_arg},
{"", "--type", "", on_type_arg},

@@ -739,6 +789,8 @@ void parse_args(int argc, const char** argv, SDParams& params) {
{"", "--high-noise-skip-layers", "", on_high_noise_skip_layers_arg},
{"-r", "--ref-image", "", on_ref_image_arg},
{"-h", "--help", "", on_help_arg},
{"", "--vae-tile-size", "", on_tile_size_arg},
{"", "--vae-relative-tile-size", "", on_relative_tile_size_arg},
};

if (!parse_options(argc, argv, options)) {

@@ -1176,7 +1228,6 @@ int main(int argc, const char* argv[]) {
params.embedding_dir.c_str(),
params.stacked_id_embed_dir.c_str(),
vae_decode_only,
params.vae_tiling,
true,
params.n_threads,
params.wtype,

@@ -1229,6 +1280,7 @@ int main(int argc, const char* argv[]) {
params.style_ratio,
params.normalize_input,
params.input_id_images_path.c_str(),
params.vae_tiling_params,
};

results = generate_image(sd_ctx, &img_gen_params);
ggml_extend.hpp (165 changed lines)
@@ -494,7 +494,10 @@ __STATIC_INLINE__ void ggml_merge_tensor_2d(struct ggml_tensor* input,
struct ggml_tensor* output,
int x,
int y,
int overlap) {
int overlap_x,
int overlap_y,
int x_skip = 0,
int y_skip = 0) {
int64_t width = input->ne[0];
int64_t height = input->ne[1];
int64_t channels = input->ne[2];

@@ -503,17 +506,17 @@ __STATIC_INLINE__ void ggml_merge_tensor_2d(struct ggml_tensor* input,
int64_t img_height = output->ne[1];

GGML_ASSERT(input->type == GGML_TYPE_F32 && output->type == GGML_TYPE_F32);
for (int iy = 0; iy < height; iy++) {
for (int ix = 0; ix < width; ix++) {
for (int iy = y_skip; iy < height; iy++) {
for (int ix = x_skip; ix < width; ix++) {
for (int k = 0; k < channels; k++) {
float new_value = ggml_tensor_get_f32(input, ix, iy, k);
if (overlap > 0) { // blend colors in overlapped area
if (overlap_x > 0 || overlap_y > 0) { // blend colors in overlapped area
float old_value = ggml_tensor_get_f32(output, x + ix, y + iy, k);

const float x_f_0 = (x > 0) ? ix / float(overlap) : 1;
const float x_f_1 = (x < (img_width - width)) ? (width - ix) / float(overlap) : 1;
const float y_f_0 = (y > 0) ? iy / float(overlap) : 1;
const float y_f_1 = (y < (img_height - height)) ? (height - iy) / float(overlap) : 1;
const float x_f_0 = (overlap_x > 0 && x > 0) ? (ix - x_skip) / float(overlap_x) : 1;
const float x_f_1 = (overlap_x > 0 && x < (img_width - width)) ? (width - ix) / float(overlap_x) : 1;
const float y_f_0 = (overlap_y > 0 && y > 0) ? (iy - y_skip) / float(overlap_y) : 1;
const float y_f_1 = (overlap_y > 0 && y < (img_height - height)) ? (height - iy) / float(overlap_y) : 1;

const float x_f = std::min(std::min(x_f_0, x_f_1), 1.f);
const float y_f = std::min(std::min(y_f_0, y_f_1), 1.f);
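
In the rewritten blend, each axis contributes a linear ramp: a tile fades in over overlap_x/overlap_y pixels on the side where a neighbouring tile has already been written, and is used at full weight elsewhere; the two factors are then combined to mix new_value with old_value in the lines that follow this hunk. A minimal standalone sketch of that per-axis weight (simplified, ignoring the x_skip/y_skip offsets; not part of the patch):

    #include <algorithm>

    // pos: position inside the tile along one axis, size: tile extent,
    // overlap: overlap width in pixels, has_prev/has_next: whether another
    // tile borders this one on that side of the output.
    static float blend_weight_1d(int pos, int size, int overlap, bool has_prev, bool has_next) {
        if (overlap <= 0) {
            return 1.0f;  // no overlap on this axis: the new tile simply overwrites
        }
        float w_prev = has_prev ? pos / (float)overlap : 1.0f;           // ramp up next to the previous tile
        float w_next = has_next ? (size - pos) / (float)overlap : 1.0f;  // ramp down towards the next tile
        return std::min(std::min(w_prev, w_next), 1.0f);
    }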
@@ -745,22 +748,102 @@ __STATIC_INLINE__ std::vector<struct ggml_tensor*> ggml_chunk(struct ggml_contex

typedef std::function<void(ggml_tensor*, ggml_tensor*, bool)> on_tile_process;

__STATIC_INLINE__ void sd_tiling_calc_tiles(int& num_tiles_dim,
float& tile_overlap_factor_dim,
int small_dim,
int tile_size,
const float tile_overlap_factor) {
int tile_overlap = (tile_size * tile_overlap_factor);
int non_tile_overlap = tile_size - tile_overlap;

num_tiles_dim = (small_dim - tile_overlap) / non_tile_overlap;
int overshoot_dim = ((num_tiles_dim + 1) * non_tile_overlap + tile_overlap) % small_dim;

if ((overshoot_dim != non_tile_overlap) && (overshoot_dim <= num_tiles_dim * (tile_size / 2 - tile_overlap))) {
// if tiles don't fit perfectly using the desired overlap
// and there is enough room to squeeze an extra tile without overlap becoming >0.5
num_tiles_dim++;
}

tile_overlap_factor_dim = (float)(tile_size * num_tiles_dim - small_dim) / (float)(tile_size * (num_tiles_dim - 1));
if (num_tiles_dim <= 2) {
if (small_dim <= tile_size) {
num_tiles_dim = 1;
tile_overlap_factor_dim = 0;
} else {
num_tiles_dim = 2;
tile_overlap_factor_dim = (2 * tile_size - small_dim) / (float)tile_size;
}
}
}
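
As a sanity check on the overlap search above, a worked example (numbers picked for illustration only):

    // small_dim = 96, tile_size = 32, tile_overlap_factor = 0.5
    // tile_overlap     = 32 * 0.5                 = 16
    // non_tile_overlap = 32 - 16                  = 16
    // num_tiles_dim    = (96 - 16) / 16           = 5
    // overshoot_dim    = ((5 + 1) * 16 + 16) % 96 = 112 % 96 = 16
    //   -> equals non_tile_overlap, so no extra tile is squeezed in
    // tile_overlap_factor_dim = (32 * 5 - 96) / (32 * (5 - 1)) = 64 / 128 = 0.5
    //   (5 tiles of 32 with exactly the requested 50% overlap)
    //
    // small_dim = 24 with the same tile: 24 <= 32, so the fallback at the end
    // yields num_tiles_dim = 1 and tile_overlap_factor_dim = 0.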
// Tiling
__STATIC_INLINE__ void sd_tiling(ggml_tensor* input, ggml_tensor* output, const int scale, const int tile_size, const float tile_overlap_factor, on_tile_process on_processing) {
__STATIC_INLINE__ void sd_tiling_non_square(ggml_tensor* input,
ggml_tensor* output,
const int scale,
const int p_tile_size_x,
const int p_tile_size_y,
const float tile_overlap_factor,
on_tile_process on_processing) {
output = ggml_set_f32(output, 0);

int input_width = (int)input->ne[0];
int input_height = (int)input->ne[1];
int output_width = (int)output->ne[0];
int output_height = (int)output->ne[1];

GGML_ASSERT(((input_width / output_width) == (input_height / output_height)) &&
((output_width / input_width) == (output_height / input_height)));
GGML_ASSERT(((input_width / output_width) == scale) ||
((output_width / input_width) == scale));

int small_width = output_width;
int small_height = output_height;

bool decode = output_width > input_width;
if (decode) {
small_width = input_width;
small_height = input_height;
}

int num_tiles_x;
float tile_overlap_factor_x;
sd_tiling_calc_tiles(num_tiles_x, tile_overlap_factor_x, small_width, p_tile_size_x, tile_overlap_factor);

int num_tiles_y;
float tile_overlap_factor_y;
sd_tiling_calc_tiles(num_tiles_y, tile_overlap_factor_y, small_height, p_tile_size_y, tile_overlap_factor);

LOG_DEBUG("num tiles : %d, %d ", num_tiles_x, num_tiles_y);
LOG_DEBUG("optimal overlap : %f, %f (targeting %f)", tile_overlap_factor_x, tile_overlap_factor_y, tile_overlap_factor);

GGML_ASSERT(input_width % 2 == 0 && input_height % 2 == 0 && output_width % 2 == 0 && output_height % 2 == 0); // should be multiple of 2

int tile_overlap = (int32_t)(tile_size * tile_overlap_factor);
int non_tile_overlap = tile_size - tile_overlap;
int tile_overlap_x = (int32_t)(p_tile_size_x * tile_overlap_factor_x);
int non_tile_overlap_x = p_tile_size_x - tile_overlap_x;

int tile_overlap_y = (int32_t)(p_tile_size_y * tile_overlap_factor_y);
int non_tile_overlap_y = p_tile_size_y - tile_overlap_y;

int tile_size_x = p_tile_size_x < small_width ? p_tile_size_x : small_width;
int tile_size_y = p_tile_size_y < small_height ? p_tile_size_y : small_height;

int input_tile_size_x = tile_size_x;
int input_tile_size_y = tile_size_y;
int output_tile_size_x = tile_size_x;
int output_tile_size_y = tile_size_y;

if (decode) {
output_tile_size_x *= scale;
output_tile_size_y *= scale;
} else {
input_tile_size_x *= scale;
input_tile_size_y *= scale;
}

struct ggml_init_params params = {};
params.mem_size += tile_size * tile_size * input->ne[2] * sizeof(float); // input chunk
params.mem_size += (tile_size * scale) * (tile_size * scale) * output->ne[2] * sizeof(float); // output chunk
params.mem_size += input_tile_size_x * input_tile_size_y * input->ne[2] * sizeof(float); // input chunk
params.mem_size += output_tile_size_x * output_tile_size_y * output->ne[2] * sizeof(float); // output chunk
params.mem_size += 3 * ggml_tensor_overhead();
params.mem_buffer = NULL;
params.no_alloc = false;

@@ -775,29 +858,50 @@ __STATIC_INLINE__ void sd_tiling(ggml_tensor* input, ggml_tensor* output, const
}

// tiling
ggml_tensor* input_tile = ggml_new_tensor_4d(tiles_ctx, GGML_TYPE_F32, tile_size, tile_size, input->ne[2], 1);
ggml_tensor* output_tile = ggml_new_tensor_4d(tiles_ctx, GGML_TYPE_F32, tile_size * scale, tile_size * scale, output->ne[2], 1);
on_processing(input_tile, NULL, true);
int num_tiles = ceil((float)input_width / non_tile_overlap) * ceil((float)input_height / non_tile_overlap);
ggml_tensor* input_tile = ggml_new_tensor_4d(tiles_ctx, GGML_TYPE_F32, input_tile_size_x, input_tile_size_y, input->ne[2], 1);
ggml_tensor* output_tile = ggml_new_tensor_4d(tiles_ctx, GGML_TYPE_F32, output_tile_size_x, output_tile_size_y, output->ne[2], 1);
int num_tiles = num_tiles_x * num_tiles_y;
LOG_INFO("processing %i tiles", num_tiles);
pretty_progress(1, num_tiles, 0.0f);
pretty_progress(0, num_tiles, 0.0f);
int tile_count = 1;
bool last_y = false, last_x = false;
float last_time = 0.0f;
for (int y = 0; y < input_height && !last_y; y += non_tile_overlap) {
if (y + tile_size >= input_height) {
y = input_height - tile_size;
for (int y = 0; y < small_height && !last_y; y += non_tile_overlap_y) {
int dy = 0;
if (y + tile_size_y >= small_height) {
int _y = y;
y = small_height - tile_size_y;
dy = _y - y;
if (decode) {
dy *= scale;
}
last_y = true;
}
for (int x = 0; x < input_width && !last_x; x += non_tile_overlap) {
if (x + tile_size >= input_width) {
x = input_width - tile_size;
for (int x = 0; x < small_width && !last_x; x += non_tile_overlap_x) {
int dx = 0;
if (x + tile_size_x >= small_width) {
int _x = x;
x = small_width - tile_size_x;
dx = _x - x;
if (decode) {
dx *= scale;
}
last_x = true;
}

int x_in = decode ? x : scale * x;
int y_in = decode ? y : scale * y;
int x_out = decode ? x * scale : x;
int y_out = decode ? y * scale : y;

int overlap_x_out = decode ? tile_overlap_x * scale : tile_overlap_x;
int overlap_y_out = decode ? tile_overlap_y * scale : tile_overlap_y;

int64_t t1 = ggml_time_ms();
ggml_split_tensor_2d(input, input_tile, x, y);
ggml_split_tensor_2d(input, input_tile, x_in, y_in);
on_processing(input_tile, output_tile, false);
ggml_merge_tensor_2d(output_tile, output, x * scale, y * scale, tile_overlap * scale);
ggml_merge_tensor_2d(output_tile, output, x_out, y_out, overlap_x_out, overlap_y_out, dx, dy);

int64_t t2 = ggml_time_ms();
last_time = (t2 - t1) / 1000.0f;
pretty_progress(tile_count, num_tiles, last_time);

@@ -811,6 +915,15 @@ __STATIC_INLINE__ void sd_tiling(ggml_tensor* input, ggml_tensor* output, const
ggml_free(tiles_ctx);
}

__STATIC_INLINE__ void sd_tiling(ggml_tensor* input,
ggml_tensor* output,
const int scale,
const int tile_size,
const float tile_overlap_factor,
on_tile_process on_processing) {
sd_tiling_non_square(input, output, scale, tile_size, tile_size, tile_overlap_factor, on_processing);
}

__STATIC_INLINE__ struct ggml_tensor* ggml_group_norm_32(struct ggml_context* ctx,
struct ggml_tensor* a) {
const float eps = 1e-6f; // default eps parameter
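
Putting the pieces together, a caller drives the tiler through the on_tile_process callback. A minimal sketch of the calling pattern (vae_model, latent, image, n_threads and work_ctx are placeholders; the real call sites appear in stable-diffusion.cpp below):

    // Hypothetical caller; mirrors how encode/decode hook into the tiler.
    auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
        // run the VAE on a single tile; the result is written into `out`
        vae_model->compute(n_threads, in, /*decode=*/true, &out, work_ctx);
    };
    // latent -> image: scale 8, 64x32 latent tiles, aiming for 25% overlap
    sd_tiling_non_square(latent, image, 8, 64, 32, 0.25f, on_tiling);
    // or, with square tiles, through the thin wrapper:
    sd_tiling(latent, image, 8, 32, 0.5f, on_tiling);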
@@ -108,10 +108,10 @@ public:
std::shared_ptr<PhotoMakerIDEmbed> pmid_id_embeds;

std::string taesd_path;
bool use_tiny_autoencoder = false;
bool vae_tiling = false;
bool offload_params_to_cpu = false;
bool stacked_id = false;
bool use_tiny_autoencoder = false;
sd_tiling_params_t vae_tiling_params = {false, 0, 0, 0.5f, 0, 0};
bool offload_params_to_cpu = false;
bool stacked_id = false;

bool is_using_v_parameterization = false;
bool is_using_edm_v_parameterization = false;

@@ -183,7 +183,6 @@ public:
lora_model_dir = SAFE_STR(sd_ctx_params->lora_model_dir);
taesd_path = SAFE_STR(sd_ctx_params->taesd_path);
use_tiny_autoencoder = taesd_path.size() > 0;
vae_tiling = sd_ctx_params->vae_tiling;
offload_params_to_cpu = sd_ctx_params->offload_params_to_cpu;

if (sd_ctx_params->rng_type == STD_DEFAULT_RNG) {

@@ -1297,15 +1296,77 @@ public:
return latent;
}

ggml_tensor* encode_first_stage(ggml_context* work_ctx, ggml_tensor* x, bool decode_video = false) {
void get_tile_sizes(int& tile_size_x,
int& tile_size_y,
float& tile_overlap,
const sd_tiling_params_t& params,
int latent_x,
int latent_y,
float encoding_factor = 1.0f) {
tile_overlap = std::max(std::min(params.target_overlap, 0.5f), 0.0f);
auto get_tile_size = [&](int requested_size, float factor, int latent_size) {
const int default_tile_size = 32;
const int min_tile_dimension = 4;
int tile_size = default_tile_size;
// factor <= 1 means simple fraction of the latent dimension
// factor > 1 means number of tiles across that dimension
if (factor > 0.f) {
if (factor > 1.0)
factor = 1 / (factor - factor * tile_overlap + tile_overlap);
tile_size = std::round(latent_size * factor);
} else if (requested_size >= min_tile_dimension) {
tile_size = requested_size;
}
tile_size *= encoding_factor;
return std::max(std::min(tile_size, latent_size), min_tile_dimension);
};

tile_size_x = get_tile_size(params.tile_size_x, params.rel_size_x, latent_x);
tile_size_y = get_tile_size(params.tile_size_y, params.rel_size_y, latent_y);
}

ggml_tensor* encode_first_stage(ggml_context* work_ctx, ggml_tensor* x, bool encode_video = false) {
int64_t t0 = ggml_time_ms();
ggml_tensor* result = NULL;
int W = x->ne[0] / 8;
int H = x->ne[1] / 8;
if (vae_tiling_params.enabled && !encode_video) {
// TODO wan2.2 vae support?
int C = sd_version_is_dit(version) ? 16 : 4;
if (!use_tiny_autoencoder) {
C *= 2;
}
result = ggml_new_tensor_4d(work_ctx, GGML_TYPE_F32, W, H, C, x->ne[3]);
}

if (!use_tiny_autoencoder) {
float tile_overlap;
int tile_size_x, tile_size_y;
// multiply tile size for encode to keep the compute buffer size consistent
get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, vae_tiling_params, W, H, 1.30539f);

LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y);

process_vae_input_tensor(x);
first_stage_model->compute(n_threads, x, false, &result, work_ctx);
if (vae_tiling_params.enabled && !encode_video) {
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
first_stage_model->compute(n_threads, in, false, &out, work_ctx);
};
sd_tiling_non_square(x, result, 8, tile_size_x, tile_size_y, tile_overlap, on_tiling);
} else {
first_stage_model->compute(n_threads, x, false, &result, work_ctx);
}
first_stage_model->free_compute_buffer();
} else {
tae_first_stage->compute(n_threads, x, false, &result, work_ctx);
if (vae_tiling_params.enabled && !encode_video) {
// split latent in 32x32 tiles and compute in several steps
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
tae_first_stage->compute(n_threads, in, false, &out, NULL);
};
sd_tiling(x, result, 8, 64, 0.5f, on_tiling);
} else {
tae_first_stage->compute(n_threads, x, false, &result, work_ctx);
}
tae_first_stage->free_compute_buffer();
}
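
To make the tile-size selection concrete, a hedged walk-through with invented sizes (the arithmetic follows get_tile_sizes above; the 1.30539 factor is only applied on the encode path, per the in-code comment about keeping the compute buffer size consistent):

    // 1024x768 image -> 128x96 latent (W = 1024 / 8, H = 768 / 8).
    //
    // Defaults (tile_size = 0, rel_size = 0, target_overlap = 0.5):
    //   decode: tile = 32x32 latent, overlap 0.5
    //   encode: tile = 32 * 1.30539 -> 41 (integer truncation), clamped to the latent size
    //
    // --vae-relative-tile-size 0.5 (factor < 1: fraction of each latent dimension):
    //   tile_size_x = round(128 * 0.5) = 64, tile_size_y = round(96 * 0.5) = 48
    //
    // --vae-relative-tile-size 2 (factor >= 1: tiles per dimension, corrected for overlap):
    //   factor = 1 / (2 - 2 * 0.5 + 0.5) = 1 / 1.5 ≈ 0.667
    //   tile_size_x = round(128 * 0.667) = 85, tile_size_y = round(96 * 0.667) = 64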
@@ -1422,24 +1483,29 @@ public:
C,
x->ne[3]);
}

int64_t t0 = ggml_time_ms();
if (!use_tiny_autoencoder) {
float tile_overlap;
int tile_size_x, tile_size_y;
get_tile_sizes(tile_size_x, tile_size_y, tile_overlap, vae_tiling_params, x->ne[0], x->ne[1]);

LOG_DEBUG("VAE Tile size: %dx%d", tile_size_x, tile_size_y);

process_latent_out(x);
// x = load_tensor_from_file(work_ctx, "wan_vae_z.bin");
if (vae_tiling && !decode_video) {
if (vae_tiling_params.enabled && !decode_video) {
// split latent in 32x32 tiles and compute in several steps
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
first_stage_model->compute(n_threads, in, true, &out, NULL);
};
sd_tiling(x, result, 8, 32, 0.5f, on_tiling);
sd_tiling_non_square(x, result, 8, tile_size_x, tile_size_y, tile_overlap, on_tiling);
} else {
first_stage_model->compute(n_threads, x, true, &result, work_ctx);
}
first_stage_model->free_compute_buffer();
process_vae_output_tensor(result);
} else {
if (vae_tiling && !decode_video) {
if (vae_tiling_params.enabled && !decode_video) {
// split latent in 64x64 tiles and compute in several steps
auto on_tiling = [&](ggml_tensor* in, ggml_tensor* out, bool init) {
tae_first_stage->compute(n_threads, in, true, &out);

@@ -1561,7 +1627,6 @@ enum scheduler_t str_to_schedule(const char* str) {
void sd_ctx_params_init(sd_ctx_params_t* sd_ctx_params) {
*sd_ctx_params = {};
sd_ctx_params->vae_decode_only = true;
sd_ctx_params->vae_tiling = false;
sd_ctx_params->free_params_immediately = true;
sd_ctx_params->n_threads = get_num_physical_cores();
sd_ctx_params->wtype = SD_TYPE_COUNT;

@@ -1625,7 +1690,6 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) {
SAFE_STR(sd_ctx_params->embedding_dir),
SAFE_STR(sd_ctx_params->stacked_id_embed_dir),
BOOL_STR(sd_ctx_params->vae_decode_only),
BOOL_STR(sd_ctx_params->vae_tiling),
BOOL_STR(sd_ctx_params->free_params_immediately),
sd_ctx_params->n_threads,
sd_type_name(sd_ctx_params->wtype),

@@ -1692,16 +1756,17 @@ char* sd_sample_params_to_str(const sd_sample_params_t* sample_params) {
void sd_img_gen_params_init(sd_img_gen_params_t* sd_img_gen_params) {
*sd_img_gen_params = {};
sd_sample_params_init(&sd_img_gen_params->sample_params);
sd_img_gen_params->clip_skip = -1;
sd_img_gen_params->ref_images_count = 0;
sd_img_gen_params->width = 512;
sd_img_gen_params->height = 512;
sd_img_gen_params->strength = 0.75f;
sd_img_gen_params->seed = -1;
sd_img_gen_params->batch_count = 1;
sd_img_gen_params->control_strength = 0.9f;
sd_img_gen_params->style_strength = 20.f;
sd_img_gen_params->normalize_input = false;
sd_img_gen_params->clip_skip = -1;
sd_img_gen_params->ref_images_count = 0;
sd_img_gen_params->width = 512;
sd_img_gen_params->height = 512;
sd_img_gen_params->strength = 0.75f;
sd_img_gen_params->seed = -1;
sd_img_gen_params->batch_count = 1;
sd_img_gen_params->control_strength = 0.9f;
sd_img_gen_params->style_strength = 20.f;
sd_img_gen_params->normalize_input = false;
sd_img_gen_params->vae_tiling_params = {false, 0, 0, 0.5f, 0.0f, 0.0f};
}

char* sd_img_gen_params_to_str(const sd_img_gen_params_t* sd_img_gen_params) {

@@ -1721,6 +1786,7 @@ char* sd_img_gen_params_to_str(const sd_img_gen_params_t* sd_img_gen_params) {
"sample_params: %s\n"
"strength: %.2f\n"
"seed: %" PRId64
"VAE tiling:"
"\n"
"batch_count: %d\n"
"ref_images_count: %d\n"

@@ -1737,6 +1803,7 @@ char* sd_img_gen_params_to_str(const sd_img_gen_params_t* sd_img_gen_params) {
SAFE_STR(sample_params_str),
sd_img_gen_params->strength,
sd_img_gen_params->seed,
BOOL_STR(sd_img_gen_params->vae_tiling_params.enabled),
sd_img_gen_params->batch_count,
sd_img_gen_params->ref_images_count,
BOOL_STR(sd_img_gen_params->increase_ref_index),

@@ -2173,8 +2240,9 @@ ggml_tensor* generate_init_latent(sd_ctx_t* sd_ctx,
}

sd_image_t* generate_image(sd_ctx_t* sd_ctx, const sd_img_gen_params_t* sd_img_gen_params) {
int width = sd_img_gen_params->width;
int height = sd_img_gen_params->height;
sd_ctx->sd->vae_tiling_params = sd_img_gen_params->vae_tiling_params;
int width = sd_img_gen_params->width;
int height = sd_img_gen_params->height;
if (sd_version_is_dit(sd_ctx->sd->version)) {
if (width % 16 || height % 16) {
LOG_ERROR("Image dimensions must be must be a multiple of 16 on each axis for %s models. (Got %dx%d)",

@@ -114,6 +114,15 @@ enum sd_log_level_t {
SD_LOG_ERROR
};

typedef struct {
bool enabled;
int tile_size_x;
int tile_size_y;
float target_overlap;
float rel_size_x;
float rel_size_y;
} sd_tiling_params_t;

typedef struct {
const char* model_path;
const char* clip_l_path;

@@ -129,7 +138,6 @@ typedef struct {
const char* embedding_dir;
const char* stacked_id_embed_dir;
bool vae_decode_only;
bool vae_tiling;
bool free_params_immediately;
int n_threads;
enum sd_type_t wtype;

@@ -197,6 +205,7 @@ typedef struct {
float style_strength;
bool normalize_input;
const char* input_id_images_path;
sd_tiling_params_t vae_tiling_params;
} sd_img_gen_params_t;

typedef struct {
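
With the old vae_tiling flag gone from sd_ctx_params_t, library callers opt into tiling through sd_img_gen_params_t instead. A minimal, hypothetical sketch (assumes an already-created sd_ctx_t* named sd_ctx; only the tiling-related fields are the point here):

    sd_img_gen_params_t gen_params;
    sd_img_gen_params_init(&gen_params);  // vae_tiling_params starts as {false, 0, 0, 0.5f, 0, 0}
    gen_params.width  = 1024;
    gen_params.height = 1024;
    gen_params.vae_tiling_params.enabled        = true;
    gen_params.vae_tiling_params.tile_size_x    = 64;    // fixed 64x64 latent tiles,
    gen_params.vae_tiling_params.tile_size_y    = 64;    // since rel_size_x / rel_size_y stay 0
    gen_params.vae_tiling_params.target_overlap = 0.25f;
    sd_image_t* images = generate_image(sd_ctx, &gen_params);  // tiling params are copied into the context here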