mirror of https://github.com/leejet/stable-diffusion.cpp.git (synced 2025-12-13 05:48:56 +00:00)
remove redundant model loading log
This commit is contained in:
parent 5ed25a8cdd
commit 854fa231f1
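
Summary: this change drops a duplicate timing log from the model setup path. The first two hunks appear to be context around ModelLoader::load_tensors (model.cpp); the last two remove the t0/t1 ggml_time_ms() bookkeeping and the LOG_INFO("loading model from '%s' completed, taking %.2fs", ...) message around the weight-loading step in stable-diffusion.cpp.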
@@ -1966,7 +1966,7 @@ std::vector<TensorStorage> remove_duplicates(const std::vector<TensorStorage>& v
 }
 
 bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb) {
     int64_t process_time_ms = 0;
     int64_t read_time_ms = 0;
     int64_t memcpy_time_ms = 0;
     int64_t copy_to_backend_time_ms = 0;
@@ -1989,7 +1989,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb) {
     std::vector<TensorStorage> dedup = remove_duplicates(processed_tensor_storages);
     processed_tensor_storages = dedup;
     curr_time_ms = ggml_time_ms();
     process_time_ms = curr_time_ms - prev_time_ms;
     prev_time_ms = curr_time_ms;
 
     bool success = true;
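
As the context in these two hunks shows, ModelLoader::load_tensors keeps its own per-stage counters (process_time_ms, read_time_ms, memcpy_time_ms, copy_to_backend_time_ms) around the deduplicated tensor list, so a second wall-clock summary at the caller adds little information; that appears to be the redundancy the commit message refers to.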
@@ -557,8 +557,6 @@ public:
         // load weights
         LOG_DEBUG("loading weights");
 
-        int64_t t0 = ggml_time_ms();
-
         std::set<std::string> ignore_tensors;
         tensors["alphas_cumprod"] = alphas_cumprod_tensor;
         if (use_tiny_autoencoder) {
@@ -656,11 +654,7 @@ public:
                  ggml_backend_is_cpu(clip_backend) ? "RAM" : "VRAM");
         }
 
-        int64_t t1 = ggml_time_ms();
-        LOG_INFO("loading model from '%s' completed, taking %.2fs", SAFE_STR(sd_ctx_params->model_path), (t1 - t0) * 1.0f / 1000);
-
-
         // check is_using_v_parameterization_for_sd2
         if (sd_version_is_sd2(version)) {
             if (is_using_v_parameterization_for_sd2(ctx, sd_version_is_inpaint(version))) {
                 is_using_v_parameterization = true;
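
For reference, the removed code followed the usual elapsed-time logging pattern: take a ggml_time_ms() timestamp before and after the work and report the difference in seconds. Below is a minimal, self-contained sketch of that pattern, not the project's code: it assumes only that ggml is available, uses printf in place of the project's LOG_INFO macro, and load_weights_stub() is a hypothetical placeholder for the real loading work.

// Minimal sketch of the elapsed-time logging pattern removed by this commit.
// Assumes ggml is available; printf stands in for the project's LOG_INFO macro,
// and load_weights_stub() is a hypothetical placeholder for the real work.
#include <cstdint>
#include <cstdio>
#include "ggml.h"

static void load_weights_stub() {
    // placeholder: the real code loads the model tensors here
}

int main() {
    ggml_time_init();             // set up ggml's timer (once per process)
    int64_t t0 = ggml_time_ms();  // timestamp before loading, in milliseconds
    load_weights_stub();
    int64_t t1 = ggml_time_ms();  // timestamp after loading
    std::printf("loading model completed, taking %.2fs\n", (t1 - t0) * 1.0f / 1000);
    return 0;
}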