chore: add sd_ prefix to a few functions (#967)

Wagner Bruna 2025-12-01 11:43:52 -03:00 committed by GitHub
parent 0743a1b3b5
commit e4c50f1de5
6 changed files with 11 additions and 11 deletions


@@ -1306,7 +1306,7 @@ void parse_args(int argc, const char** argv, SDParams& params) {
         }
     }
     if (params.n_threads <= 0) {
-        params.n_threads = get_num_physical_cores();
+        params.n_threads = sd_get_num_physical_cores();
     }
     if ((params.mode == IMG_GEN || params.mode == VID_GEN) && params.prompt.length() == 0) {
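This is the usual "auto-detect when unset" fallback: a non-positive thread count is replaced by the detected number of physical cores. A minimal sketch of the same pattern (the struct here is an illustrative stand-in for the CLI's SDParams; only sd_get_num_physical_cores is the library call):

    #include "stable-diffusion.h"

    // Illustrative stand-in for the CLI's parameter struct.
    struct ParamsSketch {
        int n_threads = -1;  // <= 0 means "not set on the command line"
    };

    void resolve_threads(ParamsSketch& p) {
        if (p.n_threads <= 0) {
            p.n_threads = sd_get_num_physical_cores();  // renamed helper
        }
    }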


@@ -265,8 +265,8 @@ void convert_tensor(void* src,
         } else {
             auto qtype = ggml_get_type_traits(src_type);
             if (qtype->to_float == nullptr) {
-                throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available",
-                                                ggml_type_name(src_type)));
+                throw std::runtime_error(sd_format("type %s unsupported for integer quantization: no dequantization available",
+                                                   ggml_type_name(src_type)));
             }
             qtype->to_float(src, (float*)dst, n);
         }
@@ -275,8 +275,8 @@
         // src_type is quantized => dst_type == GGML_TYPE_F16 or dst_type is quantized
         auto qtype = ggml_get_type_traits(src_type);
         if (qtype->to_float == nullptr) {
-            throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available",
-                                            ggml_type_name(src_type)));
+            throw std::runtime_error(sd_format("type %s unsupported for integer quantization: no dequantization available",
+                                               ggml_type_name(src_type)));
         }
         std::vector<char> buf;
         buf.resize(sizeof(float) * n);
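Both hunks touch the same pattern: ggml's per-type to_float hook is the generic dequantization entry point, and the error path now goes through sd_format. A hedged sketch of that lookup on its own (the helper name is made up; ggml_get_type_traits and to_float are the ggml calls the diff itself uses):

    #include "ggml.h"
    #include <stdexcept>
    #include <vector>

    // Dequantize n elements of a quantized buffer to float32 via ggml's
    // type traits, mirroring the convert_tensor branches above (sketch).
    std::vector<float> dequantize_to_f32(const void* src, ggml_type src_type, int64_t n) {
        auto qtype = ggml_get_type_traits(src_type);
        if (qtype->to_float == nullptr) {
            throw std::runtime_error("no dequantization available for this type");
        }
        std::vector<float> out(n);
        qtype->to_float(src, out.data(), n);  // per-type dequantization kernel
        return out;
    }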
@@ -1355,7 +1355,7 @@ bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_thread
     std::atomic<int64_t> copy_to_backend_time_ms(0);
     std::atomic<int64_t> convert_time_ms(0);
-    int num_threads_to_use = n_threads_p > 0 ? n_threads_p : get_num_physical_cores();
+    int num_threads_to_use = n_threads_p > 0 ? n_threads_p : sd_get_num_physical_cores();
     LOG_DEBUG("using %d threads for model loading", num_threads_to_use);
     int64_t start_time = ggml_time_ms();
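The surrounding std::atomic<int64_t> counters exist so loader threads can add their per-tensor timings without a lock. A sketch of that accumulation pattern, assuming a placeholder workload (ggml_time_init and ggml_time_ms are ggml's real timer functions):

    #include "ggml.h"
    #include <atomic>
    #include <thread>
    #include <vector>

    // Lock-free accumulation of per-thread elapsed time, as in load_tensors.
    void timed_workers(int num_threads) {
        ggml_time_init();  // required once before using ggml timers
        std::atomic<int64_t> convert_time_ms(0);
        std::vector<std::thread> pool;
        for (int i = 0; i < num_threads; ++i) {
            pool.emplace_back([&convert_time_ms] {
                int64_t t0 = ggml_time_ms();
                // ... placeholder for the actual conversion work ...
                convert_time_ms += ggml_time_ms() - t0;  // atomic fetch-add
            });
        }
        for (auto& t : pool) {
            t.join();
        }
    }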


@@ -2474,7 +2474,7 @@ void sd_ctx_params_init(sd_ctx_params_t* sd_ctx_params) {
     *sd_ctx_params = {};
     sd_ctx_params->vae_decode_only = true;
     sd_ctx_params->free_params_immediately = true;
-    sd_ctx_params->n_threads = get_num_physical_cores();
+    sd_ctx_params->n_threads = sd_get_num_physical_cores();
     sd_ctx_params->wtype = SD_TYPE_COUNT;
     sd_ctx_params->rng_type = CUDA_RNG;
     sd_ctx_params->sampler_rng_type = RNG_TYPE_COUNT;
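A hedged usage sketch of this initializer after the rename (new_sd_ctx and free_sd_ctx are the library's context functions; the model path and the override are illustrative):

    #include "stable-diffusion.h"

    int main() {
        sd_ctx_params_t params;
        sd_ctx_params_init(&params);  // n_threads now defaults via sd_get_num_physical_cores()
        params.model_path = "models/sd-v1-5.safetensors";  // illustrative path
        sd_ctx_t* ctx = new_sd_ctx(&params);
        if (ctx != NULL) {
            free_sd_ctx(ctx);
        }
        return 0;
    }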


@@ -288,7 +288,7 @@ typedef void (*sd_preview_cb_t)(int step, int frame_count, sd_image_t* frames, b
 SD_API void sd_set_log_callback(sd_log_cb_t sd_log_cb, void* data);
 SD_API void sd_set_progress_callback(sd_progress_cb_t cb, void* data);
 SD_API void sd_set_preview_callback(sd_preview_cb_t cb, enum preview_t mode, int interval, bool denoised, bool noisy, void* data);
-SD_API int32_t get_num_physical_cores();
+SD_API int32_t sd_get_num_physical_cores();
 SD_API const char* sd_get_system_info();
 SD_API const char* sd_type_name(enum sd_type_t type);
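Because these declarations carry SD_API, the rename changes the public C interface, so existing callers must be updated. A minimal caller after this commit, using only functions declared in the hunk above:

    #include "stable-diffusion.h"
    #include <cstdio>

    int main() {
        // was get_num_physical_cores() before this commit
        printf("physical cores: %d\n", sd_get_num_physical_cores());
        printf("%s\n", sd_get_system_info());
        return 0;
    }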


@@ -57,7 +57,7 @@ void replace_all_chars(std::string& str, char target, char replacement) {
     }
 }
 
-std::string format(const char* fmt, ...) {
+std::string sd_format(const char* fmt, ...) {
     va_list ap;
     va_list ap2;
     va_start(ap, fmt);
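The two va_lists hint at the standard two-pass vsnprintf idiom behind sd_format: measure first, then format into a sized buffer. A self-contained sketch of that idiom (not the library's exact body):

    #include <cstdarg>
    #include <cstdio>
    #include <string>
    #include <vector>

    std::string format_sketch(const char* fmt, ...) {
        va_list ap;
        va_list ap2;
        va_start(ap, fmt);
        va_copy(ap2, ap);                           // keep a copy for the second pass
        int size = vsnprintf(nullptr, 0, fmt, ap);  // pass 1: compute required length
        va_end(ap);
        if (size < 0) {                             // encoding error
            va_end(ap2);
            return "";
        }
        std::vector<char> buf(size + 1);
        vsnprintf(buf.data(), buf.size(), fmt, ap2); // pass 2: actually format
        va_end(ap2);
        return std::string(buf.data(), size);
    }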
@@ -148,7 +148,7 @@ std::string get_full_path(const std::string& dir, const std::string& filename) {
 // get_num_physical_cores is copy from
 // https://github.com/ggerganov/llama.cpp/blob/master/examples/common.cpp
 // LICENSE: https://github.com/ggerganov/llama.cpp/blob/master/LICENSE
-int32_t get_num_physical_cores() {
+int32_t sd_get_num_physical_cores() {
 #ifdef __linux__
     // enumerate the set of thread siblings, num entries is num cores
     std::unordered_set<std::string> siblings;
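As the comments note, the implementation is borrowed from llama.cpp: on Linux it counts distinct thread_siblings sets under sysfs, which collapses SMT siblings into one physical core. A sketch of that enumeration under the same assumptions (fallback heuristic as in llama.cpp):

    #include <cstdint>
    #include <fstream>
    #include <string>
    #include <thread>
    #include <unordered_set>

    int32_t physical_cores_sketch() {
    #ifdef __linux__
        std::unordered_set<std::string> siblings;
        for (uint32_t cpu = 0; cpu < UINT32_MAX; ++cpu) {
            std::ifstream f("/sys/devices/system/cpu/cpu" + std::to_string(cpu) + "/topology/thread_siblings");
            if (!f.is_open()) {
                break;  // no more CPUs
            }
            std::string line;
            if (std::getline(f, line)) {
                siblings.insert(line);  // SMT siblings share one entry
            }
        }
        if (!siblings.empty()) {
            return (int32_t)siblings.size();
        }
    #endif
        // rough fallback: assume 2-way SMT, else guess 4
        unsigned int n = std::thread::hardware_concurrency();
        return n > 0 ? (int32_t)(n / 2) : 4;
    }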

util.h

@@ -14,7 +14,7 @@ bool ends_with(const std::string& str, const std::string& ending);
 bool starts_with(const std::string& str, const std::string& start);
 bool contains(const std::string& str, const std::string& substr);
-std::string format(const char* fmt, ...);
+std::string sd_format(const char* fmt, ...);
 void replace_all_chars(std::string& str, char target, char replacement);