mirror of
https://github.com/leejet/stable-diffusion.cpp.git
synced 2026-03-24 02:08:51 +00:00
refactor: reorganize the vocab file structure (#1271)
This commit is contained in:
parent
adea272225
commit
636d3cb6ff
@ -90,6 +90,8 @@ file(GLOB SD_LIB_SOURCES
|
||||
"src/*.h"
|
||||
"src/*.cpp"
|
||||
"src/*.hpp"
|
||||
"src/vocab/*.h"
|
||||
"src/vocab/*.cpp"
|
||||
)
|
||||
|
||||
find_program(GIT_EXE NAMES git git.exe NO_CMAKE_FIND_ROOT_PATH)
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
for f in *.cpp *.h *.hpp examples/cli/*.cpp examples/common/*.hpp examples/cli/*.h examples/server/*.cpp; do
|
||||
for f in src/*.cpp src/*.h src/*.hpp src/vocab/*.h src/vocab/*.cpp examples/cli/*.cpp examples/common/*.hpp examples/cli/*.h examples/server/*.cpp; do
|
||||
[[ "$f" == vocab* ]] && continue
|
||||
echo "formatting '$f'"
|
||||
# if [ "$f" != "stable-diffusion.h" ]; then
|
||||
|
||||
@ -4,6 +4,7 @@
|
||||
#include "ggml_extend.hpp"
|
||||
#include "model.h"
|
||||
#include "tokenize_util.h"
|
||||
#include "vocab/vocab.h"
|
||||
|
||||
/*================================================== CLIPTokenizer ===================================================*/
|
||||
|
||||
@ -110,7 +111,7 @@ public:
|
||||
if (merges_utf8_str.size() > 0) {
|
||||
load_from_merges(merges_utf8_str);
|
||||
} else {
|
||||
load_from_merges(ModelLoader::load_merges());
|
||||
load_from_merges(load_clip_merges());
|
||||
}
|
||||
add_special_token("<|startoftext|>");
|
||||
add_special_token("<|endoftext|>");
|
||||
|
||||
@ -19,6 +19,7 @@
|
||||
#include "json.hpp"
|
||||
#include "rope.hpp"
|
||||
#include "tokenize_util.h"
|
||||
#include "vocab/vocab.h"
|
||||
|
||||
namespace LLM {
|
||||
constexpr int LLM_GRAPH_SIZE = 10240;
|
||||
@ -365,7 +366,7 @@ namespace LLM {
|
||||
if (merges_utf8_str.size() > 0) {
|
||||
load_from_merges(merges_utf8_str);
|
||||
} else {
|
||||
load_from_merges(ModelLoader::load_qwen2_merges());
|
||||
load_from_merges(load_qwen2_merges());
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -466,7 +467,7 @@ namespace LLM {
|
||||
if (merges_utf8_str.size() > 0 && vocab_utf8_str.size() > 0) {
|
||||
load_from_merges(merges_utf8_str, vocab_utf8_str);
|
||||
} else {
|
||||
load_from_merges(ModelLoader::load_mistral_merges(), ModelLoader::load_mistral_vocab_json());
|
||||
load_from_merges(load_mistral_merges(), load_mistral_vocab_json());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@ -16,10 +16,6 @@
|
||||
#include "model.h"
|
||||
#include "stable-diffusion.h"
|
||||
#include "util.h"
|
||||
#include "vocab.hpp"
|
||||
#include "vocab_mistral.hpp"
|
||||
#include "vocab_qwen.hpp"
|
||||
#include "vocab_umt5.hpp"
|
||||
|
||||
#include "ggml-alloc.h"
|
||||
#include "ggml-backend.h"
|
||||
@ -1340,36 +1336,6 @@ void ModelLoader::set_wtype_override(ggml_type wtype, std::string tensor_type_ru
|
||||
}
|
||||
}
|
||||
|
||||
std::string ModelLoader::load_merges() {
|
||||
std::string merges_utf8_str(reinterpret_cast<const char*>(merges_utf8_c_str), sizeof(merges_utf8_c_str));
|
||||
return merges_utf8_str;
|
||||
}
|
||||
|
||||
std::string ModelLoader::load_qwen2_merges() {
|
||||
std::string merges_utf8_str(reinterpret_cast<const char*>(qwen2_merges_utf8_c_str), sizeof(qwen2_merges_utf8_c_str));
|
||||
return merges_utf8_str;
|
||||
}
|
||||
|
||||
std::string ModelLoader::load_mistral_merges() {
|
||||
std::string merges_utf8_str(reinterpret_cast<const char*>(mistral_merges_utf8_c_str), sizeof(mistral_merges_utf8_c_str));
|
||||
return merges_utf8_str;
|
||||
}
|
||||
|
||||
std::string ModelLoader::load_mistral_vocab_json() {
|
||||
std::string json_str(reinterpret_cast<const char*>(mistral_vocab_json_utf8_c_str), sizeof(mistral_vocab_json_utf8_c_str));
|
||||
return json_str;
|
||||
}
|
||||
|
||||
std::string ModelLoader::load_t5_tokenizer_json() {
|
||||
std::string json_str(reinterpret_cast<const char*>(t5_tokenizer_json_str), sizeof(t5_tokenizer_json_str));
|
||||
return json_str;
|
||||
}
|
||||
|
||||
std::string ModelLoader::load_umt5_tokenizer_json() {
|
||||
std::string json_str(reinterpret_cast<const char*>(umt5_tokenizer_json_str), sizeof(umt5_tokenizer_json_str));
|
||||
return json_str;
|
||||
}
|
||||
|
||||
bool ModelLoader::load_tensors(on_new_tensor_cb_t on_new_tensor_cb, int n_threads_p, bool enable_mmap) {
|
||||
int64_t process_time_ms = 0;
|
||||
std::atomic<int64_t> read_time_ms(0);
|
||||
|
||||
@ -331,13 +331,6 @@ public:
|
||||
bool tensor_should_be_converted(const TensorStorage& tensor_storage, ggml_type type);
|
||||
int64_t get_params_mem_size(ggml_backend_t backend, ggml_type type = GGML_TYPE_COUNT);
|
||||
~ModelLoader() = default;
|
||||
|
||||
static std::string load_merges();
|
||||
static std::string load_qwen2_merges();
|
||||
static std::string load_mistral_merges();
|
||||
static std::string load_mistral_vocab_json();
|
||||
static std::string load_t5_tokenizer_json();
|
||||
static std::string load_umt5_tokenizer_json();
|
||||
};
|
||||
|
||||
#endif // __MODEL_H__
|
||||
|
||||
@ -14,6 +14,7 @@
|
||||
#include "ggml_extend.hpp"
|
||||
#include "json.hpp"
|
||||
#include "model.h"
|
||||
#include "vocab/vocab.h"
|
||||
|
||||
// Port from: https://github.com/google/sentencepiece/blob/master/src/unigram_model.h
|
||||
// and https://github.com/google/sentencepiece/blob/master/src/unigram_model.h.
|
||||
@ -341,9 +342,9 @@ protected:
|
||||
public:
|
||||
explicit T5UniGramTokenizer(bool is_umt5 = false) {
|
||||
if (is_umt5) {
|
||||
InitializePieces(ModelLoader::load_umt5_tokenizer_json());
|
||||
InitializePieces(load_umt5_tokenizer_json());
|
||||
} else {
|
||||
InitializePieces(ModelLoader::load_t5_tokenizer_json());
|
||||
InitializePieces(load_t5_tokenizer_json());
|
||||
}
|
||||
|
||||
min_score_ = FLT_MAX;
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
static unsigned char merges_utf8_c_str[] = {
|
||||
static const unsigned char clip_merges_utf8_c_str[] = {
|
||||
0x23,
|
||||
0x76,
|
||||
0x65,
|
||||
@ -524620,7 +524620,7 @@ static unsigned char merges_utf8_c_str[] = {
|
||||
0x0a,
|
||||
};
|
||||
|
||||
static unsigned char t5_tokenizer_json_str[] = {
|
||||
static const unsigned char t5_tokenizer_json_str[] = {
|
||||
0x7b,
|
||||
0x0a,
|
||||
0x20,
|
||||
@ -1,4 +1,4 @@
|
||||
unsigned char mistral_merges_utf8_c_str[] = {
|
||||
static const unsigned char mistral_merges_utf8_c_str[] = {
|
||||
0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0x20, 0x74, 0x0a, 0x65,
|
||||
0x20, 0x72, 0x0a, 0x69, 0x20, 0x6e, 0x0a, 0xc4, 0xa0, 0x20, 0xc4, 0xa0,
|
||||
0xc4, 0xa0, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0xc4, 0xa0, 0x20, 0xc4, 0xa0,
|
||||
@ -260614,7 +260614,7 @@ unsigned char mistral_merges_utf8_c_str[] = {
|
||||
0xc3, 0xa5, 0xc4, 0xb2, 0xc4, 0xb0, 0x20, 0xc3, 0xa6, 0xc2, 0xb1, 0xc4,
|
||||
0xab, 0xc3, 0xa4, 0xc2, 0xb9, 0xc2, 0xa6, 0x0a,
|
||||
};
|
||||
unsigned char mistral_vocab_json_utf8_c_str[] = {
|
||||
static const unsigned char mistral_vocab_json_utf8_c_str[] = {
|
||||
0x7b, 0x22, 0x3c, 0x75, 0x6e, 0x6b, 0x3e, 0x22, 0x3a, 0x20, 0x30, 0x2c,
|
||||
0x20, 0x22, 0x3c, 0x73, 0x3e, 0x22, 0x3a, 0x20, 0x31, 0x2c, 0x20, 0x22,
|
||||
0x3c, 0x2f, 0x73, 0x3e, 0x22, 0x3a, 0x20, 0x32, 0x2c, 0x20, 0x22, 0x5b,
|
||||
@ -1,4 +1,4 @@
|
||||
unsigned char qwen2_merges_utf8_c_str[] = {
|
||||
static const unsigned char qwen2_merges_utf8_c_str[] = {
|
||||
0xc4, 0xa0, 0x20, 0xc4, 0xa0, 0x0a, 0xc4, 0xa0, 0xc4, 0xa0, 0x20, 0xc4,
|
||||
0xa0, 0xc4, 0xa0, 0x0a, 0x69, 0x20, 0x6e, 0x0a, 0xc4, 0xa0, 0x20, 0x74,
|
||||
0x0a, 0xc4, 0xa0, 0xc4, 0xa0, 0xc4, 0xa0, 0xc4, 0xa0, 0x20, 0xc4, 0xa0,
|
||||
@ -1,4 +1,4 @@
|
||||
unsigned char umt5_tokenizer_json_str[] = {
|
||||
static const unsigned char umt5_tokenizer_json_str[] = {
|
||||
0x7b, 0x22, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x20,
|
||||
0x22, 0x31, 0x2e, 0x30, 0x22, 0x2c, 0x20, 0x22, 0x74, 0x72, 0x75, 0x6e,
|
||||
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x20, 0x6e, 0x75, 0x6c,
|
||||
35
src/vocab/vocab.cpp
Normal file
35
src/vocab/vocab.cpp
Normal file
@ -0,0 +1,35 @@
|
||||
#include "vocab.h"
|
||||
#include "clip_t5.hpp"
|
||||
#include "mistral.hpp"
|
||||
#include "qwen.hpp"
|
||||
#include "umt5.hpp"
|
||||
|
||||
std::string load_clip_merges() {
|
||||
std::string merges_utf8_str(reinterpret_cast<const char*>(clip_merges_utf8_c_str), sizeof(clip_merges_utf8_c_str));
|
||||
return merges_utf8_str;
|
||||
}
|
||||
|
||||
std::string load_qwen2_merges() {
|
||||
std::string merges_utf8_str(reinterpret_cast<const char*>(qwen2_merges_utf8_c_str), sizeof(qwen2_merges_utf8_c_str));
|
||||
return merges_utf8_str;
|
||||
}
|
||||
|
||||
std::string load_mistral_merges() {
|
||||
std::string merges_utf8_str(reinterpret_cast<const char*>(mistral_merges_utf8_c_str), sizeof(mistral_merges_utf8_c_str));
|
||||
return merges_utf8_str;
|
||||
}
|
||||
|
||||
std::string load_mistral_vocab_json() {
|
||||
std::string json_str(reinterpret_cast<const char*>(mistral_vocab_json_utf8_c_str), sizeof(mistral_vocab_json_utf8_c_str));
|
||||
return json_str;
|
||||
}
|
||||
|
||||
std::string load_t5_tokenizer_json() {
|
||||
std::string json_str(reinterpret_cast<const char*>(t5_tokenizer_json_str), sizeof(t5_tokenizer_json_str));
|
||||
return json_str;
|
||||
}
|
||||
|
||||
std::string load_umt5_tokenizer_json() {
|
||||
std::string json_str(reinterpret_cast<const char*>(umt5_tokenizer_json_str), sizeof(umt5_tokenizer_json_str));
|
||||
return json_str;
|
||||
}
|
||||
13
src/vocab/vocab.h
Normal file
13
src/vocab/vocab.h
Normal file
@ -0,0 +1,13 @@
|
||||
// src/vocab/vocab.h
// Accessors for tokenizer vocabularies and BPE merge tables that are embedded
// in the binary as byte arrays (included by src/vocab/vocab.cpp). Each function
// returns a fresh std::string copy of the corresponding embedded blob.
// NOTE(review): identifiers containing "__" (like __VOCAB_H__) are reserved to
// the C++ implementation; guard name kept for consistency with the project's
// existing __MODEL_H__ convention — consider a project-wide rename.
#ifndef __VOCAB_H__
#define __VOCAB_H__

#include <string>

std::string load_clip_merges();         // CLIP BPE merges (text)
std::string load_qwen2_merges();        // Qwen2 BPE merges (text)
std::string load_mistral_merges();      // Mistral BPE merges (text)
std::string load_mistral_vocab_json();  // Mistral vocabulary (JSON)
std::string load_t5_tokenizer_json();   // T5 tokenizer definition (JSON)
std::string load_umt5_tokenizer_json(); // UMT5 tokenizer definition (JSON)

#endif  // __VOCAB_H__
|
||||
Loading…
x
Reference in New Issue
Block a user