Mirror of https://github.com/leejet/stable-diffusion.cpp.git (synced 2025-12-12 21:38:58 +00:00)
fix: harden for large files (#643)

parent c607fc3ed4
commit 9e7befa320
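The patch widens TensorStorage::offset and the chunk-size arithmetic from size_t to uint64_t and switches two loop counters to size_t. The uint64_t change is the part that matters for large files: on platforms where size_t is only 32 bits, byte offsets past 4 GiB would otherwise be truncated. The short sketches after each hunk below are editorial illustrations, not part of the patch.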
@@ -107,7 +107,7 @@ const char* unused_tensors[] = {
 };

 bool is_unused_tensor(std::string name) {
-    for (int i = 0; i < sizeof(unused_tensors) / sizeof(const char*); i++) {
+    for (size_t i = 0; i < sizeof(unused_tensors) / sizeof(const char*); i++) {
         if (starts_with(name, unused_tensors[i])) {
             return true;
         }
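The only change in this hunk is the loop counter's type. sizeof(unused_tensors) / sizeof(const char*) has type size_t, so comparing it against an int counter is a signed/unsigned comparison that compilers flag with -Wsign-compare; indexing with size_t keeps the comparison well-typed. A minimal, self-contained sketch of the same pattern (the names array is illustrative, not from the repo):

#include <cstdio>

static const char* names[] = {"alpha", "beta", "gamma"};

int main() {
    // sizeof(names) / sizeof(names[0]) is a size_t, so the index is size_t too
    for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
        std::printf("%zu: %s\n", i, names[i]);
    }
    return 0;
}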
model.h (6 changed lines)
@@ -119,7 +119,7 @@ struct TensorStorage {

     size_t file_index = 0;
     int index_in_zip = -1;  // >= means stored in a zip file
-    size_t offset = 0;  // offset in file
+    uint64_t offset = 0;  // offset in file

     TensorStorage() = default;

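offset is the absolute byte position of the tensor's data within the model file. size_t is only guaranteed to match the platform's address-space width, so a 32-bit build tops out at 4 GiB - 1, while uint64_t always covers the full range a multi-gigabyte model file can need. A minimal sketch of the truncation this avoids (the variable names are illustrative):

#include <cstdint>
#include <cstdio>

int main() {
    uint64_t five_gib = 5ull * 1024 * 1024 * 1024;  // an offset 5 GiB into a file

    uint32_t narrow = (uint32_t)five_gib;  // stand-in for a 32-bit size_t: wraps to 1 GiB
    uint64_t wide = five_gib;              // uint64_t keeps the value exact

    std::printf("narrow = %u\nwide   = %llu\n", narrow, (unsigned long long)wide);
    return 0;
}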
@@ -164,10 +164,10 @@ struct TensorStorage {

     std::vector<TensorStorage> chunk(size_t n) {
         std::vector<TensorStorage> chunks;
-        size_t chunk_size = nbytes_to_read() / n;
+        uint64_t chunk_size = nbytes_to_read() / n;
         // printf("%d/%d\n", chunk_size, nbytes_to_read());
         reverse_ne();
-        for (int i = 0; i < n; i++) {
+        for (size_t i = 0; i < n; i++) {
             TensorStorage chunk_i = *this;
             chunk_i.ne[0] = ne[0] / n;
             chunk_i.offset = offset + i * chunk_size;
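chunk() splits one tensor's byte range into n equal slices, each starting at offset + i * chunk_size; with both offset and chunk_size held in 64-bit unsigned types, that arithmetic stays exact even when the tensor sits several gigabytes into the file. A self-contained sketch of the same arithmetic (split_range and Slice are hypothetical names, not the repo's API):

#include <cstdint>
#include <cstdio>
#include <vector>

struct Slice {
    uint64_t offset;
    uint64_t nbytes;
};

// Split a byte range [offset, offset + nbytes) into n equal slices.
std::vector<Slice> split_range(uint64_t offset, uint64_t nbytes, size_t n) {
    std::vector<Slice> slices;
    uint64_t chunk_size = nbytes / n;
    for (size_t i = 0; i < n; i++) {
        // i * chunk_size is computed as uint64_t, so it cannot wrap at 4 GiB
        slices.push_back({offset + i * chunk_size, chunk_size});
    }
    return slices;
}

int main() {
    // e.g. a 96 MiB tensor that starts 6 GiB into the checkpoint file
    for (const Slice& s : split_range(6ull << 30, 96ull << 20, 4)) {
        std::printf("offset=%llu nbytes=%llu\n",
                    (unsigned long long)s.offset,
                    (unsigned long long)s.nbytes);
    }
    return 0;
}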