This repository has been archived by the owner on Jun 24, 2024. It is now read-only.

Commit

code cleanup that doesn't change anything
iacore committed Apr 7, 2023
1 parent 53ba1a9 commit 071612e
Showing 2 changed files with 41 additions and 16 deletions.
46 changes: 31 additions & 15 deletions llama-rs/src/lib.rs
@@ -598,13 +598,10 @@ impl Model {
         let main_path = path.as_ref();
 
         let file = File::open(main_path).map_err(|e| LoadError::OpenFileFailed {
-                source: e,
-                path: main_path.to_owned(),
-            })?;
-        let mut reader =
-            BufReader::new(
-                &file,
-            );
+            source: e,
+            path: main_path.to_owned(),
+        })?;
+        let mut reader = BufReader::new(&file);
 
         // Verify magic
         let model_type: ModelType = match read_u32(&mut reader)? {
@@ -666,13 +663,21 @@ impl Model {
                 ModelType::GGMF | ModelType::Unversioned => read_i32(&mut reader)? as usize,
                 ModelType::GGJT => read_u32(&mut reader)? as usize,
             };
-            if let Ok(word) = read_string(&mut reader, len) {
-                max_token_length = max_token_length.max(word.len());
-                id_to_token.push(word.clone());
-                token_to_id.insert(word, TokenId::try_from(i)?);
+            let maybe_word = if len > 0 {
+                read_string(&mut reader, len)
             } else {
-                load_progress_callback(LoadProgress::BadToken { index: i });
-                id_to_token.push("�".to_string());
+                Ok("".into())
+            };
+            match maybe_word {
+                Ok(word) => {
+                    max_token_length = max_token_length.max(word.len());
+                    id_to_token.push(word.clone());
+                    token_to_id.insert(word, TokenId::try_from(i)?);
+                }
+                Err(_e) => {
+                    load_progress_callback(LoadProgress::BadToken { index: i });
+                    id_to_token.push("�".to_string());
+                }
             }
 
             // Token score, currently unused
@@ -817,11 +822,22 @@ impl Model {
             ModelType::GGMF | ModelType::Unversioned => {
                 let file_offset = reader.stream_position()?;
                 drop(reader);
-                load_weights_ggmf_or_unversioned(file_offset, main_path, load_progress_callback, &model)?
+                load_weights_ggmf_or_unversioned(
+                    file_offset,
+                    main_path,
+                    load_progress_callback,
+                    &model,
+                )?
             }
             ModelType::GGJT => {
                 let mmap = unsafe { Mmap::map(&file)? };
-                load_weights_ggjt(&mut reader, &mmap, main_path, load_progress_callback, &model)?;
+                load_weights_ggjt(
+                    &mut reader,
+                    &mmap,
+                    main_path,
+                    load_progress_callback,
+                    &model,
+                )?;
                 model.mmap = Some(mmap);
             }
         }
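For readers who want to see the refactored vocabulary loop in isolation, here is a minimal, self-contained sketch of the same control flow. The read_u32/read_string helpers and the load_vocab wrapper below are simplified stand-ins written for this sketch, not the actual llama-rs definitions; only the zero-length handling and the match on maybe_word mirror the diff above.

use std::collections::HashMap;
use std::io::{self, Read};

type TokenId = i32;

// Simplified stand-in for llama-rs's read_u32: little-endian u32.
fn read_u32(reader: &mut impl Read) -> io::Result<u32> {
    let mut buf = [0u8; 4];
    reader.read_exact(&mut buf)?;
    Ok(u32::from_le_bytes(buf))
}

// Simplified stand-in for llama-rs's read_string: `len` bytes of UTF-8.
fn read_string(reader: &mut impl Read, len: usize) -> io::Result<String> {
    let mut buf = vec![0u8; len];
    reader.read_exact(&mut buf)?;
    String::from_utf8(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
}

// Mirrors the control flow introduced above: zero-length entries become the
// empty token, unreadable entries are reported and replaced with U+FFFD.
fn load_vocab(
    reader: &mut impl Read,
    n_vocab: usize,
) -> io::Result<(Vec<String>, HashMap<String, TokenId>)> {
    let mut id_to_token = Vec::with_capacity(n_vocab);
    let mut token_to_id = HashMap::new();

    for i in 0..n_vocab {
        let len = read_u32(reader)? as usize;
        let maybe_word = if len > 0 {
            read_string(reader, len)
        } else {
            Ok(String::new())
        };
        match maybe_word {
            Ok(word) => {
                id_to_token.push(word.clone());
                token_to_id.insert(word, i as TokenId);
            }
            Err(_e) => {
                eprintln!("bad token at index {i}");
                id_to_token.push("\u{FFFD}".to_string());
            }
        }
    }
    Ok((id_to_token, token_to_id))
}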
11 changes: 10 additions & 1 deletion llama-rs/src/loader.rs
@@ -339,7 +339,16 @@ pub(crate) fn load_weights_ggjt(
             });
         }
 
-        _ = tensor_type_size(ftype, ne);
+        match tensor_type_size(ftype, ne) {
+            Some(_) => {},
+            None => {
+                return Err(LoadError::InvalidFtype {
+                    tensor_name,
+                    ftype,
+                    path: path.to_owned(),
+                });
+            }
+        };
 
         let offset_curr = reader.stream_position()?;
         let offset_aligned: u64 = (offset_curr + 31) & (31 ^ u64::MAX);
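One detail in this hunk's trailing context worth spelling out: `(offset_curr + 31) & (31 ^ u64::MAX)` rounds the current stream position up to the next 32-byte boundary, since `31 ^ u64::MAX` is just `!31`. A quick standalone check (the align_to_32 name is mine, not a function in the crate):

// Round an offset up to the next multiple of 32, as done for GGJT tensor
// data. `31 ^ u64::MAX` is the same mask as `!31`.
fn align_to_32(offset: u64) -> u64 {
    (offset + 31) & (31 ^ u64::MAX)
}

fn main() {
    assert_eq!(align_to_32(0), 0);
    assert_eq!(align_to_32(1), 32);
    assert_eq!(align_to_32(32), 32);
    assert_eq!(align_to_32(33), 64);
    println!("32-byte alignment checks pass");
}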

0 comments on commit 071612e
