diff --git a/fastchat/model/compression.py b/fastchat/model/compression.py
index 16699ccff..108311f71 100644
--- a/fastchat/model/compression.py
+++ b/fastchat/model/compression.py
@@ -11,7 +11,7 @@
 from torch.nn import functional as F
 import torch.nn as nn
 from tqdm import tqdm
-from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer,AutoModel
+from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AutoModel
 
 
 @dataclasses.dataclass
@@ -105,10 +105,12 @@ def load_compress_model(model_path, device, torch_dtype, use_fast, revision="mai
     # `use_fast=True`` is not supported for some models.
     try:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_path, use_fast=use_fast, revision=revision,trust_remote_code=True)
+            model_path, use_fast=use_fast, revision=revision, trust_remote_code=True
+        )
     except TypeError:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_path, use_fast=False, revision=revision,trust_remote_code=True)
+            model_path, use_fast=False, revision=revision, trust_remote_code=True
+        )
     with init_empty_weights():
         # `trust_remote_code` should be set as `True` for both AutoConfig and AutoModel
         config = AutoConfig.from_pretrained(
@@ -117,26 +119,27 @@ def load_compress_model(model_path, device, torch_dtype, use_fast, revision="mai
             torch_dtype=torch_dtype,
             trust_remote_code=True,
             revision=revision,
-            trust_remote_code=True
         )
-        # some models are loaded by AutoModel but not AutoModelForCausalLM, 
+        # some models are loaded by AutoModel but not AutoModelForCausalLM,
         # such as chatglm, chatglm2
         try:
-            model = AutoModelForCausalLM.from_config(config,trust_remote_code=True)
+            model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
         except NameError:
-            model = AutoModel.from_config(config,trust_remote_code=True)
+            model = AutoModel.from_config(config, trust_remote_code=True)
         linear_weights = get_compressed_list(model)
     if os.path.exists(model_path):
         # `model_path` is a local folder
         base_pattern = os.path.join(model_path, "pytorch_model*.bin")
     else:
         # `model_path` is a cached Hugging Face repo
-        # We don't necessarily need to download the model' repo again if there is a cache. 
+        # We don't necessarily need to download the model' repo again if there is a cache.
         # So check the default huggingface cache first.
-        model_path_temp = os.path.join(os.getenv("HOME"),
-            ".cache/huggingface/hub",
-            "models--"+model_path.replace("/","--"),
-            "snapshots/")
+        model_path_temp = os.path.join(
+            os.getenv("HOME"),
+            ".cache/huggingface/hub",
+            "models--" + model_path.replace("/", "--"),
+            "snapshots/",
+        )
         if os.path.exists(model_path_temp):
             temp_last_dir = os.listdir(model_path_temp)[-1]
             model_path = os.path.join(model_path_temp, temp_last_dir)
diff --git a/fastchat/model/model_adapter.py b/fastchat/model/model_adapter.py
index 01c47cf0f..5a417a275 100644
--- a/fastchat/model/model_adapter.py
+++ b/fastchat/model/model_adapter.py
@@ -677,9 +677,9 @@ class OasstLLaMAAdapter(BaseModelAdapter):
     use_fast_tokenizer = False
 
     def match(self, model_path: str):
-        if "openassistant-sft-7-llama-30b-hf" in model_path.lower():
-            return True
         model_path = model_path.lower()
+        if "openassistant-sft-7-llama-30b-hf" in model_path:
+            return True
         return "oasst" in model_path and "pythia" not in model_path
 
     def get_default_conv_template(self, model_path: str) -> Conversation:
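
For context, a minimal usage sketch of the code path this diff touches (not part of the change itself): load_compress_model builds the tokenizer, instantiates the model with empty weights, falls back from AutoModelForCausalLM to AutoModel for architectures such as chatglm/chatglm2, and resolves either a local folder or the default Hugging Face cache. The repo id below is only an example, and the (model, tokenizer) return value is an assumption about FastChat's API.

# Hypothetical usage sketch; assumes load_compress_model returns the
# compressed model and its tokenizer, and that the repo id is available.
import torch

from fastchat.model.compression import load_compress_model

model, tokenizer = load_compress_model(
    model_path="THUDM/chatglm2-6b",  # example: HF repo id or local checkpoint folder
    device="cuda",
    torch_dtype=torch.float16,
    use_fast=True,
    revision="main",
)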