Format code (#2047)
merrymercy authored Jul 21, 2023
1 parent a4b0606 commit 8d8c96c
Showing 2 changed files with 17 additions and 14 deletions.
27 changes: 15 additions & 12 deletions fastchat/model/compression.py
@@ -11,7 +11,7 @@
 from torch.nn import functional as F
 import torch.nn as nn
 from tqdm import tqdm
-from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer,AutoModel
+from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AutoModel


 @dataclasses.dataclass
@@ -105,10 +105,12 @@ def load_compress_model(model_path, device, torch_dtype, use_fast, revision="main"):
     # `use_fast=True`` is not supported for some models.
     try:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_path, use_fast=use_fast, revision=revision,trust_remote_code=True)
+            model_path, use_fast=use_fast, revision=revision, trust_remote_code=True
+        )
     except TypeError:
         tokenizer = AutoTokenizer.from_pretrained(
-            model_path, use_fast=False, revision=revision,trust_remote_code=True)
+            model_path, use_fast=False, revision=revision, trust_remote_code=True
+        )
     with init_empty_weights():
         # `trust_remote_code` should be set as `True` for both AutoConfig and AutoModel
         config = AutoConfig.from_pretrained(
@@ -117,26 +119,27 @@ def load_compress_model(model_path, device, torch_dtype, use_fast, revision="main"):
             torch_dtype=torch_dtype,
             trust_remote_code=True,
             revision=revision,
-            trust_remote_code=True
         )
-        # some models are loaded by AutoModel but not AutoModelForCausalLM, 
+        # some models are loaded by AutoModel but not AutoModelForCausalLM,
         # such as chatglm, chatglm2
         try:
-            model = AutoModelForCausalLM.from_config(config,trust_remote_code=True)
+            model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
         except NameError:
-            model = AutoModel.from_config(config,trust_remote_code=True)
+            model = AutoModel.from_config(config, trust_remote_code=True)
     linear_weights = get_compressed_list(model)
     if os.path.exists(model_path):
         # `model_path` is a local folder
         base_pattern = os.path.join(model_path, "pytorch_model*.bin")
     else:
         # `model_path` is a cached Hugging Face repo
-        # We don't necessarily need to download the model' repo again if there is a cache. 
+        # We don't necessarily need to download the model' repo again if there is a cache.
         # So check the default huggingface cache first.
-        model_path_temp = os.path.join(os.getenv("HOME"),
-                                       ".cache/huggingface/hub",
-                                       "models--"+model_path.replace("/","--"),
-                                       "snapshots/")
+        model_path_temp = os.path.join(
+            os.getenv("HOME"),
+            ".cache/huggingface/hub",
+            "models--" + model_path.replace("/", "--"),
+            "snapshots/",
+        )
         if os.path.exists(model_path_temp):
             temp_last_dir = os.listdir(model_path_temp)[-1]
             model_path = os.path.join(model_path_temp, temp_last_dir)
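
A note on the tokenizer hunk: some models' tokenizers do not accept `use_fast=True` and raise a TypeError, so the loader retries with the slow tokenizer. A minimal standalone sketch of the same fallback pattern (the helper name is ours, not FastChat's):

    from transformers import AutoTokenizer

    def load_tokenizer_with_fallback(model_path, use_fast=True, revision="main"):
        """Try the requested tokenizer; retry with the slow one on TypeError."""
        try:
            return AutoTokenizer.from_pretrained(
                model_path, use_fast=use_fast, revision=revision, trust_remote_code=True
            )
        except TypeError:
            # Some tokenizers reject fast-tokenizer keyword arguments entirely.
            return AutoTokenizer.from_pretrained(
                model_path, use_fast=False, revision=revision, trust_remote_code=True
            )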
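Beyond whitespace, the last hunk also drops a repeated `trust_remote_code=True` argument; a duplicated keyword argument is a SyntaxError in Python, so that deletion is a real fix, not just formatting. The reflowed cache lookup follows the Hugging Face hub layout, where a repo org/name is cached under models--org--name/snapshots/<commit>. A sketch of that resolution under the same assumptions as the diff (HOME is set and the default cache location is used; huggingface_hub.snapshot_download is the library-supported way to obtain this path and should generally be preferred):

    import os

    def resolve_cached_snapshot(model_path: str) -> str:
        """Best-effort replica of the cache check in load_compress_model."""
        cache_dir = os.path.join(
            os.getenv("HOME"),  # os.path.expanduser("~") would be more portable
            ".cache/huggingface/hub",
            "models--" + model_path.replace("/", "--"),
            "snapshots/",
        )
        if os.path.exists(cache_dir):
            # Take the last snapshot directory, as the diff does; reading the
            # refs/ folder would pin the exact revision instead.
            return os.path.join(cache_dir, os.listdir(cache_dir)[-1])
        return model_path  # not cached; the caller downloads the repo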
4 changes: 2 additions & 2 deletions fastchat/model/model_adapter.py
@@ -677,9 +677,9 @@ class OasstLLaMAAdapter(BaseModelAdapter):
     use_fast_tokenizer = False

     def match(self, model_path: str):
-        if "openassistant-sft-7-llama-30b-hf" in model_path.lower():
-            return True
+        model_path = model_path.lower()
+        if "openassistant-sft-7-llama-30b-hf" in model_path:
             return True
         return "oasst" in model_path and "pythia" not in model_path

     def get_default_conv_template(self, model_path: str) -> Conversation:
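
The match() change lowercases model_path once up front, which also makes the final "oasst"/"pythia" check case-insensitive (previously only the first comparison used .lower()). A self-contained sketch of the adapter-matching behavior; the example paths below are illustrative, not taken from the commit:

    class OasstLLaMAAdapter:
        """Matches OpenAssistant LLaMA checkpoints, excluding Pythia variants."""

        def match(self, model_path: str) -> bool:
            model_path = model_path.lower()
            if "openassistant-sft-7-llama-30b-hf" in model_path:
                return True
            return "oasst" in model_path and "pythia" not in model_path

    adapter = OasstLLaMAAdapter()
    assert adapter.match("models/OpenAssistant-SFT-7-Llama-30B-HF")   # now case-insensitive
    assert not adapter.match("OpenAssistant/oasst-sft-4-pythia-12b")  # pythia excluded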
