diff --git a/benchmarks/backend_request_func.py b/benchmarks/backend_request_func.py
index fe29c67086158..fbab547d094fe 100644
--- a/benchmarks/backend_request_func.py
+++ b/benchmarks/backend_request_func.py
@@ -390,17 +390,17 @@ def remove_prefix(text: str, prefix: str) -> str:
     return text
 
 
-def get_model(pretrained_model_name_or_path: str):
+def get_model(pretrained_model_name_or_path: str) -> str:
     if os.getenv('VLLM_USE_MODELSCOPE', 'False').lower() == 'true':
         from modelscope import snapshot_download
-    else:
-        from huggingface_hub import snapshot_download
-
-    model_path = snapshot_download(
-        model_id=pretrained_model_name_or_path,
-        local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
-        ignore_file_pattern=[".*.pt", ".*.safetensors", ".*.bin"])
-    return model_path
+
+        model_path = snapshot_download(
+            model_id=pretrained_model_name_or_path,
+            local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
+            ignore_file_pattern=[".*.pt", ".*.safetensors", ".*.bin"])
+
+        return model_path
+    return pretrained_model_name_or_path
 
 
 def get_tokenizer(
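
For readers following the hunk, below is a minimal, self-contained sketch of how `get_model` reads once the patch is applied. The `os` and `huggingface_hub.constants` imports and the `__main__` demo are not part of the patch; they are assumptions added only so the snippet runs on its own.

```python
import os

import huggingface_hub.constants


def get_model(pretrained_model_name_or_path: str) -> str:
    # Resolve the model through ModelScope only when explicitly requested
    # via the VLLM_USE_MODELSCOPE environment variable.
    if os.getenv('VLLM_USE_MODELSCOPE', 'False').lower() == 'true':
        from modelscope import snapshot_download

        # Weight files (.pt / .safetensors / .bin) are excluded from the
        # snapshot, so only config/tokenizer files are fetched locally.
        model_path = snapshot_download(
            model_id=pretrained_model_name_or_path,
            local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
            ignore_file_pattern=[".*.pt", ".*.safetensors", ".*.bin"])

        return model_path
    # Default path: return the identifier unchanged.
    return pretrained_model_name_or_path


if __name__ == "__main__":
    # With VLLM_USE_MODELSCOPE unset, any model id passes straight through
    # (model id below is just an illustrative example).
    print(get_model("facebook/opt-125m"))  # prints "facebook/opt-125m"
```

The net effect is that the unconditional `snapshot_download` call (and the `huggingface_hub` branch) is gone: unless `VLLM_USE_MODELSCOPE` is set to true, the identifier is returned untouched, presumably because the HuggingFace-based tokenizer and model loaders can resolve it themselves.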