Skip to content
This repository has been archived by the owner on Oct 11, 2024. It is now read-only.

Commit

Permalink
Bugfix: fix broken download of models from ModelScope (vllm-project#5233
Browse files Browse the repository at this point in the history
)

Co-authored-by: mulin.lyh <[email protected]>
  • Loading branch information
2 people authored and robertgshaw2-redhat committed Jun 11, 2024
1 parent 08fd788 commit cbfd3d9
Show file tree
Hide file tree
Showing 3 changed files with 32 additions and 2 deletions.
21 changes: 21 additions & 0 deletions tests/test_regression.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,27 @@ def test_gc():
assert allocated < 50 * 1024 * 1024


def test_model_from_modelscope(monkeypatch):
    """Smoke-test loading a model from ModelScope rather than the HF Hub.

    Sets the VLLM_USE_MODELSCOPE env var so vLLM resolves the config and
    weights via modelscope, then generates one completion per prompt.
    """
    # model: https://modelscope.cn/models/qwen/Qwen1.5-0.5B-Chat/summary
    model_id = "qwen/Qwen1.5-0.5B-Chat"
    monkeypatch.setenv("VLLM_USE_MODELSCOPE", "True")
    try:
        llm = LLM(model=model_id)

        prompts = [
            "Hello, my name is",
            "The president of the United States is",
            "The capital of France is",
            "The future of AI is",
        ]
        sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

        generated = llm.generate(prompts, sampling_params)
        assert len(generated) == 4
    finally:
        # Unset explicitly even though monkeypatch also undoes setenv at
        # teardown — keeps the environment clean for any code that runs
        # before fixture finalization.
        monkeypatch.delenv("VLLM_USE_MODELSCOPE", raising=False)


# Allow running this test module directly (python test_regression.py)
# without invoking the pytest CLI.
if __name__ == "__main__":
    import pytest
    pytest.main([__file__])
6 changes: 5 additions & 1 deletion vllm/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,11 @@ def __init__(
self.revision = revision
self.code_revision = code_revision
self.rope_scaling = rope_scaling
self.tokenizer_revision = tokenizer_revision
# The tokenizer version is consistent with the model version by default.
if tokenizer_revision is None:
self.tokenizer_revision = revision
else:
self.tokenizer_revision = tokenizer_revision
self.quantization = quantization
self.quantization_param_path = quantization_param_path
# UPSTREAM SYNC: keep sparsity
Expand Down
7 changes: 6 additions & 1 deletion vllm/transformers_utils/config.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
from typing import Dict, Optional

from transformers import AutoConfig, PretrainedConfig
from transformers import PretrainedConfig

from vllm.envs import VLLM_USE_MODELSCOPE
from vllm.logger import init_logger
from vllm.transformers_utils.configs import (ChatGLMConfig, DbrxConfig,
JAISConfig, MPTConfig, RWConfig)
Expand All @@ -24,6 +25,10 @@ def get_config(model: str,
code_revision: Optional[str] = None,
rope_scaling: Optional[dict] = None) -> PretrainedConfig:
try:
if VLLM_USE_MODELSCOPE:
from modelscope import AutoConfig
else:
from transformers import AutoConfig
config = AutoConfig.from_pretrained(
model,
trust_remote_code=trust_remote_code,
Expand Down

0 comments on commit cbfd3d9

Please sign in to comment.