diff --git a/pydantic_ai_slim/pydantic_ai/models/ollama.py b/pydantic_ai_slim/pydantic_ai/models/ollama.py
index 939c41b8..8a0f3034 100644
--- a/pydantic_ai_slim/pydantic_ai/models/ollama.py
+++ b/pydantic_ai_slim/pydantic_ai/models/ollama.py
@@ -71,6 +71,7 @@ def __init__(
         model_name: OllamaModelName,
         *,
         base_url: str | None = 'http://localhost:11434/v1/',
+        api_key: str = 'ollama',
         openai_client: AsyncOpenAI | None = None,
         http_client: AsyncHTTPClient | None = None,
     ):
@@ -83,6 +84,8 @@ def __init__(
             model_name: The name of the Ollama model to use. List of models available [here](https://ollama.com/library)
                 You must first download the model (`ollama pull <MODEL-NAME>`) in order to use the model
             base_url: The base url for the ollama requests. The default value is the ollama default
+            api_key: The API key to use for authentication. Defaults to 'ollama' for local instances,
+                but can be customized for proxy setups that require authentication
             openai_client: An existing
                 [`AsyncOpenAI`](https://github.com/openai/openai-python?tab=readme-ov-file#async-usage)
                 client to use, if provided, `base_url` and `http_client` must be `None`.
@@ -96,7 +99,7 @@ def __init__(
         else:
             # API key is not required for ollama but a value is required to create the client
             http_client_ = http_client or cached_async_http_client()
-            oai_client = AsyncOpenAI(base_url=base_url, api_key='ollama', http_client=http_client_)
+            oai_client = AsyncOpenAI(base_url=base_url, api_key=api_key, http_client=http_client_)
         self.openai_model = OpenAIModel(model_name=model_name, openai_client=oai_client)

     async def agent_model(