From 2ec1c824e5526dffbeeb62e7ec220d890183b097 Mon Sep 17 00:00:00 2001
From: Samuel Colvin
Date: Sun, 22 Dec 2024 13:28:13 +0000
Subject: [PATCH] fix settings docs formatting (#524)

---
 pydantic_ai_slim/pydantic_ai/settings.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/pydantic_ai_slim/pydantic_ai/settings.py b/pydantic_ai_slim/pydantic_ai/settings.py
index e4c3202f..ce0c7db4 100644
--- a/pydantic_ai_slim/pydantic_ai/settings.py
+++ b/pydantic_ai_slim/pydantic_ai/settings.py
@@ -22,6 +22,7 @@ class ModelSettings(TypedDict, total=False):
     """The maximum number of tokens to generate before stopping.

     Supported by:
+
     * Gemini
     * Anthropic
     * OpenAI
@@ -37,6 +38,7 @@ class ModelSettings(TypedDict, total=False):
     Note that even with `temperature` of `0.0`, the results will not be fully deterministic.

     Supported by:
+
     * Gemini
     * Anthropic
     * OpenAI
@@ -51,6 +53,7 @@ class ModelSettings(TypedDict, total=False):
     You should either alter `temperature` or `top_p`, but not both.

     Supported by:
+
     * Gemini
     * Anthropic
     * OpenAI
@@ -61,6 +64,7 @@ class ModelSettings(TypedDict, total=False):
     """Override the client-level default timeout for a request, in seconds.

     Supported by:
+
     * Gemini
     * Anthropic
     * OpenAI
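
For context, the docstrings touched above belong to the ModelSettings TypedDict, which callers use to configure a model request. The following is a minimal usage sketch, not part of the patch, assuming the Agent constructor and run_sync accept a model_settings argument and that results expose .data, as in pydantic-ai releases around this change; the model name and prompt are illustrative only.

    # Illustrative sketch only; names outside pydantic_ai.settings are assumptions.
    from pydantic_ai import Agent
    from pydantic_ai.settings import ModelSettings

    # Agent-level defaults: applied to every run unless overridden.
    agent = Agent(
        'openai:gpt-4o',
        model_settings=ModelSettings(temperature=0.0, max_tokens=500),
    )

    # Run-level settings: merged over the agent-level defaults for this call only.
    result = agent.run_sync(
        'What is the capital of France?',
        model_settings=ModelSettings(timeout=10.0),
    )
    print(result.data)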