diff --git a/assistants/prospector-assistant/.env.example b/assistants/prospector-assistant/.env.example
index 0d7447d3..ebda978f 100644
--- a/assistants/prospector-assistant/.env.example
+++ b/assistants/prospector-assistant/.env.example
@@ -1,6 +1,11 @@
 # Description: Example of .env file
 # Usage: Copy this file to .env and set the values
+# NOTE:
+# - Environment variables in the host environment will take precedence over values in this file.
+# - When running with VS Code, you must 'stop' and 'start' the process for changes to take effect.
+# It is not enough to just use the VS Code 'restart' button
+
 # Assistant Service
 ASSISTANT__AZURE_OPENAI_ENDPOINT=https://.openai.azure.com/
 ASSISTANT__AZURE_CONTENT_SAFETY_ENDPOINT=https://.cognitiveservices.azure.com/
diff --git a/assistants/prospector-assistant/.vscode/launch.json b/assistants/prospector-assistant/.vscode/launch.json
index 6b163735..5be48502 100644
--- a/assistants/prospector-assistant/.vscode/launch.json
+++ b/assistants/prospector-assistant/.vscode/launch.json
@@ -7,7 +7,7 @@
             "name": "assistants: prospector-assistant",
             "cwd": "${workspaceFolder}",
             "module": "semantic_workbench_assistant.start",
-            "args": ["assistant.chat:app", "--port", "3011"],
+            "args": ["assistant.chat:app"],
             "consoleTitle": "${workspaceFolderBasename}"
         }
     ]
diff --git a/assistants/prospector-assistant/assistant/chat.py b/assistants/prospector-assistant/assistant/chat.py
index 165ec035..860a00d9 100644
--- a/assistants/prospector-assistant/assistant/chat.py
+++ b/assistants/prospector-assistant/assistant/chat.py
@@ -54,7 +54,7 @@
 #
 # create the configuration provider, using the extended configuration model
 #
-assistant_config = BaseModelAssistantConfig(AssistantConfigModel())
+assistant_config = BaseModelAssistantConfig(AssistantConfigModel)
 
 
 # define the content safety evaluator factory
diff --git a/assistants/prospector-assistant/assistant/config.py b/assistants/prospector-assistant/assistant/config.py
index 30ee30c3..93d1dc67 100644
--- a/assistants/prospector-assistant/assistant/config.py
+++ b/assistants/prospector-assistant/assistant/config.py
@@ -218,7 +218,7 @@ class AssistantConfigModel(BaseModel):
         ),
     ] = RequestConfig()
 
-    service_config: openai_client.ServiceConfig = openai_client.AzureOpenAIServiceConfig()
+    service_config: openai_client.ServiceConfig
 
     content_safety_config: Annotated[
         CombinedContentSafetyEvaluatorConfig,
diff --git a/assistants/skill-assistant/.env.example b/assistants/skill-assistant/.env.example
index 0d7447d3..ebda978f 100644
--- a/assistants/skill-assistant/.env.example
+++ b/assistants/skill-assistant/.env.example
@@ -1,6 +1,11 @@
 # Description: Example of .env file
 # Usage: Copy this file to .env and set the values
+# NOTE:
+# - Environment variables in the host environment will take precedence over values in this file.
+# - When running with VS Code, you must 'stop' and 'start' the process for changes to take effect.
+# It is not enough to just use the VS Code 'restart' button
+
 # Assistant Service
 ASSISTANT__AZURE_OPENAI_ENDPOINT=https://.openai.azure.com/
 ASSISTANT__AZURE_CONTENT_SAFETY_ENDPOINT=https://.cognitiveservices.azure.com/
diff --git a/assistants/skill-assistant/.vscode/launch.json b/assistants/skill-assistant/.vscode/launch.json
index 41b2c88d..df9521cd 100644
--- a/assistants/skill-assistant/.vscode/launch.json
+++ b/assistants/skill-assistant/.vscode/launch.json
@@ -7,7 +7,7 @@
             "name": "assistants: skill-assistant",
             "cwd": "${workspaceFolder}",
             "module": "semantic_workbench_assistant.start",
-            "args": ["assistant.skill_assistant:app", "--port", "3012"],
+            "args": ["assistant.skill_assistant:app"],
             "consoleTitle": "${workspaceFolderBasename}"
         }
     ]
diff --git a/assistants/skill-assistant/assistant/config.py b/assistants/skill-assistant/assistant/config.py
index a33e4e8f..4c66a065 100644
--- a/assistants/skill-assistant/assistant/config.py
+++ b/assistants/skill-assistant/assistant/config.py
@@ -119,7 +119,7 @@ class AssistantConfigModel(BaseModel):
         ),
     ] = ChatDriverConfig()
 
-    service_config: openai_client.ServiceConfig = openai_client.AzureOpenAIServiceConfig()
+    service_config: openai_client.ServiceConfig
 
     content_safety_config: Annotated[
         CombinedContentSafetyEvaluatorConfig,
diff --git a/examples/python/python-01-echo-bot/.env.example b/examples/python/python-01-echo-bot/.env.example
index 3c83c5d7..130e569e 100644
--- a/examples/python/python-01-echo-bot/.env.example
+++ b/examples/python/python-01-echo-bot/.env.example
@@ -1,8 +1,10 @@
 # Description: Example of .env file
 # Usage: Copy this file to .env and set the values
-# NOTE: Changes to this file will not take effect until the project service is 'stopped' and 'started'
-# It is not enough to just use the VS Code 'restart' button
+# NOTE:
+# - Environment variables in the host environment will take precedence over values in this file.
+# - When running with VS Code, you must 'stop' and 'start' the process for changes to take effect.
+# It is not enough to just use the VS Code 'restart' button
 
 # Assistant Service
 # The ASSISTANT__ prefix is used to group all the environment variables related to the assistant service.
diff --git a/examples/python/python-01-echo-bot/.vscode/launch.json b/examples/python/python-01-echo-bot/.vscode/launch.json
index a8902025..8ecf3b9e 100644
--- a/examples/python/python-01-echo-bot/.vscode/launch.json
+++ b/examples/python/python-01-echo-bot/.vscode/launch.json
@@ -7,7 +7,7 @@
             "name": "examples: python-01-echo-bot",
             "cwd": "${workspaceFolder}",
             "module": "semantic_workbench_assistant.start",
-            "args": ["assistant.chat:app", "--port", "3001"],
+            "args": ["assistant.chat:app"],
             "consoleTitle": "${workspaceFolderBasename}"
         }
     ]
diff --git a/examples/python/python-01-echo-bot/assistant/chat.py b/examples/python/python-01-echo-bot/assistant/chat.py
index f0f64fc2..f17f665f 100644
--- a/examples/python/python-01-echo-bot/assistant/chat.py
+++ b/examples/python/python-01-echo-bot/assistant/chat.py
@@ -57,9 +57,7 @@
 #
 # create the configuration provider, using the extended configuration model
 #
-assistant_config = BaseModelAssistantConfig(
-    default=AssistantConfigModel(),
-)
+assistant_config = BaseModelAssistantConfig(AssistantConfigModel)
 
 content_safety = ContentSafety(AlwaysWarnContentSafetyEvaluator.factory)
diff --git a/examples/python/python-02-simple-chatbot/.env.example b/examples/python/python-02-simple-chatbot/.env.example
index a15ed8fb..ebda978f 100644
--- a/examples/python/python-02-simple-chatbot/.env.example
+++ b/examples/python/python-02-simple-chatbot/.env.example
@@ -1,8 +1,10 @@
 # Description: Example of .env file
 # Usage: Copy this file to .env and set the values
-# NOTE: Changes to this file will not take effect until the project service is 'stopped' and 'started'
-# It is not enough to just use the VS Code 'restart' button
+# NOTE:
+# - Environment variables in the host environment will take precedence over values in this file.
+# - When running with VS Code, you must 'stop' and 'start' the process for changes to take effect.
+# It is not enough to just use the VS Code 'restart' button
 
 # Assistant Service
 ASSISTANT__AZURE_OPENAI_ENDPOINT=https://.openai.azure.com/
diff --git a/examples/python/python-02-simple-chatbot/.vscode/launch.json b/examples/python/python-02-simple-chatbot/.vscode/launch.json
index 6fe72259..5938004a 100644
--- a/examples/python/python-02-simple-chatbot/.vscode/launch.json
+++ b/examples/python/python-02-simple-chatbot/.vscode/launch.json
@@ -7,7 +7,7 @@
             "name": "examples: python-02-simple-chatbot",
             "cwd": "${workspaceFolder}",
             "module": "semantic_workbench_assistant.start",
-            "args": ["assistant.chat:app", "--port", "3002"],
+            "args": ["assistant.chat:app"],
             "consoleTitle": "${workspaceFolderBasename}"
         }
     ]
diff --git a/examples/python/python-02-simple-chatbot/assistant/chat.py b/examples/python/python-02-simple-chatbot/assistant/chat.py
index 4ca7d0d9..6babafac 100644
--- a/examples/python/python-02-simple-chatbot/assistant/chat.py
+++ b/examples/python/python-02-simple-chatbot/assistant/chat.py
@@ -63,7 +63,7 @@
 #
 # create the configuration provider, using the extended configuration model
 #
-assistant_config = BaseModelAssistantConfig(AssistantConfigModel())
+assistant_config = BaseModelAssistantConfig(AssistantConfigModel)
 
 
 # define the content safety evaluator factory
diff --git a/examples/python/python-02-simple-chatbot/assistant/config.py b/examples/python/python-02-simple-chatbot/assistant/config.py
index 0ebd9942..5ce4725d 100644
--- a/examples/python/python-02-simple-chatbot/assistant/config.py
+++ b/examples/python/python-02-simple-chatbot/assistant/config.py
@@ -135,7 +135,7 @@ class AssistantConfigModel(BaseModel):
         ),
     ] = RequestConfig()
 
-    service_config: openai_client.ServiceConfig = openai_client.AzureOpenAIServiceConfig()
+    service_config: openai_client.ServiceConfig
 
     content_safety_config: Annotated[
         CombinedContentSafetyEvaluatorConfig,
diff --git a/examples/python/python-03-multimodel-chatbot/.env.example b/examples/python/python-03-multimodel-chatbot/.env.example
index a15ed8fb..ebda978f 100644
--- a/examples/python/python-03-multimodel-chatbot/.env.example
+++ b/examples/python/python-03-multimodel-chatbot/.env.example
@@ -1,8 +1,10 @@
 # Description: Example of .env file
 # Usage: Copy this file to .env and set the values
-# NOTE: Changes to this file will not take effect until the project service is 'stopped' and 'started'
-# It is not enough to just use the VS Code 'restart' button
+# NOTE:
+# - Environment variables in the host environment will take precedence over values in this file.
+# - When running with VS Code, you must 'stop' and 'start' the process for changes to take effect.
+# It is not enough to just use the VS Code 'restart' button
 
 # Assistant Service
 ASSISTANT__AZURE_OPENAI_ENDPOINT=https://.openai.azure.com/
diff --git a/examples/python/python-03-multimodel-chatbot/.vscode/launch.json b/examples/python/python-03-multimodel-chatbot/.vscode/launch.json
index 75be5aaa..545e0773 100644
--- a/examples/python/python-03-multimodel-chatbot/.vscode/launch.json
+++ b/examples/python/python-03-multimodel-chatbot/.vscode/launch.json
@@ -7,7 +7,7 @@
             "name": "examples: python-03-multimodel-chatbot",
             "cwd": "${workspaceFolder}",
             "module": "semantic_workbench_assistant.start",
-            "args": ["assistant.chat:app", "--port", "3003"],
+            "args": ["assistant.chat:app"],
             "consoleTitle": "${workspaceFolderBasename}",
             "justMyCode": false
         }
diff --git a/examples/python/python-03-multimodel-chatbot/assistant/chat.py b/examples/python/python-03-multimodel-chatbot/assistant/chat.py
index fffeb523..aad16b97 100644
--- a/examples/python/python-03-multimodel-chatbot/assistant/chat.py
+++ b/examples/python/python-03-multimodel-chatbot/assistant/chat.py
@@ -62,7 +62,7 @@
 #
 # create the configuration provider, using the extended configuration model
 #
-assistant_config = BaseModelAssistantConfig(AssistantConfigModel())
+assistant_config = BaseModelAssistantConfig(AssistantConfigModel)
 
 
 # define the content safety evaluator factory
diff --git a/examples/python/python-03-multimodel-chatbot/assistant/config.py b/examples/python/python-03-multimodel-chatbot/assistant/config.py
index e9fc232a..93c1e8e0 100644
--- a/examples/python/python-03-multimodel-chatbot/assistant/config.py
+++ b/examples/python/python-03-multimodel-chatbot/assistant/config.py
@@ -340,7 +340,7 @@ class AssistantConfigModel(BaseModel):
             discriminator="service_type",
         ),
         UISchema(widget="radio", hide_title=True),
-    ] = AzureOpenAIServiceConfig()
+    ] = AzureOpenAIServiceConfig.model_construct()
 
     content_safety_config: Annotated[
         CombinedContentSafetyEvaluatorConfig,
diff --git a/libraries/python/content-safety/content_safety/evaluators/azure_content_safety/config.py b/libraries/python/content-safety/content_safety/evaluators/azure_content_safety/config.py
index 6ae10a42..fb64485a 100644
--- a/libraries/python/content-safety/content_safety/evaluators/azure_content_safety/config.py
+++ b/libraries/python/content-safety/content_safety/evaluators/azure_content_safety/config.py
@@ -6,7 +6,7 @@
 
 from azure.core.credentials import AzureKeyCredential
 from azure.identity import DefaultAzureCredential
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict, Field, HttpUrl
 from semantic_workbench_assistant import config
 from semantic_workbench_assistant.config import ConfigSecretStr, UISchema
 
@@ -61,11 +61,9 @@ class AzureServiceKeyAuthConfig(BaseModel):
         # It will hide the value in the UI.
         ConfigSecretStr,
         Field(
-            default="",
             title="Azure Service API Key",
             description="The service API key for your resource instance.",
         ),
-        UISchema(placeholder="[optional]"),
     ]
 
 
@@ -87,7 +85,9 @@ def get_azure_default_credential() -> DefaultAzureCredential:
 
 
 class AzureContentSafetyEvaluatorConfig(BaseModel):
-    model_config = ConfigDict(title="Azure Content Safety Evaluator")
+    model_config = ConfigDict(
+        title="Azure Content Safety Evaluator", json_schema_extra={"required": ["azure_content_safety_endpoint"]}
+    )
 
     service_type: Annotated[Literal["azure-content-safety"], UISchema(widget="hidden")] = "azure-content-safety"
 
@@ -129,12 +129,14 @@ class AzureContentSafetyEvaluatorConfig(BaseModel):
     ] = AzureIdentityAuthConfig()
 
     azure_content_safety_endpoint: Annotated[
-        str,
+        HttpUrl,
         Field(
             title="Azure Content Safety Service Endpoint",
            description="The endpoint to use for the Azure Content Safety service.",
+            default=config.first_env_var("azure_content_safety_endpoint", "assistant__azure_content_safety_endpoint")
+            or "",
         ),
-    ] = config.first_env_var("azure_content_safety_endpoint", "assistant__azure_content_safety_endpoint") or ""
+    ]
 
     # set on the class to avoid re-authenticating for each request
     _azure_default_credential: DefaultAzureCredential | None = None
diff --git a/libraries/python/content-safety/content_safety/evaluators/azure_content_safety/evaluator.py b/libraries/python/content-safety/content_safety/evaluators/azure_content_safety/evaluator.py
index e597f695..ee1756ea 100644
--- a/libraries/python/content-safety/content_safety/evaluators/azure_content_safety/evaluator.py
+++ b/libraries/python/content-safety/content_safety/evaluators/azure_content_safety/evaluator.py
@@ -103,7 +103,7 @@ async def _evaluate_batch(self, text: str) -> ContentSafetyEvaluation:
         # send the text to the Azure Content Safety service for evaluation
         try:
             response = ContentSafetyClient(
-                endpoint=self.config.azure_content_safety_endpoint,
+                endpoint=str(self.config.azure_content_safety_endpoint),
                 credential=self.config._get_azure_credentials(),
             ).analyze_text(AnalyzeTextOptions(text=text))
         except Exception as e:
diff --git a/libraries/python/content-safety/content_safety/evaluators/config.py b/libraries/python/content-safety/content_safety/evaluators/config.py
index 6aa0f02a..e4c8f8b4 100644
--- a/libraries/python/content-safety/content_safety/evaluators/config.py
+++ b/libraries/python/content-safety/content_safety/evaluators/config.py
@@ -16,4 +16,4 @@ class CombinedContentSafetyEvaluatorConfig(BaseModel):
             title="Content Safety Evaluator",
         ),
         UISchema(widget="radio", hide_title=True),
-    ] = AzureContentSafetyEvaluatorConfig()
+    ] = AzureContentSafetyEvaluatorConfig.model_construct()
diff --git a/libraries/python/content-safety/content_safety/evaluators/openai_moderations/config.py b/libraries/python/content-safety/content_safety/evaluators/openai_moderations/config.py
index 3ab6961e..b9839f88 100644
--- a/libraries/python/content-safety/content_safety/evaluators/openai_moderations/config.py
+++ b/libraries/python/content-safety/content_safety/evaluators/openai_moderations/config.py
@@ -27,6 +27,9 @@ class OpenAIContentSafetyEvaluatorConfig(BaseModel):
 
     model_config = ConfigDict(
         title="OpenAI Content Safety Evaluator",
+        json_schema_extra={
+            "required": ["openai_api_key"],
+        },
     )
 
     service_type: Annotated[
@@ -60,7 +63,6 @@ class OpenAIContentSafetyEvaluatorConfig(BaseModel):
     openai_api_key: Annotated[
         ConfigSecretStr,
         Field(
-            default="",
             title="OpenAI API Key",
             description="The API key to use for the OpenAI API.",
         ),
diff --git a/libraries/python/openai-client/.vscode/settings.json b/libraries/python/openai-client/.vscode/settings.json
new file mode 100644
index 00000000..47843ee4
--- /dev/null
+++ b/libraries/python/openai-client/.vscode/settings.json
@@ -0,0 +1,36 @@
+{
+    "editor.bracketPairColorization.enabled": true,
+    "editor.codeActionsOnSave": {
+        "source.organizeImports": "explicit",
+        "source.fixAll": "explicit"
+    },
+    "editor.guides.bracketPairs": "active",
+    "editor.formatOnPaste": true,
+    "editor.formatOnType": true,
+    "editor.formatOnSave": true,
+    "files.eol": "\n",
+    "files.trimTrailingWhitespace": true,
+    "python.analysis.autoFormatStrings": true,
+    "python.analysis.autoImportCompletions": true,
+    "python.analysis.diagnosticMode": "workspace",
+    "python.analysis.fixAll": ["source.unusedImports"],
+    "python.analysis.inlayHints.functionReturnTypes": true,
+    "python.analysis.typeCheckingMode": "basic",
+    "python.defaultInterpreterPath": "${workspaceFolder}/.venv",
+    "[python]": {
+        "editor.defaultFormatter": "charliermarsh.ruff",
+        "editor.formatOnSave": true,
+        "editor.codeActionsOnSave": {
+            "source.fixAll": "explicit",
+            "source.unusedImports": "explicit",
+            "source.organizeImports": "explicit",
+            "source.formatDocument": "explicit"
+        }
+    },
+    "ruff.nativeServer": "on",
+    "search.exclude": {
+        "**/.venv": true,
+        "**/.data": true,
+        "**/__pycache__": true
+    }
+}
diff --git a/libraries/python/openai-client/openai_client/client.py b/libraries/python/openai-client/openai_client/client.py
index cd1746bd..fda1ea1d 100644
--- a/libraries/python/openai-client/openai_client/client.py
+++ b/libraries/python/openai-client/openai_client/client.py
@@ -1,12 +1,16 @@
 from azure.identity import DefaultAzureCredential, get_bearer_token_provider
 from openai import AsyncAzureOpenAI, AsyncOpenAI
 from openai.lib.azure import AsyncAzureADTokenProvider
-from .config import ServiceConfig, AzureOpenAIApiKeyAuthConfig, AzureOpenAIAzureIdentityAuthConfig, AzureOpenAIServiceConfig, OpenAIServiceConfig
+from .config import (
+    ServiceConfig,
+    AzureOpenAIApiKeyAuthConfig,
+    AzureOpenAIAzureIdentityAuthConfig,
+    AzureOpenAIServiceConfig,
+    OpenAIServiceConfig,
+)
 
 
-def create_client(
-    service_config: ServiceConfig, *, api_version: str = "2024-08-01-preview"
-) -> AsyncOpenAI:
+def create_client(service_config: ServiceConfig, *, api_version: str = "2024-08-01-preview") -> AsyncOpenAI:
     """
     Creates an AsyncOpenAI client based on the provided service configuration.
""" @@ -17,7 +21,7 @@ def create_client( return AsyncAzureOpenAI( api_key=service_config.auth_config.azure_openai_api_key, azure_deployment=service_config.azure_openai_deployment, - azure_endpoint=service_config.azure_openai_endpoint, + azure_endpoint=str(service_config.azure_openai_endpoint), api_version=api_version, ) @@ -25,13 +29,12 @@ def create_client( return AsyncAzureOpenAI( azure_ad_token_provider=_get_azure_bearer_token_provider(), azure_deployment=service_config.azure_openai_deployment, - azure_endpoint=service_config.azure_openai_endpoint, + azure_endpoint=str(service_config.azure_openai_endpoint), api_version=api_version, ) case _: - raise ValueError( - f"Invalid auth method type: {type(service_config.auth_config)}") + raise ValueError(f"Invalid auth method type: {type(service_config.auth_config)}") case OpenAIServiceConfig(): return AsyncOpenAI( @@ -40,8 +43,7 @@ def create_client( ) case _: - raise ValueError( - f"Invalid service config type: {type(service_config)}") + raise ValueError(f"Invalid service config type: {type(service_config)}") _lazy_initialized_azure_bearer_token_provider = None diff --git a/libraries/python/openai-client/openai_client/config.py b/libraries/python/openai-client/openai_client/config.py index 5f991057..bb183ee4 100644 --- a/libraries/python/openai-client/openai_client/config.py +++ b/libraries/python/openai-client/openai_client/config.py @@ -1,7 +1,7 @@ from enum import StrEnum from typing import Annotated, Literal -from pydantic import BaseModel, ConfigDict, Field, StringConstraints +from pydantic import BaseModel, ConfigDict, Field, HttpUrl from semantic_workbench_assistant.config import ConfigSecretStr, UISchema, first_env_var @@ -31,8 +31,7 @@ class AzureOpenAIApiKeyAuthConfig(BaseModel): }, ) - auth_method: Annotated[Literal[AuthMethodType.APIKey], - UISchema(widget="hidden")] = AuthMethodType.APIKey + auth_method: Annotated[Literal[AuthMethodType.APIKey], UISchema(widget="hidden")] = AuthMethodType.APIKey azure_openai_api_key: Annotated[ # ConfigSecretStr is a custom type that should be used for any secrets. @@ -42,7 +41,7 @@ class AzureOpenAIApiKeyAuthConfig(BaseModel): title="Azure OpenAI API Key", description="The Azure OpenAI API key for your resource instance.", ), - ] = "" + ] class AzureOpenAIServiceConfig(BaseModel): @@ -53,48 +52,44 @@ class AzureOpenAIServiceConfig(BaseModel): }, ) - service_type: Annotated[Literal[ServiceType.AzureOpenAI], UISchema( - widget="hidden")] = ServiceType.AzureOpenAI + service_type: Annotated[Literal[ServiceType.AzureOpenAI], UISchema(widget="hidden")] = ServiceType.AzureOpenAI auth_config: Annotated[ AzureOpenAIAzureIdentityAuthConfig | AzureOpenAIApiKeyAuthConfig, Field( - title="Authentication Config", - description="The authentication configuration to use for the Azure OpenAI API.", + title="Authentication method", + description="The authentication method to use for the Azure OpenAI API.", + default=AzureOpenAIAzureIdentityAuthConfig(), ), - ] = AzureOpenAIAzureIdentityAuthConfig() + UISchema(widget="radio", hide_title=True), + ] azure_openai_endpoint: Annotated[ - str, - StringConstraints(min_length=1), + HttpUrl, Field( title="Azure OpenAI Endpoint", description=( "The Azure OpenAI endpoint for your resource instance. If not provided, the service default will" " be used." 
             ),
+            default=first_env_var("azure_openai_endpoint", "assistant__azure_openai_endpoint") or "",
         ),
-    ] = first_env_var("azure_openai_endpoint", "assistant__azure_openai_endpoint") or ""
+    ]
 
     azure_openai_deployment: Annotated[
         str,
         Field(
             title="Azure OpenAI Deployment",
             description="The Azure OpenAI deployment to use.",
+            default=first_env_var("azure_openai_deployment", "assistant__azure_openai_deployment") or "gpt-4o",
         ),
-    ] = first_env_var("azure_openai_deployment", "assistant__azure_openai_deployment") or "gpt-4o"
+    ]
 
 
 class OpenAIServiceConfig(BaseModel):
-    model_config = ConfigDict(
-        title="OpenAI",
-        json_schema_extra={
-            "required": ["openai_api_key"],
-        },
-    )
+    model_config = ConfigDict(title="OpenAI", json_schema_extra={"required": ["openai_api_key"]})
 
-    service_type: Annotated[Literal[ServiceType.OpenAI],
-                            UISchema(widget="hidden")] = ServiceType.OpenAI
+    service_type: Annotated[Literal[ServiceType.OpenAI], UISchema(widget="hidden")] = ServiceType.OpenAI
 
     openai_api_key: Annotated[
         # ConfigSecretStr is a custom type that should be used for any secrets.
@@ -104,7 +99,7 @@ class OpenAIServiceConfig(BaseModel):
             title="OpenAI API Key",
             description="The API key to use for the OpenAI API.",
         ),
-    ] = ""
+    ]
 
     # spell-checker: ignore rocrupyvzgcl4yf25rqq6d1v
     openai_organization_id: Annotated[
@@ -116,9 +111,10 @@ class OpenAIServiceConfig(BaseModel):
                 " name. If you do not specify an organization ID, the default organization will be used. Example:"
                 " org-rocrupyvzgcl4yf25rqq6d1v"
             ),
+            default="",
         ),
         UISchema(placeholder="[optional]"),
-    ] = ""
+    ]
 
 
 ServiceConfig = Annotated[
@@ -126,6 +122,7 @@ class OpenAIServiceConfig(BaseModel):
     Field(
         title="Service Configuration",
         discriminator="service_type",
+        default=AzureOpenAIServiceConfig.model_construct(),
     ),
     UISchema(widget="radio", hide_title=True),
 ]
@@ -138,6 +135,6 @@ class OpenAIServiceConfig(BaseModel):
 
     import openai_client
 
     class MyConfig(BaseModel):
-        service_config: openai_client.ServiceConfig = openai_client.AzureOpenAIServiceConfig()
+        service_config: openai_client.ServiceConfig
     ```
 """
diff --git a/libraries/python/semantic-workbench-api-model/semantic_workbench_api_model/assistant_model.py b/libraries/python/semantic-workbench-api-model/semantic_workbench_api_model/assistant_model.py
index 13dc9768..9e60a2ed 100644
--- a/libraries/python/semantic-workbench-api-model/semantic_workbench_api_model/assistant_model.py
+++ b/libraries/python/semantic-workbench-api-model/semantic_workbench_api_model/assistant_model.py
@@ -42,6 +42,7 @@ class StatePutRequestModel(BaseModel):
 
 class ConfigResponseModel(BaseModel):
     config: dict[str, Any]
+    errors: list[str] | None
     json_schema: dict[str, Any] | None
     ui_schema: dict[str, Any] | None
 
diff --git a/libraries/python/semantic-workbench-assistant/.vscode/launch.json b/libraries/python/semantic-workbench-assistant/.vscode/launch.json
index 043e823c..a6c142b0 100644
--- a/libraries/python/semantic-workbench-assistant/.vscode/launch.json
+++ b/libraries/python/semantic-workbench-assistant/.vscode/launch.json
@@ -7,7 +7,7 @@
             "name": "canonical-assistant",
             "cwd": "${workspaceFolder}",
             "module": "semantic_workbench_assistant.start",
-            "args": ["semantic_workbench_assistant.canonical:app", "--port", "3100"],
+            "args": ["semantic_workbench_assistant.canonical:app"],
             "consoleTitle": "canonical-assistant"
         }
     ]
diff --git a/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/assistant.py b/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/assistant.py
index 8a22258a..a32a9bc4 100644
--- a/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/assistant.py
+++ b/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/assistant.py
@@ -31,7 +31,7 @@ def __init__(
         assistant_service_id: str,
         assistant_service_name: str,
         assistant_service_description: str,
-        config_provider: AssistantConfigProvider = BaseModelAssistantConfig(EmptyConfigModel()).provider,
+        config_provider: AssistantConfigProvider = BaseModelAssistantConfig(EmptyConfigModel).provider,
         data_exporter: AssistantDataExporter = FileStorageAssistantDataExporter(),
         conversation_data_exporter: ConversationDataExporter = FileStorageConversationDataExporter(),
         inspector_state_providers: Mapping[str, AssistantConversationInspectorStateProvider] | None = None,
diff --git a/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/config.py b/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/config.py
index 55cbd51f..6cb07071 100644
--- a/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/config.py
+++ b/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/config.py
@@ -2,7 +2,6 @@
 import pathlib
 from typing import Any, Generic, TypeVar
 
-import deepmerge
 from pydantic import (
     BaseModel,
     ValidationError,
@@ -32,10 +31,9 @@ class BaseModelAssistantConfig(Generic[ConfigModelT]):
     Assistant-config implementation that uses a BaseModel for default config.
     """
 
-    def __init__(self, default: ConfigModelT | type[ConfigModelT], ui_schema: dict[str, Any] = {}) -> None:
-        default = default() if isinstance(default, type) else default
-        self._default = default
-        self._ui_schema = deepmerge.always_merger.merge(get_ui_schema(default.__class__), ui_schema)
+    def __init__(self, cls: type[ConfigModelT]) -> None:
+        self._cls = cls
+        self._ui_schema = get_ui_schema(cls)
 
     def _private_path_for(self, assistant_context: AssistantContext) -> pathlib.Path:
         # store assistant config, including secrets, in a separate partition that is never exported
@@ -54,11 +52,11 @@ async def get(self, assistant_context: AssistantContext) -> ConfigModelT:
         config = None
         try:
-            config = read_model(path, self._default.__class__)
+            config = read_model(path, self._cls)
         except ValidationError as e:
             logger.warning("exception reading config; path: %s", path, exc_info=e)
 
-        return config or self._default
+        return config or self._cls.model_construct()
 
     async def _set(self, assistant_context: AssistantContext, config: ConfigModelT) -> None:
         # save the config with secrets serialized with their actual values for the assistant
@@ -86,15 +84,23 @@ def __init__(self, provider: BaseModelAssistantConfig[ConfigModelT]) -> None:
 
     async def get(self, assistant_context: AssistantContext) -> AssistantConfigDataModel:
         config = await self._provider.get(assistant_context)
+        errors = []
+        try:
+            config.model_validate(config.model_dump())
+        except ValidationError as e:
+            for error in e.errors(include_url=False):
+                errors.append(str(error))
+
         return AssistantConfigDataModel(
             config=config.model_dump(mode="json"),
+            errors=errors,
             json_schema=config.model_json_schema(),
             ui_schema=self._provider._ui_schema,
         )
 
     async def set(self, assistant_context: AssistantContext, config: dict[str, Any]) -> None:
         try:
-            updated_config = self._provider._default.model_validate(config)
+            updated_config = self._provider._cls.model_validate(config)
         except ValidationError as e:
             raise BadRequestError(str(e))
diff --git a/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/protocol.py b/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/protocol.py
index d8210599..6d95d95a 100644
--- a/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/protocol.py
+++ b/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/protocol.py
@@ -76,6 +76,7 @@ async def import_(self, context: ConversationContext, stream: IO[bytes]) -> None
 @dataclass
 class AssistantConfigDataModel:
     config: dict[str, Any]
+    errors: list[str] | None = field(default=None)
     json_schema: dict[str, Any] | None = field(default=None)
     ui_schema: dict[str, Any] | None = field(default=None)
diff --git a/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/service.py b/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/service.py
index a2aa08bf..fc0f6779 100644
--- a/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/service.py
+++ b/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/assistant_app/service.py
@@ -226,6 +226,7 @@ async def get_service_info(self) -> assistant_model.ServiceInfoModel:
             description=self.service_description,
             default_config=assistant_model.ConfigResponseModel(
                 config=default_config.config,
+                errors=default_config.errors,
                 json_schema=default_config.json_schema,
                 ui_schema=default_config.ui_schema,
             ),
@@ -307,6 +308,7 @@ async def get_config(self, assistant_id: str) -> assistant_model.ConfigResponseM
         config = await self.assistant_app._config_provider.get(assistant_context)
         return assistant_model.ConfigResponseModel(
             config=config.config,
+            errors=config.errors,
             json_schema=config.json_schema,
             ui_schema=config.ui_schema,
         )
diff --git a/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/config.py b/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/config.py
index efeb00da..6bfe722a 100644
--- a/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/config.py
+++ b/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/config.py
@@ -24,21 +24,24 @@ def first_env_var(*env_vars: str, include_upper_and_lower: bool = True, include_
 
     Args:
         include_upper_and_lower: if True, then the UPPER and lower case versions of the env vars will be checked.
-        include_dot_env: if True, then the .env file will be checked for the env vars.
+        include_dot_env: if True, then the .env file will be checked for the env vars after the os.
""" if include_upper_and_lower: env_vars = (*env_vars, *[env_var.upper() for env_var in env_vars], *[env_var.lower() for env_var in env_vars]) - dot_env_values = {} - if include_dot_env: - dotenv_path = dotenv.find_dotenv(usecwd=True) - if dotenv_path: - dot_env_values = dotenv.dotenv_values(dotenv_path) - for env_var in env_vars: if env_var in os.environ: return os.environ[env_var] + if not include_dot_env: + return None + + dotenv_path = dotenv.find_dotenv(usecwd=True) + if not dotenv_path: + return None + + dot_env_values = dotenv.dotenv_values(dotenv_path) + for env_var in env_vars: if env_var in dot_env_values: return dot_env_values[env_var] diff --git a/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/start.py b/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/start.py index 9e5730c9..c81ce1f0 100644 --- a/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/start.py +++ b/libraries/python/semantic-workbench-assistant/semantic_workbench_assistant/start.py @@ -1,7 +1,9 @@ import argparse import logging import os +import socket import sys +from contextlib import closing import uvicorn @@ -21,8 +23,7 @@ def main(): "--port", dest="port", type=int, - default=settings.port, - help="port to run service on", + help="port to run service on; if not specified, a random port will be selected", ) parse_args.add_argument("--host", dest="host", type=str, default=settings.host, help="host IP to run service on") parse_args.add_argument( @@ -90,7 +91,7 @@ def main(): logger.info("Enabling auto-reload ...") settings.host = args.host - settings.port = args.port + settings.port = args.port or find_free_port(settings.host) settings.assistant_service_id = args.assistant_service_id settings.assistant_service_name = args.assistant_service_name settings.assistant_service_description = args.assistant_service_description @@ -106,5 +107,12 @@ def main(): ) +def find_free_port(host: str): + with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: + s.bind((host, 0)) + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + return s.getsockname()[1] + + if __name__ == "__main__": main() diff --git a/libraries/python/semantic-workbench-assistant/tests/test_assistant_app.py b/libraries/python/semantic-workbench-assistant/tests/test_assistant_app.py index e9560120..0777cbc9 100644 --- a/libraries/python/semantic-workbench-assistant/tests/test_assistant_app.py +++ b/libraries/python/semantic-workbench-assistant/tests/test_assistant_app.py @@ -340,7 +340,7 @@ class TestConfigModel(BaseModel): test_key: str = "test_value" secret_field: ConfigSecretStr = "secret_default" - config_provider = BaseModelAssistantConfig(TestConfigModel()).provider + config_provider = BaseModelAssistantConfig(TestConfigModel).provider # wrap the provider so we can check calls to it config_provider_wrapper = mock.Mock(wraps=config_provider) @@ -373,6 +373,7 @@ class TestConfigModel(BaseModel): response = await instance_client.get_config() assert response == assistant_model.ConfigResponseModel( config={"test_key": "test_value", "secret_field": "**********"}, + errors=[], json_schema=TestConfigModel.model_json_schema(), ui_schema=expected_ui_schema, ) @@ -385,6 +386,7 @@ class TestConfigModel(BaseModel): ) assert response == assistant_model.ConfigResponseModel( config={"test_key": "new_value", "secret_field": "**********"}, + errors=[], json_schema=TestConfigModel.model_json_schema(), ui_schema=expected_ui_schema, ) @@ -399,6 +401,7 @@ class 
TestConfigModel(BaseModel): response = await instance_client.get_config() assert response == assistant_model.ConfigResponseModel( config={"test_key": "new_value", "secret_field": "**********"}, + errors=[], json_schema=TestConfigModel.model_json_schema(), ui_schema=expected_ui_schema, ) @@ -432,6 +435,7 @@ class TestConfigModel(BaseModel): ) assert response == assistant_model.ConfigResponseModel( config={"test_key": "new_value", "secret_field": ""}, + errors=[], json_schema=TestConfigModel.model_json_schema(), ui_schema=expected_ui_schema, ) diff --git a/libraries/python/skills/notebooks/.env.example b/libraries/python/skills/notebooks/.env.example index 960b3b6c..8f408f02 100644 --- a/libraries/python/skills/notebooks/.env.example +++ b/libraries/python/skills/notebooks/.env.example @@ -1,3 +1,8 @@ +# NOTE: +# - Environment variables in the host environment will take precedence over values in this file. +# - When running with VS Code, you must 'stop' and 'start' the process for changes to take effect. +# It is not enough to just use the VS Code 'restart' button + AZURE_OPENAI_ENDPOINT="https://lightspeed-team-shared-openai-eastus.openai.azure.com/" AZURE_OPENAI_DEPLOYMENT="gpt-4o-mini" AZURE_OPENAI_API_VERSION="2023-05-15" diff --git a/tools/run-python-example2.ps1 b/tools/run-python-example2.ps1 index 39a73541..17e02303 100644 --- a/tools/run-python-example2.ps1 +++ b/tools/run-python-example2.ps1 @@ -16,4 +16,4 @@ Set-Location "examples/python/python-02-simple-chatbot" # Run the commands uv sync -uv run start-semantic-workbench-assistant assistant.chat:app --port 3002 \ No newline at end of file +uv run start-semantic-workbench-assistant assistant.chat:app diff --git a/tools/run-python-example2.sh b/tools/run-python-example2.sh index 52f43e2f..6554e46a 100755 --- a/tools/run-python-example2.sh +++ b/tools/run-python-example2.sh @@ -8,4 +8,4 @@ cd $ROOT cd examples/python/python-02-simple-chatbot uv sync -uv run start-semantic-workbench-assistant assistant.chat:app --port 3002 +uv run start-semantic-workbench-assistant assistant.chat:app diff --git a/workbench-app/.vscode/launch.json b/workbench-app/.vscode/launch.json index 8c9861b8..6e467952 100644 --- a/workbench-app/.vscode/launch.json +++ b/workbench-app/.vscode/launch.json @@ -1,15 +1,15 @@ { - "version": "0.2.0", - "configurations": [ - { - "type": "node", - "request": "launch", - "name": "app: semantic-workbench-app", - "cwd": "${workspaceFolder}", - "skipFiles": ["/**"], - "console": "integratedTerminal", - "runtimeExecutable": "npm", - "runtimeArgs": ["run", "dev"] - } - ] + "version": "0.2.0", + "configurations": [ + { + "type": "node", + "request": "launch", + "name": "app: semantic-workbench-app", + "cwd": "${workspaceFolder}", + "skipFiles": ["/**"], + "console": "integratedTerminal", + "runtimeExecutable": "npm", + "runtimeArgs": ["run", "dev"] + } + ] } diff --git a/workbench-app/package.json b/workbench-app/package.json index d889249c..65f364c8 100644 --- a/workbench-app/package.json +++ b/workbench-app/package.json @@ -9,7 +9,7 @@ "depcheck": "depcheck --ignores=\"@types/*,ts-prune,typescript,vite\" --ignore-dirs=\".vscode,.vs,.git,node_modules\" --skip-missing", "dev": "vite", "find-deadcode": "node ./tools/filtered-ts-prune.cjs", - "format": "prettier --write .", + "format": "prettier --write src", "lint": "eslint src --fix", "prettify": "prettier --write \"src/**/*.{ts,tsx,js,jsx,json,scss,css,html,svg}\"", "preview": "vite preview", diff --git a/workbench-app/src/components/App/AppFooter.tsx 
b/workbench-app/src/components/App/AppFooter.tsx index f3c9a248..bad236b5 100644 --- a/workbench-app/src/components/App/AppFooter.tsx +++ b/workbench-app/src/components/App/AppFooter.tsx @@ -39,10 +39,7 @@ export const AppFooter: React.FC = () => { Trademarks {' '} |{' '} - + @GitHub {' '} | © Microsoft 2024 diff --git a/workbench-app/src/components/Assistants/AssistantEdit.tsx b/workbench-app/src/components/Assistants/AssistantEdit.tsx index 4bc7bdb7..c9684397 100644 --- a/workbench-app/src/components/Assistants/AssistantEdit.tsx +++ b/workbench-app/src/components/Assistants/AssistantEdit.tsx @@ -1,6 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. import { Button, Card, Divider, makeStyles, shorthands, Text, tokens } from '@fluentui/react-components'; +import { Warning24Filled } from '@fluentui/react-icons'; import Form from '@rjsf/fluentui-rc'; import { RegistryWidgetsType, RJSFSchema } from '@rjsf/utils'; import validator from '@rjsf/validator-ajv8'; @@ -34,6 +35,13 @@ const useClasses = makeStyles({ padding: '8px', ...shorthands.border(tokens.strokeWidthThin, 'solid', tokens.colorNeutralStroke1), }, + warning: { + display: 'flex', + flexDirection: 'row', + alignItems: 'center', + fontWeight: tokens.fontWeightSemibold, + color: tokens.colorPaletteRedForeground1, + }, }); interface AssistantInstanceEditProps { @@ -51,6 +59,7 @@ export const AssistantEdit: React.FC = (props) => { const [updateConfig] = useUpdateConfigMutation(); const [formData, setFormData] = React.useState(); const [isDirty, setDirty] = React.useState(false); + const [isValid, setValid] = React.useState(true); if (configError) { const errorMessage = JSON.stringify(configError); @@ -62,9 +71,8 @@ export const AssistantEdit: React.FC = (props) => { setFormData(config?.config); }, [isLoadingConfig, config]); - const handleChange = async (updatedConfig: object) => { + const handleSubmit = async (updatedConfig: object) => { if (!config) return; - setFormData(updatedConfig); await updateConfig({ assistantId: assistant.id, config: { ...config, config: updatedConfig } }); setDirty(false); }; @@ -82,6 +90,18 @@ export const AssistantEdit: React.FC = (props) => { const diff = Utility.deepDiff(config.config, formData); setDirty(Object.keys(diff).length > 0); } + + if (config?.jsonSchema && formData) { + // Validate the form data against the JSON schema + const { errors } = validator.validateFormData( + formData, + config.jsonSchema, + undefined, + undefined, + config.uiSchema, + ); + setValid(errors.length === 0); + } }, [config, formData]); if (isLoadingConfig || !config) { @@ -121,7 +141,7 @@ export const AssistantEdit: React.FC = (props) => {
-
= (props) => {
                         newConfig={defaults}
                         onApply={restoreConfig}
                     />
+                    {!isValid && (
+                        Configuration has missing or invalid values
+                    )}
= (props) => {
                     },
                 }}
                 validator={validator}
+                liveValidate={true}
+                showErrorList={false}
                 formData={formData}
                 onChange={(data) => {
                     setFormData(data.formData);
                 }}
                 onSubmit={(data, event) => {
                     event.preventDefault();
-                    handleChange(data.formData);
+                    handleSubmit(data.formData);
                 }}
             />
diff --git a/workbench-app/src/libs/WorkbenchEventSource.ts b/workbench-app/src/libs/WorkbenchEventSource.ts
index 084df504..e3bf9511 100644
--- a/workbench-app/src/libs/WorkbenchEventSource.ts
+++ b/workbench-app/src/libs/WorkbenchEventSource.ts
@@ -146,7 +146,7 @@ export class WorkbenchEventSource {
             signal: abortSignal,
             openWhenHidden: true,
             headers: {
-                'Authorization': `Bearer ${accessToken}`,
+                Authorization: `Bearer ${accessToken}`,
                 'X-OpenIdToken': idToken,
             },
             onmessage(event: EventSourceMessage) {
diff --git a/workbench-service/semantic_workbench_service/config.py b/workbench-service/semantic_workbench_service/config.py
index cfa98a47..0e0f199d 100644
--- a/workbench-service/semantic_workbench_service/config.py
+++ b/workbench-service/semantic_workbench_service/config.py
@@ -29,7 +29,7 @@ class AuthSettings(BaseSettings):
 class WebServiceSettings(BaseSettings):
     protocol: str = "http"
     hostname: str = "127.0.0.1"
-    port: int | None = 3000
+    port: int = 3000
 
     assistant_api_key: ApiKeySettings = ApiKeySettings()
diff --git a/workbench-service/semantic_workbench_service/controller/assistant_service_client_pool.py b/workbench-service/semantic_workbench_service/controller/assistant_service_client_pool.py
index 714ad8ab..52cc6707 100644
--- a/workbench-service/semantic_workbench_service/controller/assistant_service_client_pool.py
+++ b/workbench-service/semantic_workbench_service/controller/assistant_service_client_pool.py
@@ -1,11 +1,12 @@
 import asyncio
 from typing import Self
-import uuid
+
 from semantic_workbench_api_model.assistant_service_client import (
     AssistantInstanceClient,
     AssistantServiceClient,
     AssistantServiceClientBuilder,
 )
+
 from .. import assistant_api_key, db
@@ -13,7 +14,7 @@ class AssistantServiceClientPool:
     def __init__(self, api_key_store: assistant_api_key.ApiKeyStore) -> None:
         self._api_key_store = api_key_store
         self._service_clients: dict[str, AssistantServiceClient] = {}
-        self._assistant_clients: dict[uuid.UUID, AssistantInstanceClient] = {}
+        self._assistant_clients: dict[str, AssistantInstanceClient] = {}
         self._client_lock = asyncio.Lock()
 
     def __aenter__(self) -> Self:
@@ -27,25 +28,29 @@ async def __aexit__(self, exc_type, exc_value, traceback) -> None:
 
     async def service_client(self, registration: db.AssistantServiceRegistration) -> AssistantServiceClient:
         service_id = registration.assistant_service_id
+        url = registration.assistant_service_url
+        key = f"{service_id}-{url}"
 
-        if service_id not in self._service_clients:
+        if key not in self._service_clients:
             async with self._client_lock:
-                if service_id not in self._service_clients:
-                    self._service_clients[service_id] = (await self._client_builder(registration)).for_service()
+                if key not in self._service_clients:
+                    self._service_clients[key] = (await self._client_builder(registration)).for_service()
 
-        return self._service_clients[service_id]
+        return self._service_clients[key]
 
     async def assistant_instance_client(self, assistant: db.Assistant) -> AssistantInstanceClient:
         assistant_id = assistant.assistant_id
+        url = assistant.related_assistant_service_registration.assistant_service_url
+        key = f"{assistant_id}-{url}"
 
-        if assistant_id not in self._assistant_clients:
+        if key not in self._assistant_clients:
             async with self._client_lock:
-                if assistant_id not in self._assistant_clients:
-                    self._assistant_clients[assistant_id] = (
+                if key not in self._assistant_clients:
+                    self._assistant_clients[key] = (
                         await self._client_builder(assistant.related_assistant_service_registration)
                     ).for_assistant_instance(assistant_id)
 
-        return self._assistant_clients[assistant_id]
+        return self._assistant_clients[key]
 
     async def _client_builder(
         self,
diff --git a/workbench-service/semantic_workbench_service/start.py b/workbench-service/semantic_workbench_service/start.py
index b3fa443a..df852f5e 100644
--- a/workbench-service/semantic_workbench_service/start.py
+++ b/workbench-service/semantic_workbench_service/start.py
@@ -42,7 +42,12 @@ def main():
 
     logger.info("Starting workbench service ...")
     app = create_app()
-    uvicorn.run(app, host=args.host, port=args.port, log_config={"version": 1, "disable_existing_loggers": False})
+    uvicorn.run(
+        app,
+        host=settings.service.hostname,
+        port=settings.service.port,
+        log_config={"version": 1, "disable_existing_loggers": False},
+    )
 
 
 if __name__ == "__main__":
diff --git a/workbench-service/tests/conftest.py b/workbench-service/tests/conftest.py
index c422a8e3..a929dee6 100644
--- a/workbench-service/tests/conftest.py
+++ b/workbench-service/tests/conftest.py
@@ -1,4 +1,5 @@
 import asyncio
+import os
 import pathlib
 import tempfile
 import uuid
@@ -49,19 +50,29 @@ def test_user_2(monkeypatch: pytest.MonkeyPatch) -> MockUser:
     return create_test_user(monkeypatch)
 
 
+def env_var(name: str) -> str | None:
+    if name in os.environ:
+        return os.environ[name]
+    dotenv_path = dotenv.find_dotenv(usecwd=True)
+    if not dotenv_path:
+        return None
+    dotenv_values = dotenv.dotenv_values(dotenv_path)
+    return dotenv_values.get(name)
+
+
 def pytest_addoption(parser: pytest.Parser):
     parser.addoption(
         "--echosql",
         action="store_true",
         help="echo db sql statements",
-        default=(dotenv.dotenv_values().get("WORKBENCH_PYTEST_ECHOSQL") or "").lower() in ["true", "1"],
+        default=(env_var("WORKBENCH_PYTEST_ECHOSQL") or "").lower() in ["true", "1"],
     )
     parser.addoption(
         "--dbtype",
         action="store",
         help="database type",
         choices=["sqlite", "postgresql"],
-        default=dotenv.dotenv_values().get("WORKBENCH_PYTEST_DBTYPE") or "sqlite",
+        default=env_var("WORKBENCH_PYTEST_DBTYPE") or "sqlite",
    )