From 358769ec2a02d44aae1188aa51f95abbb1029793 Mon Sep 17 00:00:00 2001 From: Marc Goodner Date: Fri, 21 Feb 2025 11:03:08 -0800 Subject: [PATCH] Updates for Codespaces assistant demo (#337) Changed some defaults, updated the readme --- .../codespace-assistant/.vscode/launch.json | 10 + assistants/codespace-assistant/README.md | 181 +++++++++++++----- .../assistant_extensions/mcp/_model.py | 44 +++-- 3 files changed, 174 insertions(+), 61 deletions(-) diff --git a/assistants/codespace-assistant/.vscode/launch.json b/assistants/codespace-assistant/.vscode/launch.json index 6b34f89b..413dc08a 100644 --- a/assistants/codespace-assistant/.vscode/launch.json +++ b/assistants/codespace-assistant/.vscode/launch.json @@ -23,5 +23,15 @@ "mcp-servers: mcp-server-open-deep-research" ] } + ], + "compounds": [ + { + "name": "assistants: codespace-assistant (demo)", + "configurations": [ + "assistants: codespace-assistant", + "app: semantic-workbench-app", + "service: semantic-workbench-service" + ] + } ] } diff --git a/assistants/codespace-assistant/README.md b/assistants/codespace-assistant/README.md index 2e825b4a..16a9c42d 100644 --- a/assistants/codespace-assistant/README.md +++ b/assistants/codespace-assistant/README.md @@ -1,75 +1,156 @@ -# An assistant for help in developing within the Semantic Workbench repo in Codespaces +# Codespace Assistant NOTE: DO NOT DEPLOY THIS ASSISTANT OUTSIDE OF CODESPACES (OR LOCAL, BUT THIS HAS NOT BEEN TESTED) This assistant is designed to help with development within the Semantic Workbench repo in Codespaces, and is not intended for deployment to production environments. -## Responsible AI +The Codespace Assistant is designed to help developers work within the **Semantic Workbench** repository, particularly in **GitHub Codespaces** and **VS Code**. It provides tools, guidance, and integrations to simplify coding, debugging, and interacting with your projects. 
While optimized for Codespaces, it can also be used in local environments with some caveats. -The chatbot includes some important best practices for AI development, such as: +--- -- **System prompt safety**, ie a set of LLM guardrails to protect users. As a developer you should understand how these - guardrails work in your scenarios, and how to change them if needed. The system prompt and the prompt safety - guardrails are split in two to help with testing. When talking to LLM models, prompt safety is injected before the - system prompt. - - See https://learn.microsoft.com/azure/ai-services/openai/concepts/system-message for more details - about protecting application and users in different scenarios. -- **Content moderation**, via [Azure AI Content Safety](https://azure.microsoft.com/products/ai-services/ai-content-safety) - or [OpenAI Content Moderation](https://platform.openai.com/docs/guides/moderation). +## Key Features -See the [Responsible AI FAQ](../../RESPONSIBLE_AI_FAQ.md) for more information. +- **MCP Server Integration**: + - Provides access to tools like the MCP filesystem and VS Code servers by default. + - Additional tools (e.g., Bing Search, OpenAI integrations) can be enabled via configuration. +- **Content Safety and Guardrails**: + - Integrated with Azure OpenAI and OpenAI APIs for responsible AI usage. + - Includes prompts for instruction, guidance, and guardrails. +- **Codespaces Optimization**: + - Ready to run directly within Codespaces for a streamlined developer experience. + - Also supports local setups, but **Windows users must use WSL** due to Linux dependencies. 
-# Suggested Development Environment +--- -- Use GitHub Codespaces for a quick, turn-key dev environment: [/.devcontainer/README.md](../../.devcontainer/README.md) -- VS Code is recommended for development +## Prerequisites -## Pre-requisites +### Codespaces Development +- Follow the guide in [Optimizing for Codespaces](../../.devcontainer/OPTIMIZING_FOR_CODESPACES.md) to set up your environment. +- **Using VS Code Desktop**: + - Open the workspace: `/workspaces/semanticworkbench/semantic-workbench.code-workspace`. -- Set up your dev environment - - SUGGESTED: Use GitHub Codespaces for a quick, easy, and consistent dev - environment: [/.devcontainer/README.md](../../.devcontainer/README.md) - - ALTERNATIVE: Local setup following the [main README](../../README.md#local-development-environment) -- Set up and verify that the workbench app and service are running using the [semantic-workbench.code-workspace](../../semantic-workbench.code-workspace) -- If using Azure OpenAI, set up an Azure account and create a Content Safety resource - - See [Azure AI Content Safety](https://azure.microsoft.com/products/ai-services/ai-content-safety) for more information - - Copy the `.env.example` to `.env` and update the `ASSISTANT__AZURE_CONTENT_SAFETY_ENDPOINT` value with the endpoint of your Azure Content Safety resource - - From VS Code > `Terminal`, run `az login` to authenticate with Azure prior to starting the assistant +### Local Development +- Refer to [Setup Developer Environment](../../docs/SETUP_DEV_ENVIRONMENT.md) for full instructions. +- **Windows Users**: + - Must host the repository in **WSL (Windows Subsystem for Linux)** due to Linux library dependencies. -## Steps +### Authentication +- You must authenticate with the Semantic Workbench using a **Microsoft or organizational account**. See [Workbench App Overview](../../docs/WORKBENCH_APP.md) for details. 
-- Use VS Code > `Run and Debug` (ctrl/cmd+shift+d) > `semantic-workbench` to start the app and service from this workspace -- Use VS Code > `Run and Debug` (ctrl/cmd+shift+d) > `launch assistant` to start the assistant. -- If running in a devcontainer, follow the instructions in [.devcontainer/POST_SETUP_README.md](../../.devcontainer/POST_SETUP_README.md#start-the-app-and-service) for any additional steps. -- Return to the workbench app to interact with the assistant -- Add a new assistant from the main menu of the app, choose the assistant name as defined by the `service_name` in [chat.py](./assistant/chat.py) -- Click the newly created assistant to configure and interact with it +--- -## Starting the example from CLI +## Setup Instructions -If you're not using VS Code and/or Codespaces, you can also work from the -command line, using `uv`: +### Creating a Codespace +1. Go to the **Semantic Workbench** repository in GitHub. +2. Create a new Codespace. +3. Open the Codespace in **VS Code Desktop**. + - Open the workspace file: `/workspaces/semanticworkbench/semantic-workbench.code-workspace`. -``` -cd +### Configure `.env` Variables +1. Navigate to the folder: `/assistants/codespace-assistant`. +2. Copy `.env.example` to `.env`. +3. Replace default values with your resource details for **Azure OpenAI** and **OpenAI** APIs. + - **Azure**: + - `ASSISTANT__AZURE_OPENAI_ENDPOINT`: Azure OpenAI endpoint. + - `ASSISTANT__AZURE_OPENAI_API_KEY`: Azure API key (use managed identities if possible). + - `ASSISTANT__AZURE_CONTENT_SAFETY_ENDPOINT`: Azure Content Safety endpoint. + - **OpenAI**: + - `ASSISTANT__OPENAI_API_KEY`: API key for OpenAI. + - `ASSISTANT__OPENAI_ORGANIZATION_ID`: Organization ID (optional). -uv run start-assistant -``` +### First Launch +1. Go to the **Debug** pane in VS Code. +2. Select `assistants: codespace-assistant (demo)`. +3. Start the assistant. 
+ - For more MCP servers, select `assistants: codespace-assistant (for dev)` (requires custom API keys). +4. Open your browser: [https://127.0.0.1:4000/](https://127.0.0.1:4000/). + - Click "Advanced" > "Proceed to localhost" to bypass security warnings. +5. Create a conversation and add the assistant: + - Provide a title. + - Create a new assistant and select the Codespace Assistant service. + - Start interacting with the assistant (e.g., ask questions about the repo). -## Create your own assistant +--- -Copy the contents of this folder to your project. +## Extending Functionality -- The paths are already set if you put in the same repo root and relative path of `//` -- If placed in a different location, update the references in the `pyproject.toml` to point to the appropriate locations for the `semantic-workbench-*` packages +### Add Your Own Code +1. Open a terminal in VS Code. +2. Navigate to the `/workspaces` directory (default MCP filesystem server location). +3. Clone your repository or create a new folder. + - Optionally, add it to the workspace using **File > Add Folder to Workspace**. -## From Development to Production +The assistant can now read, write, and edit your custom code. -It's important to highlight how Semantic Workbench is a development tool, and it's not designed to host agents in -a production environment. The workbench helps with testing and debugging, in a development and isolated environment, usually your localhost. +--- -The core of your assistant/AI application, e.g. how it reacts to users, how it invokes tools, how it stores data, can be -developed with any framework, such as Semantic Kernel, Langchain, OpenAI assistants, etc. That is typically the code -you will add to `chat.py`. +## Additional MCP Servers (Advanced) -**Semantic Workbench is not a framework**. Dependencies on `semantic-workbench-assistant` package are used only to test and debug your code in Semantic Workbench. 
**When an assistant is fully developed and ready for production, configurable settings should be hard coded, dependencies on `semantic-workbench-assistant` and similar should be removed**. +The `assistants: codespace-assistant (for dev)` debug configuration enables additional MCP servers not active by default. These servers can extend the assistant's functionality, but they require custom API keys to activate. + +### Available MCP Servers + +1. **Bing Search**: + - **Command**: `http://127.0.0.1:6030/sse` + - **Purpose**: Enables search capabilities via Bing. + +2. **Open Deep Research**: + - **Command**: `http://127.0.0.1:6020/sse` + - **Purpose**: Facilitates deeper research workflows. + +3. **Giphy**: + - **Command**: `http://127.0.0.1:6000/sse` + - **Purpose**: Fetches GIFs for use in conversations. + +4. **Memory**: + - **Command**: `npx @modelcontextprotocol/server-memory` + - **Purpose**: Integrates a memory or knowledge graph system. + +5. **Sequential Thinking**: + - **Command**: `npx @modelcontextprotocol/server-sequential-thinking` + - **Purpose**: Enables tools for sequential reasoning tasks. + +### How to Enable Additional MCP Servers + +1. Use the assistant configuration interface to enable these MCP servers directly. In the Semantic Workbench, navigate to the assistant's configuration panel, locate the MCP server settings, and toggle the desired servers on. +2. Check the `.env.example` file for each server's required API keys and configuration. +3. To enable a server, update the `.env` file with the necessary values and restart the assistant. + +--- + +## Frequently Asked Questions (FAQs) + +### Authentication and Access +- **Q**: How do I log into the Semantic Workbench? + - **A**: Log in using your Microsoft or organizational account. See [Workbench App Overview](../../docs/WORKBENCH_APP.md). + +### Common Errors +1. 
**Azure Content Safety Error**: + - Issue: `Bearer token authentication is not permitted for non-HTTPS URLs.` + - Solution: Ensure `ASSISTANT__AZURE_CONTENT_SAFETY_ENDPOINT` is set to an `https://` URL — bearer token authentication is only permitted over HTTPS. +2. **Blank Screen on Startup**: + - Check if pop-up blockers are preventing access. +3. **Connection Issues on 127.0.0.1**: + - Ensure you're navigating to `https://127.0.0.1:4000/`. + +### Enabling MCP Servers +- Navigate to the assistant configuration panel and enable or configure servers as needed. +- By default, the filesystem and VS Code servers are active. Others, like Bing Search or Giphy, can be enabled manually. + +### Limits and Customization +1. **Maximum Steps Reached**: + - Expand the assistant's steps by updating the `Maximum Steps` setting in the assistant configuration. +2. **Folder Not Found**: + - Verify the path is under `/workspaces`. Adjust permissions if needed. + +--- + +## Additional Resources + +- [Optimizing for Codespaces](../../.devcontainer/OPTIMIZING_FOR_CODESPACES.md) +- [Workbench App Overview](../../docs/WORKBENCH_APP.md) +- [Setup Developer Environment](../../docs/SETUP_DEV_ENVIRONMENT.md) +- [Assistant Development Guide](../../docs/ASSISTANT_DEVELOPMENT_GUIDE.md) + +For issues, see the [Semantic Workbench README](../../README.md) or raise a question in the repository. 
\ No newline at end of file diff --git a/libraries/python/assistant-extensions/assistant_extensions/mcp/_model.py b/libraries/python/assistant-extensions/assistant_extensions/mcp/_model.py index e7e59a54..7b7f2c41 100644 --- a/libraries/python/assistant-extensions/assistant_extensions/mcp/_model.py +++ b/libraries/python/assistant-extensions/assistant_extensions/mcp/_model.py @@ -16,23 +16,38 @@ class MCPServerEnvConfig(BaseModel): key: Annotated[str, Field(title="Key", description="Environment variable key.")] - value: Annotated[str, Field(title="Value", description="Environment variable value.")] + value: Annotated[ + str, Field(title="Value", description="Environment variable value.") + ] class MCPServerConfig(BaseModel): - enabled: Annotated[bool, Field(title="Enabled", description="Enable the server.")] = True + enabled: Annotated[ + bool, Field(title="Enabled", description="Enable the server.") + ] = True - key: Annotated[str, Field(title="Key", description="Unique key for the server configuration.")] + key: Annotated[ + str, Field(title="Key", description="Unique key for the server configuration.") + ] command: Annotated[ - str, Field(title="Command", description="Command to run the server, use url if using SSE transport.") + str, + Field( + title="Command", + description="Command to run the server, use url if using SSE transport.", + ), ] - args: Annotated[List[str], Field(title="Arguments", description="Arguments to pass to the server.")] + args: Annotated[ + List[str], + Field(title="Arguments", description="Arguments to pass to the server."), + ] env: Annotated[ List[MCPServerEnvConfig], - Field(title="Environment Variables", description="Environment variables to set."), + Field( + title="Environment Variables", description="Environment variables to set." 
+ ), ] = [] prompt: Annotated[ @@ -43,7 +58,9 @@ class MCPServerConfig(BaseModel): long_running: Annotated[ bool, - Field(title="Long Running", description="Does this server run long running tasks?"), + Field( + title="Long Running", description="Does this server run long running tasks?" + ), ] = False task_completion_estimate: Annotated[ @@ -70,7 +87,7 @@ class MCPToolsConfigModel(BaseModel): title="Maximum Steps", description="The maximum number of steps to take when using tools, to avoid infinite loops.", ), - ] = 5 + ] = 50 max_steps_truncation_message: Annotated[ str, @@ -113,13 +130,16 @@ class MCPToolsConfigModel(BaseModel): MCPServerConfig( key="filesystem", command="npx", - args=["-y", "@modelcontextprotocol/server-filesystem", "/workspaces/semanticworkbench"], + args=[ + "-y", + "@modelcontextprotocol/server-filesystem", + "/workspaces/semanticworkbench", + ], ), MCPServerConfig( key="vscode", command="http://127.0.0.1:6010/sse", args=[], - enabled=False, ), MCPServerConfig( key="bing-search", @@ -200,7 +220,9 @@ async def initialize(self) -> None: # Load all tools from the session, later we can do the same for resources, prompts, etc. tools_result = await self.client_session.list_tools() self.tools = tools_result.tools - logger.debug(f"Loaded {len(tools_result.tools)} tools from session '{self.config.key}'") + logger.debug( + f"Loaded {len(tools_result.tools)} tools from session '{self.config.key}'" + ) class ExtendedCallToolRequestParams(CallToolRequestParams):