splits mcp code to assistant/mcp extensions and runs open deep research async with early pass at messaging (#329)

WIP check-in: uses log messages to send status updates while a tool is running; will follow up with the desired use of custom notifications.
bkrabach authored Feb 19, 2025
1 parent dbdb441 commit 3a53ade
Showing 52 changed files with 1,836 additions and 649 deletions.
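The main behavioral change, visible in the last diffed file below, routes MCP logging notifications into the assistant's visible participant status while a tool call is in flight. The following is a minimal sketch of that pattern, reusing the names the diff introduces (`handle_mcp_tool_call`, `ExtendedCallToolRequestParams`, `MCPSession`, `UpdateParticipant`, `context.set_status`); the wrapper function `run_tool_with_status` is a hypothetical name used only for illustration:

```python
# A minimal sketch of the status-update pattern this commit introduces.
# The helper name `run_tool_with_status` is illustrative; the calls and
# imports mirror the ones shown in the diff below.
from assistant_extensions.mcp import (
    ExtendedCallToolRequestParams,
    MCPSession,
    handle_mcp_tool_call,
)
from semantic_workbench_api_model.workbench_model import UpdateParticipant
from semantic_workbench_assistant.assistant_app import ConversationContext


async def run_tool_with_status(
    context: ConversationContext,
    mcp_sessions: list[MCPSession],
    tool_call: ExtendedCallToolRequestParams,
    metadata_key: str,
) -> str:
    """Run one MCP tool call, surfacing its log messages as participant status."""
    tool_call_status = f"using tool `{tool_call.name}`"

    async with context.set_status(f"{tool_call_status}..."):
        # Each MCP logging notification updates the assistant's visible status.
        async def on_logging_message(msg: str) -> None:
            await context.update_participant_me(UpdateParticipant(status=f"{tool_call_status}: {msg}"))

        tool_call_result = await handle_mcp_tool_call(
            mcp_sessions,
            tool_call,
            metadata_key,
            on_logging_message,
        )

    # Only the first text content item is used for now (see the FIXME in the diff).
    return next(
        (item.text for item in tool_call_result.content if item.type == "text"),
        "[tool call returned no content]",
    )
```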
1 change: 0 additions & 1 deletion .devcontainer/devcontainer.json
@@ -65,7 +65,6 @@
"ms-python.debugpy",
"ms-vscode.extension-test-runner",
"ms-python.python",
"ms-toolsai.datawrangler",
"ms-vscode.makefile-tools",
"ms-vscode.vscode-node-azure-pack",
"tamasfe.even-better-toml",
16 changes: 15 additions & 1 deletion assistants/codespace-assistant/.vscode/settings.json
@@ -9,6 +9,14 @@
"editor.formatOnType": true,
"editor.formatOnSave": true,
"files.eol": "\n",
"files.exclude": {
"**/.git": true,
"**/.svn": true,
"**/.hg": true,
"**/CVS": true,
"**/.DS_Store": true,
"**/Thumbs.db": true
},
"files.trimTrailingWhitespace": true,
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
@@ -25,6 +33,9 @@
"python.analysis.inlayHints.functionReturnTypes": true,
"python.analysis.typeCheckingMode": "standard",
"python.defaultInterpreterPath": "${workspaceFolder}/.venv",
"python.testing.pytestEnabled": true,
"python.testing.cwd": "${workspaceFolder}",
"python.testing.pytestArgs": [],
"[python]": {
"editor.defaultFormatter": "charliermarsh.ruff",
"editor.formatOnSave": true,
@@ -38,8 +49,10 @@
"ruff.nativeServer": "on",
"search.exclude": {
"**/.venv": true,
"**/.data": true
"**/.data": true,
"**/__pycache__": true
},

// For use with optional extension: "streetsidesoftware.code-spell-checker"
"cSpell.ignorePaths": [
".venv",
@@ -69,6 +82,7 @@
"pydantic",
"pyproject",
"pyright",
"pytest",
"semanticworkbench",
"tiktoken",
"updown",
4 changes: 2 additions & 2 deletions assistants/codespace-assistant/assistant/chat.py
@@ -10,6 +10,7 @@

import deepmerge
from assistant_extensions.attachments import AttachmentsExtension
from assistant_extensions.mcp import MCPToolsConfigModel
from content_safety.evaluators import CombinedContentSafetyEvaluator
from semantic_workbench_api_model.workbench_model import (
ConversationEvent,
@@ -27,7 +28,6 @@
)

from .config import AssistantConfigModel
from .extensions.tools import ToolsConfigModel
from .response import respond_to_conversation

logger = logging.getLogger(__name__)
@@ -67,7 +67,7 @@ async def content_evaluator_factory(context: ConversationContext) -> ContentSafe
)


async def tools_config_provider(context: AssistantContext) -> ToolsConfigModel:
async def tools_config_provider(context: AssistantContext) -> MCPToolsConfigModel:
return (await assistant_config.get(context)).extensions_config.tools


6 changes: 3 additions & 3 deletions assistants/codespace-assistant/assistant/config.py
@@ -6,13 +6,13 @@
OpenAIClientConfigModel,
)
from assistant_extensions.attachments import AttachmentsConfigModel
from assistant_extensions.mcp import MCPToolsConfigModel
from content_safety.evaluators import CombinedContentSafetyEvaluatorConfig
from openai_client import AzureOpenAIServiceConfig, OpenAIRequestConfig
from pydantic import BaseModel, Field
from semantic_workbench_assistant.config import UISchema

from . import helpers
from .extensions.tools import ToolsConfigModel

# The semantic workbench app uses react-jsonschema-form for rendering
# dynamic configuration forms based on the configuration model and UI schema
@@ -31,12 +31,12 @@

class ExtensionsConfigModel(BaseModel):
tools: Annotated[
ToolsConfigModel,
MCPToolsConfigModel,
Field(
title="Tools Configuration",
description="Configuration for the tools.",
),
] = ToolsConfigModel()
] = MCPToolsConfigModel()

attachments: Annotated[
AttachmentsConfigModel,

This file was deleted.

This file was deleted.

@@ -5,6 +5,7 @@
from typing import List

import deepmerge
from assistant_extensions.mcp import ExtendedCallToolRequestParams, MCPSession, handle_mcp_tool_call
from openai.types.chat import (
ChatCompletion,
ChatCompletionToolMessageParam,
@@ -14,18 +15,13 @@
from semantic_workbench_api_model.workbench_model import (
MessageType,
NewConversationMessage,
UpdateParticipant,
)
from semantic_workbench_assistant.assistant_app import ConversationContext

from assistant.extensions.tools.__model import MCPSession

from ..extensions.tools import (
ToolCall,
handle_tool_call,
)
from .models import StepResult
from .utils import (
extract_content_from_tool_calls,
extract_content_from_mcp_tool_calls,
get_response_duration_message,
get_token_usage_message,
)
@@ -55,7 +51,7 @@ async def handle_error(error_message: str) -> StepResult:
step_result.status = "error"
return step_result

# Get service and request configuration for generative model
# get service and request configuration for generative model
generative_request_config = request_config

# get the total tokens used for the completion
@@ -67,10 +63,10 @@ async def handle_error(error_message: str) -> StepResult:
content = completion.choices[0].message.content

# check if the completion has tool calls
tool_calls: list[ToolCall] = []
tool_calls: list[ExtendedCallToolRequestParams] = []
if completion.choices[0].message.tool_calls:
ai_context, tool_calls = extract_content_from_tool_calls([
ToolCall(
ai_context, tool_calls = extract_content_from_mcp_tool_calls([
ExtendedCallToolRequestParams(
id=tool_call.id,
name=tool_call.function.name,
arguments=json.loads(
@@ -104,7 +100,7 @@ async def handle_error(error_message: str) -> StepResult:
deepmerge.always_merger.merge(
step_result.metadata,
{
"tool_calls": [tool_call.to_dict() for tool_call in tool_calls],
"tool_calls": [tool_call.model_dump(mode="json") for tool_call in tool_calls],
},
)

@@ -170,21 +166,32 @@ async def handle_error(error_message: str) -> StepResult:
tool_call_count = 0
for tool_call in tool_calls:
tool_call_count += 1
try:
tool_call_result = await handle_tool_call(
mcp_sessions,
tool_call,
f"{metadata_key}:request:tool_call_{tool_call_count}",
)
except Exception as e:
logger.exception(f"Error handling tool call: {e}")
return await handle_error("An error occurred while handling the tool call.")

tool_call_status = f"using tool `{tool_call.name}`"
async with context.set_status(f"{tool_call_status}..."):

async def on_logging_message(msg: str) -> None:
await context.update_participant_me(UpdateParticipant(status=f"{tool_call_status}: {msg}"))

try:
tool_call_result = await handle_mcp_tool_call(
mcp_sessions,
tool_call,
f"{metadata_key}:request:tool_call_{tool_call_count}",
on_logging_message,
)
except Exception as e:
logger.exception(f"Error handling tool call: {e}")
return await handle_error("An error occurred while handling the tool call.")

# Update content and metadata with tool call result metadata
deepmerge.always_merger.merge(step_result.metadata, tool_call_result.metadata)

content = (
tool_call_result.content if len(tool_call_result.content) > 0 else "[tool call returned no content]"
# FIXME only supporting 1 content item and it's text for now, should support other content types/quantity
# Get the content from the tool call result
content = next(
(content_item.text for content_item in tool_call_result.content if content_item.type == "text"),
"[tool call returned no content]",
)

# Add the token count for the tool call result to the total token count