From d9b571620efda20ea914070c9f8e6978cad84ba1 Mon Sep 17 00:00:00 2001
From: Brian Krabach
Date: Sun, 1 Dec 2024 13:08:43 -0800
Subject: [PATCH] fix Explorer handling of reasoning model responses and add
 option to only respond to @mentions (#274)

* The prior change neglected to assign the completion response content to the assistant response, so the assistant reported "no response" despite one being visible in the debug output - this fixes that issue

* Adds an option that allows the assistant to be configured to respond only to @mentions, which is useful for putting multiple instances of the assistant in the same conversation and choosing, per turn, which one(s) should respond
---
 .../explorer-assistant/assistant/chat.py      | 61 +++++++++++++++++--
 .../explorer-assistant/assistant/config.py    |  8 +++
 .../assistant/response/response_openai.py     |  2 +
 .../skill-library/.vscode/settings.json       |  5 ++
 4 files changed, 70 insertions(+), 6 deletions(-)

diff --git a/assistants/explorer-assistant/assistant/chat.py b/assistants/explorer-assistant/assistant/chat.py
index 5a647f9e..decdae2c 100644
--- a/assistants/explorer-assistant/assistant/chat.py
+++ b/assistants/explorer-assistant/assistant/chat.py
@@ -123,12 +123,8 @@ async def on_message_created(
     - @assistant.events.conversation.message.on_created
     """

-    # ignore messages that are directed at a participant other than this assistant
-    if message.metadata.get("directed_at") and message.metadata["directed_at"] != context.assistant.id:
-        return
-
-    # ignore messages that @mention a participant other than this assistant
-    if message.metadata.get("mentions") and context.assistant.id not in message.metadata["mentions"]:
+    # check if the assistant should respond to the message
+    if not await should_respond_to_message(context, message):
         return

     # update the participant status to indicate the assistant is thinking
@@ -157,6 +153,59 @@ async def on_message_created(
     )


+async def should_respond_to_message(context: ConversationContext, message: ConversationMessage) -> bool:
+    """
+    Determine if the assistant should respond to the message.
+
+    This method can be used to implement custom logic to determine if the assistant should respond to a message.
+    By default, the assistant will respond to all messages.
+
+    Args:
+        context: The conversation context.
+        message: The message to evaluate.
+
+    Returns:
+        bool: True if the assistant should respond to the message; otherwise, False.
+    """
+    config = await assistant_config.get(context.assistant)
+
+    # ignore messages that are directed at a participant other than this assistant
+    if message.metadata.get("directed_at") and message.metadata["directed_at"] != context.assistant.id:
+        return False
+
+    # if configured to only respond to @mentions, ignore messages that do not mention the assistant anywhere in the content
+    if config.only_respond_to_mentions and f"@{context.assistant.name}" not in message.content:
+        # check to see if there are any other assistants in the conversation
+        participant_list = await context.get_participants()
+        other_assistants = [
+            participant
+            for participant in participant_list.participants
+            if participant.role == "assistant" and participant.id != context.assistant.id
+        ]
+        if len(other_assistants) == 0:
+            # no other assistants in the conversation, check the last 10 notices to see if the assistant has warned the user
+            assistant_messages = await context.get_messages(
+                participant_ids=[context.assistant.id], message_types=[MessageType.notice], limit=10
+            )
+            at_mention_warning_key = "at_mention_warning"
+            if len(assistant_messages.messages) == 0 or all(
+                at_mention_warning_key not in message.metadata for message in assistant_messages.messages
+            ):
+                # no warning notice has been sent in the last 10 notices, so send one in case the user is not aware
+                # that the assistant needs to be @mentioned to receive a response
+                await context.send_messages(
+                    NewConversationMessage(
+                        content=f"{context.assistant.name} is configured to only respond to messages that @mention it. Please @mention the assistant in your message to receive a response.",
+                        message_type=MessageType.notice,
+                        metadata={at_mention_warning_key: True},
+                    )
+                )
+
+        return False
+
+    return True
+
+
 @assistant.events.conversation.on_created
 async def on_conversation_created(context: ConversationContext) -> None:
     """
diff --git a/assistants/explorer-assistant/assistant/config.py b/assistants/explorer-assistant/assistant/config.py
index 75ce88aa..b1645645 100644
--- a/assistants/explorer-assistant/assistant/config.py
+++ b/assistants/explorer-assistant/assistant/config.py
@@ -143,6 +143,14 @@ class AssistantConfigModel(BaseModel):
         " context of our conversation. Where would you like to start?"
     )

+    only_respond_to_mentions: Annotated[
+        bool,
+        Field(
+            title="Only Respond to @Mentions",
+            description="Only respond to messages that @mention the assistant.",
+        ),
+    ] = False
+
     high_token_usage_warning: Annotated[
         HighTokenUsageWarning,
         Field(
diff --git a/assistants/explorer-assistant/assistant/response/response_openai.py b/assistants/explorer-assistant/assistant/response/response_openai.py
index b90bddde..e4851540 100644
--- a/assistants/explorer-assistant/assistant/response/response_openai.py
+++ b/assistants/explorer-assistant/assistant/response/response_openai.py
@@ -127,6 +127,8 @@ async def get_response(
                 max_completion_tokens=self.request_config.response_tokens,
             )

+            response_result.content = completion.choices[0].message.content
+
         elif self.assistant_config.extensions_config.artifacts.enabled:
             response = await self.artifacts_extension.get_openai_completion_response(
                 client,
diff --git a/libraries/python/skills/skill-library/.vscode/settings.json b/libraries/python/skills/skill-library/.vscode/settings.json
index 2c7a0316..07ab0453 100644
--- a/libraries/python/skills/skill-library/.vscode/settings.json
+++ b/libraries/python/skills/skill-library/.vscode/settings.json
@@ -53,8 +53,12 @@
     "uv.lock"
   ],
   "cSpell.words": [
+    "addopts",
+    "asctime",
+    "asyncio",
     "dotenv",
     "httpx",
+    "levelname",
     "metadrive",
     "openai",
     "pydantic",
@@ -63,6 +67,7 @@
     "pytest",
     "runtimes",
     "subdrive",
+    "testpaths",
     "tiktoken"
   ],
   "python.testing.pytestArgs": ["skill_library"],
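
Note for reviewers: the @mention gate added in should_respond_to_message can be reasoned about on its own. The snippet below is a minimal illustrative sketch (not part of the patch) that mirrors the new check, assuming plain strings for the assistant name and message content; the helper name and parameters are hypothetical and exist only for illustration.

# Illustrative sketch of the @mention gate, independent of the assistant framework.
# The only_respond_to_mentions parameter stands in for the new config flag; names are hypothetical.
def should_respond(assistant_name: str, content: str, only_respond_to_mentions: bool) -> bool:
    # With the option disabled, every message is handled as before.
    if not only_respond_to_mentions:
        return True
    # With the option enabled, respond only when the assistant is @mentioned by name.
    return f"@{assistant_name}" in content


if __name__ == "__main__":
    assert should_respond("explorer", "hello there", only_respond_to_mentions=False)
    assert not should_respond("explorer", "hello there", only_respond_to_mentions=True)
    assert should_respond("explorer", "@explorer summarize this thread", only_respond_to_mentions=True)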