From 9d23e4259c37ddddb623a0c6a082e80c97ea29fb Mon Sep 17 00:00:00 2001
From: Mollie Munoz
Date: Wed, 23 Oct 2024 09:35:48 -0700
Subject: [PATCH] Doc agent re-flow changes + remove rest of GC agent. (#160)

- Finishes removing the rest of the GC agent from the prospector assistant.
- Adds new WIP re-flow changes for the document agent.
- Moves GC agent config material under document/ for future reference if needed.
---
 .../config.py | 82 ++---
 .../config_defaults.py | 0
 .../draft_grant_proposal_config_defaults.py | 0
 .../agents/document/guided_conversation.py | 145 +++++++++
 .../assistant/agents/document_agent.py | 293 ++++++++++++++----
 .../agents/guided_conversation_agent.py | 286 -----------------
 .../prospector-assistant/assistant/chat.py | 27 +-
 .../prospector-assistant/assistant/config.py | 10 -
 8 files changed, 441 insertions(+), 402 deletions(-)
 rename assistants/prospector-assistant/assistant/agents/{guided_conversation => document}/config.py (54%)
 rename assistants/prospector-assistant/assistant/agents/{guided_conversation => document}/config_defaults.py (100%)
 rename assistants/prospector-assistant/assistant/agents/{guided_conversation => document}/draft_grant_proposal_config_defaults.py (100%)
 create mode 100644 assistants/prospector-assistant/assistant/agents/document/guided_conversation.py
 delete mode 100644 assistants/prospector-assistant/assistant/agents/guided_conversation_agent.py

diff --git a/assistants/prospector-assistant/assistant/agents/guided_conversation/config.py b/assistants/prospector-assistant/assistant/agents/document/config.py
similarity index 54%
rename from assistants/prospector-assistant/assistant/agents/guided_conversation/config.py
rename to assistants/prospector-assistant/assistant/agents/document/config.py
index d76dedab..f920d7de 100644
--- a/assistants/prospector-assistant/assistant/agents/guided_conversation/config.py
+++ b/assistants/prospector-assistant/assistant/agents/document/config.py
@@ -1,59 +1,58 @@
 import json
-from typing import Annotated, Any, Dict, List, Type, get_type_hints
+from typing import TYPE_CHECKING, Annotated, Any, Dict, List, Type

 from guided_conversation.utils.resources import ResourceConstraint, ResourceConstraintMode, ResourceConstraintUnit
 from pydantic import BaseModel, Field, create_model
-from pydantic_core import PydanticUndefinedType
 from semantic_workbench_assistant.config import UISchema

 from ... import helpers
-from . import draft_grant_proposal_config_defaults as config_defaults
+from . 
import config_defaults as config_defaults + +if TYPE_CHECKING: + pass + # # region Helpers # +# take a full json schema and return a pydantic model, including support for +# nested objects and typed arrays -def determine_type(type_str: str) -> Type: - type_mapping = {"str": str, "int": int, "float": float, "bool": bool, "list": List[Any], "dict": Dict[str, Any]} - return type_mapping.get(type_str, Any) +def json_type_to_python_type(json_type: str) -> Type: + # Mapping JSON types to Python types + type_mapping = {"integer": int, "string": str, "number": float, "boolean": bool, "object": dict, "array": list} + return type_mapping.get(json_type, Any) -def create_pydantic_model_from_json(json_data: str) -> Type[BaseModel]: - data = json.loads(json_data) - def create_fields(data: Dict[str, Any]) -> Dict[str, Any]: +def create_pydantic_model_from_json_schema(schema: Dict[str, Any], model_name="DynamicModel") -> Type[BaseModel]: + # Nested function to parse properties from the schema + def parse_properties(properties: Dict[str, Any]) -> Dict[str, Any]: fields = {} - for key, value in data.items(): - if value["type"] == "dict": - nested_model = create_pydantic_model_from_json(json.dumps(value["value"])) - fields[key] = (nested_model, Field(description=value["description"])) + for prop_name, prop_attrs in properties.items(): + prop_type = prop_attrs.get("type") + description = prop_attrs.get("description", None) + + if prop_type == "object": + nested_model = create_pydantic_model_from_json_schema(prop_attrs, model_name=prop_name.capitalize()) + fields[prop_name] = (nested_model, Field(..., description=description)) + elif prop_type == "array": + items = prop_attrs.get("items", {}) + if items.get("type") == "object": + nested_model = create_pydantic_model_from_json_schema(items) + fields[prop_name] = (List[nested_model], Field(..., description=description)) + else: + nested_type = json_type_to_python_type(items.get("type")) + fields[prop_name] = (List[nested_type], Field(..., description=description)) else: - fields[key] = ( - determine_type(value["type"]), - Field(default=value["value"], description=value["description"]), - ) + python_type = json_type_to_python_type(prop_type) + fields[prop_name] = (python_type, Field(..., description=description)) return fields - fields = create_fields(data) - return create_model("DynamicModel", **fields) - - -def pydantic_model_to_json(model: BaseModel) -> Dict[str, Any]: - def get_type_str(py_type: Any) -> str: - type_mapping = {str: "str", int: "int", float: "float", bool: "bool", list: "list", dict: "dict"} - return type_mapping.get(py_type, "any") - - json_dict = {} - for field_name, field in model.model_fields.items(): - field_type = get_type_hints(model)[field_name] - default_value = field.default if not isinstance(field.default, PydanticUndefinedType) else "" - json_dict[field_name] = { - "value": default_value, - "type": get_type_str(field_type), - "description": field.description or "", - } - return json_dict + properties = schema.get("properties", {}) + fields = parse_properties(properties) + return create_model(model_name, **fields) # endregion @@ -77,13 +76,13 @@ class GuidedConversationAgentConfigModel(BaseModel): title="Artifact", description="The artifact that the agent will manage.", ), - UISchema(widget="textarea"), - ] = json.dumps(pydantic_model_to_json(config_defaults.ArtifactModel), indent=2) # type: ignore + UISchema(widget="baseModelEditor"), + ] = json.dumps(config_defaults.ArtifactModel.model_json_schema(), indent=2) rules: Annotated[ 
list[str], Field(title="Rules", description="Do's and don'ts that the agent should attempt to follow"), - UISchema(schema={"items": {"ui:widget": "textarea"}}), + UISchema(schema={"items": {"ui:widget": "textarea", "ui:options": {"rows": 2}}}), ] = config_defaults.rules conversation_flow: Annotated[ @@ -92,7 +91,7 @@ class GuidedConversationAgentConfigModel(BaseModel): title="Conversation Flow", description="A loose natural language description of the steps of the conversation", ), - UISchema(widget="textarea", placeholder="[optional]"), + UISchema(widget="textarea", schema={"ui:options": {"rows": 10}}, placeholder="[optional]"), ] = config_defaults.conversation_flow.strip() context: Annotated[ @@ -141,7 +140,8 @@ class ResourceConstraint(ResourceConstraint): ] = ResourceConstraint() def get_artifact_model(self) -> Type[BaseModel]: - return create_pydantic_model_from_json(self.artifact) + schema = json.loads(self.artifact) + return create_pydantic_model_from_json_schema(schema) # endregion diff --git a/assistants/prospector-assistant/assistant/agents/guided_conversation/config_defaults.py b/assistants/prospector-assistant/assistant/agents/document/config_defaults.py similarity index 100% rename from assistants/prospector-assistant/assistant/agents/guided_conversation/config_defaults.py rename to assistants/prospector-assistant/assistant/agents/document/config_defaults.py diff --git a/assistants/prospector-assistant/assistant/agents/guided_conversation/draft_grant_proposal_config_defaults.py b/assistants/prospector-assistant/assistant/agents/document/draft_grant_proposal_config_defaults.py similarity index 100% rename from assistants/prospector-assistant/assistant/agents/guided_conversation/draft_grant_proposal_config_defaults.py rename to assistants/prospector-assistant/assistant/agents/document/draft_grant_proposal_config_defaults.py diff --git a/assistants/prospector-assistant/assistant/agents/document/guided_conversation.py b/assistants/prospector-assistant/assistant/agents/document/guided_conversation.py new file mode 100644 index 00000000..1dc7a84e --- /dev/null +++ b/assistants/prospector-assistant/assistant/agents/document/guided_conversation.py @@ -0,0 +1,145 @@ +import json +import logging +from pathlib import Path + +from guided_conversation.guided_conversation_agent import GuidedConversation +from openai import AsyncOpenAI +from semantic_kernel import Kernel +from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion +from semantic_workbench_assistant.assistant_app import ( + ConversationContext, + storage_directory_for_context, +) + +from ...config import AssistantConfigModel +from .config import GuidedConversationAgentConfigModel + +logger = logging.getLogger(__name__) + + +# +# region Agent +# + + +class GuidedConversationAgent: + """ + An agent for managing artifacts. + """ + + @staticmethod + def get_state( + conversation_context: ConversationContext, + ) -> dict | None: + """ + Get the state of the guided conversation agent. + """ + return _read_guided_conversation_state(conversation_context) + + @staticmethod + async def step_conversation( + config: AssistantConfigModel, + openai_client: AsyncOpenAI, + agent_config: GuidedConversationAgentConfigModel, + conversation_context: ConversationContext, + last_user_message: str | None, + ) -> tuple[str | None, bool]: + """ + Step the conversation to the next turn. 
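+
+        Returns a tuple of (agent message, is_conversation_over).
+
+        Example sketch (assumes an assistant config, an AsyncOpenAI client, a guided
+        conversation agent config, and a conversation context are already in hand):
+
+            response, is_over = await GuidedConversationAgent.step_conversation(
+                config=config,
+                openai_client=client,
+                agent_config=gc_config,
+                conversation_context=context,
+                last_user_message="Let's work on the outline.",
+            )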
+ """ + + rules = agent_config.rules + conversation_flow = agent_config.conversation_flow + context = agent_config.context + resource_constraint = agent_config.resource_constraint + artifact = agent_config.get_artifact_model() + + kernel = Kernel() + service_id = "gc_main" + + chat_service = OpenAIChatCompletion( + service_id=service_id, + async_client=openai_client, + ai_model_id=config.request_config.openai_model, + ) + kernel.add_service(chat_service) + + guided_conversation_agent: GuidedConversation + + state = _read_guided_conversation_state(conversation_context) + if state: + guided_conversation_agent = GuidedConversation.from_json( + json_data=state, + kernel=kernel, + artifact=artifact, # type: ignore + conversation_flow=conversation_flow, + context=context, + rules=rules, + resource_constraint=resource_constraint, + service_id=service_id, + ) + else: + guided_conversation_agent = GuidedConversation( + kernel=kernel, + artifact=artifact, # type: ignore + conversation_flow=conversation_flow, + context=context, + rules=rules, + resource_constraint=resource_constraint, + service_id=service_id, + ) + + # Step the conversation to start the conversation with the agent + # or message + result = await guided_conversation_agent.step_conversation(last_user_message) + + # Save the state of the guided conversation agent + _write_guided_conversation_state(conversation_context, guided_conversation_agent.to_json()) + + return result.ai_message, result.is_conversation_over + + # endregion + + +# +# region Helpers +# + + +def _get_guided_conversation_storage_path(context: ConversationContext, filename: str | None = None) -> Path: + """ + Get the path to the directory for storing guided conversation files. + """ + path = storage_directory_for_context(context) / "guided-conversation" + if filename: + path /= filename + return path + + +def _write_guided_conversation_state(context: ConversationContext, state: dict) -> None: + """ + Write the state of the guided conversation agent to a file. + """ + json_data = json.dumps(state) + path = _get_guided_conversation_storage_path(context) + if not path.exists(): + path.mkdir(parents=True) + path = path / "state.json" + path.write_text(json_data) + + +def _read_guided_conversation_state(context: ConversationContext) -> dict | None: + """ + Read the state of the guided conversation agent from a file. 
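+    Returns None if the state file does not exist or cannot be parsed.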
+ """ + path = _get_guided_conversation_storage_path(context, "state.json") + if path.exists(): + try: + json_data = path.read_text() + return json.loads(json_data) + except Exception: + pass + return None + + +# endregion diff --git a/assistants/prospector-assistant/assistant/agents/document_agent.py b/assistants/prospector-assistant/assistant/agents/document_agent.py index 6f414de5..b3dd6605 100644 --- a/assistants/prospector-assistant/assistant/agents/document_agent.py +++ b/assistants/prospector-assistant/assistant/agents/document_agent.py @@ -1,5 +1,5 @@ import logging -from os import path +from enum import Enum from typing import Any, Callable import deepmerge @@ -9,6 +9,7 @@ ChatCompletionMessageParam, ChatCompletionSystemMessageParam, ) +from pydantic import BaseModel from semantic_workbench_api_model.workbench_model import ( ConversationMessage, ConversationParticipant, @@ -17,16 +18,30 @@ ) from semantic_workbench_assistant.assistant_app import ( ConversationContext, - storage_directory_for_context, ) from ..config import AssistantConfigModel +from .document.config import GuidedConversationAgentConfigModel +from .document.guided_conversation import GuidedConversationAgent logger = logging.getLogger(__name__) + # # region Agent # +class RoutineMode(Enum): + UNDEFINED = 1 + E2E_DRAFT_OUTLINE = 2 # change name later + + +class Routine(BaseModel): + mode: RoutineMode = RoutineMode.UNDEFINED + step: Callable | None = None + + +class State(BaseModel): + routine: Routine = Routine() class DocumentAgent: @@ -34,9 +49,11 @@ class DocumentAgent: An agent for working on document content: creation, editing, translation, etc. """ + state: State = State() + def __init__(self, attachments_extension: AttachmentsExtension) -> None: self.attachments_extension = attachments_extension - self._commands = [self.draft_outline] + self._commands = [self.set_draft_outline_mode] # self.draft_outline] @property def commands(self) -> list[Callable]: @@ -59,85 +76,214 @@ async def receive_command( if command.__name__ == msg_command_name: logger.info(f"Found command {message.command_name}") command_found = True - await command(config, context, message, metadata) # TO DO, handle commands with args + command(config, context, message, metadata) # does not handle command with args or async commands break if not command_found: logger.warning(f"Could not find command {message.command_name}") - async def draft_outline( + def respond_to_conversation( self, config: AssistantConfigModel, context: ConversationContext, message: ConversationMessage, metadata: dict[str, Any] = {}, ) -> None: - method_metadata_key = "draft_outline" - - # get conversation related info - conversation = await context.get_messages(before=message.id) - if message.message_type == MessageType.chat: - conversation.messages.append(message) - participants_list = await context.get_participants(include_inactive=True) - - # get attachments related info - attachment_messages = await self.attachments_extension.get_completion_messages_for_attachments( - context, config=config.agents_config.attachment_agent - ) - - # get outline related info - outline: str | None = None - if path.exists(storage_directory_for_context(context) / "outline.txt"): - outline = (storage_directory_for_context(context) / "outline.txt").read_text() - - # create chat completion messages - chat_completion_messages: list[ChatCompletionMessageParam] = [] - chat_completion_messages.append(_main_system_message()) - chat_completion_messages.append( - 
_chat_history_system_message(conversation.messages, participants_list.participants) - ) - chat_completion_messages.extend(attachment_messages) - if outline is not None: - chat_completion_messages.append(_outline_system_message(outline)) - - # make completion call to openai - async with openai_client.create_client(config.service_config) as client: + # check state mode + match self.state.routine.mode: + case RoutineMode.UNDEFINED: + logger.info("Document Agent has no routine mode set. Returning.") + return + case RoutineMode.E2E_DRAFT_OUTLINE: + return self._run_e2e_draft_outline() + + @classmethod + def set_draft_outline_mode( + cls, + config: AssistantConfigModel, + context: ConversationContext, + message: ConversationMessage, + metadata: dict[str, Any] = {}, + ) -> None: + if cls.state.routine.mode is RoutineMode.UNDEFINED: + cls.state.routine.mode = RoutineMode.E2E_DRAFT_OUTLINE + else: + logger.info( + f"Document Agent in the middle of routine: {cls.state.routine.mode}. Cannot change routine modes." + ) + + def _run_e2e_draft_outline(self) -> None: + logger.info("In _run_e2e_draft_outline") + return + + async def _gc_respond_to_conversation( + self, + config: AssistantConfigModel, + gc_config: GuidedConversationAgentConfigModel, + context: ConversationContext, + metadata: dict[str, Any] = {}, + ) -> None: + method_metadata_key = "document_agent_gc_response" + is_conversation_over = False + last_user_message = None + + while not is_conversation_over: try: - completion_args = { - "messages": chat_completion_messages, - "model": config.request_config.openai_model, - "response_format": {"type": "text"}, - } - completion = await client.chat.completions.create(**completion_args) - content = completion.choices[0].message.content - _on_success_metadata_update(metadata, method_metadata_key, config, chat_completion_messages, completion) + response_message, is_conversation_over = await GuidedConversationAgent.step_conversation( + config=config, + openai_client=openai_client.create_client(config.service_config), + agent_config=gc_config, + conversation_context=context, + last_user_message=last_user_message, + ) + if response_message is None: + # need to double check this^^ None logic, when it would occur in GC. Make "" for now. + agent_message = "" + else: + agent_message = response_message + + if not is_conversation_over: + # add the completion to the metadata for debugging + deepmerge.always_merger.merge( + metadata, + { + "debug": { + f"{method_metadata_key}": {"response": agent_message}, + } + }, + ) + else: + break except Exception as e: - logger.exception(f"exception occurred calling openai chat completion: {e}") - content = ( - "An error occurred while calling the OpenAI API. Is it configured correctly?" - "View the debug inspector for more information." + logger.exception(f"exception occurred processing guided conversation: {e}") + agent_message = "An error occurred while processing the guided conversation." 
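+                # record the exception details in the response metadata for debugging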
+ deepmerge.always_merger.merge( + metadata, + { + "debug": { + f"{method_metadata_key}": { + "error": str(e), + }, + } + }, ) - _on_error_metadata_update(metadata, method_metadata_key, config, chat_completion_messages, e) - # store only latest version for now (will keep all versions later as need arises) - (storage_directory_for_context(context) / "outline.txt").write_text(content) + # send the response to the conversation + await context.send_messages( + NewConversationMessage( + content=agent_message, + message_type=MessageType.chat, + metadata=metadata, + ) + ) - # send the response to the conversation - message_type = MessageType.chat - if message.message_type == MessageType.command: - message_type = MessageType.command_response + # async def draft_outline( + # self, + # config: AssistantConfigModel, + # context: ConversationContext, + # message: ConversationMessage, + # metadata: dict[str, Any] = {}, + # ) -> tuple[str, dict[str, Any]]: + # method_metadata_key = "draft_outline" - await context.send_messages( - NewConversationMessage( - content=content, - message_type=message_type, - metadata=metadata, - ) - ) + +# +# # get conversation related info +# conversation = await context.get_messages(before=message.id) +# if message.message_type == MessageType.chat: +# conversation.messages.append(message) +# participants_list = await context.get_participants(include_inactive=True) +# +# # get attachments related info +# attachment_messages = await self.attachments_extension.get_completion_messages_for_attachments( +# context, config=config.agents_config.attachment_agent +# ) +# +# # get outline related info +# outline: str | None = None +# if path.exists(storage_directory_for_context(context) / "outline.txt"): +# outline = (storage_directory_for_context(context) / "outline.txt").read_text() +# +# # create chat completion messages +# chat_completion_messages: list[ChatCompletionMessageParam] = [] +# chat_completion_messages.append(_main_system_message()) +# chat_completion_messages.append( +# _chat_history_system_message(conversation.messages, participants_list.participants) +# ) +# chat_completion_messages.extend(attachment_messages) +# if outline is not None: +# chat_completion_messages.append(_outline_system_message(outline)) +# +# # make completion call to openai +# async with openai_client.create_client(config.service_config) as client: +# try: +# completion_args = { +# "messages": chat_completion_messages, +# "model": config.request_config.openai_model, +# "response_format": {"type": "text"}, +# } +# completion = await client.chat.completions.create(**completion_args) +# content = completion.choices[0].message.content +# _on_success_metadata_update(metadata, method_metadata_key, config, chat_completion_messages, completion) +# +# except Exception as e: +# logger.exception(f"exception occurred calling openai chat completion: {e}") +# content = ( +# "An error occurred while calling the OpenAI API. Is it configured correctly?" +# "View the debug inspector for more information." +# ) +# _on_error_metadata_update(metadata, method_metadata_key, config, chat_completion_messages, e) +# +# # store only latest version for now (will keep all versions later as need arises) +# (storage_directory_for_context(context) / "outline.txt").write_text(content) +# +# # send the response to the conversation only if from a command. Otherwise return info to caller. 
+# message_type = MessageType.chat +# if message.message_type == MessageType.command: +# message_type = MessageType.command +# +# await context.send_messages( +# NewConversationMessage( +# content=content, +# message_type=message_type, +# metadata=metadata, +# ) +# ) +# +# return content, metadata # endregion + +# +# region Inspector +# + + +# class DocumentAgentConversationInspectorStateProvider: +# display_name = "Guided Conversation" +# description = "State of the guided conversation feature within the conversation." +# +# def __init__( +# self, +# config_provider: BaseModelAssistantConfig["AssistantConfigModel"], +# ) -> None: +# self.config_provider = config_provider +# +# async def get(self, context: ConversationContext) -> AssistantConversationInspectorStateDataModel: +# """ +# Get the state for the conversation. +# """ +# +# state = _read_guided_conversation_state(context) +# +# return AssistantConversationInspectorStateDataModel(data=state or {"content": "No state available."}) +# +# +## endregion + + # # region Message Helpers # @@ -258,3 +404,28 @@ def _format_message(message: ConversationMessage, participants: list[Conversatio # endregion + +# +# region GC agent config temp +# +# pull in GC config with its defaults, and then make changes locally here for now. +gc_config = GuidedConversationAgentConfigModel() + + +# endregion + + +##### FROM NOTEBOOK +# await document_skill.draft_outline(context=unused, openai_client=async_client, model=model) +# +# decision, user_feedback = await document_skill.get_user_feedback( +# context=unused, openai_client=async_client, model=model, outline=True +# ) +# +# while decision == "[ITERATE]": +# await document_skill.draft_outline( +# context=unused, openai_client=async_client, model=model, user_feedback=user_feedback +# ) +# decision, user_feedback = await document_skill.get_user_feedback( +# context=unused, openai_client=async_client, model=model, outline=True +# ) diff --git a/assistants/prospector-assistant/assistant/agents/guided_conversation_agent.py b/assistants/prospector-assistant/assistant/agents/guided_conversation_agent.py deleted file mode 100644 index 54611224..00000000 --- a/assistants/prospector-assistant/assistant/agents/guided_conversation_agent.py +++ /dev/null @@ -1,286 +0,0 @@ -import json -import logging -from pathlib import Path -from typing import TYPE_CHECKING, Any - -import deepmerge -import openai_client -from assistant.agents.guided_conversation.config import GuidedConversationAgentConfigModel -from guided_conversation.guided_conversation_agent import GuidedConversation -from openai import AsyncOpenAI -from openai.types.chat import ChatCompletionMessageParam -from semantic_kernel import Kernel -from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion -from semantic_workbench_api_model.workbench_model import ( - AssistantStateEvent, - MessageType, - NewConversationMessage, - ParticipantRole, -) -from semantic_workbench_assistant.assistant_app import ( - AssistantConversationInspectorStateDataModel, - BaseModelAssistantConfig, - ConversationContext, - storage_directory_for_context, -) - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from ..config import AssistantConfigModel, RequestConfig - - -# -# region Agent -# - - -class GuidedConversationAgent: - """ - An agent for managing artifacts. 
- """ - - def __init__( - self, - config_provider: BaseModelAssistantConfig["AssistantConfigModel"], - ) -> None: - self.config_provider = config_provider - - @staticmethod - def get_state( - conversation_context: ConversationContext, - ) -> dict | None: - """ - Get the state of the guided conversation agent. - """ - return _read_guided_conversation_state(conversation_context) - - @staticmethod - async def step_conversation( - conversation_context: ConversationContext, - openai_client: AsyncOpenAI, - request_config: "RequestConfig", - agent_config: GuidedConversationAgentConfigModel, - additional_messages: list[ChatCompletionMessageParam] | None = None, - ) -> str | None: - """ - Step the conversation to the next turn. - """ - - rules = agent_config.rules - conversation_flow = agent_config.conversation_flow - context = agent_config.context - resource_constraint = agent_config.resource_constraint - artifact = agent_config.get_artifact_model() - - kernel = Kernel() - service_id = "gc_main" - - chat_service = OpenAIChatCompletion( - service_id=service_id, - async_client=openai_client, - ai_model_id=request_config.openai_model, - ) - kernel.add_service(chat_service) - - guided_conversation_agent: GuidedConversation - - state = _read_guided_conversation_state(conversation_context) - if state: - guided_conversation_agent = GuidedConversation.from_json( - json_data=state, - kernel=kernel, - artifact=artifact, # type: ignore - conversation_flow=conversation_flow, - context=context, - rules=rules, - resource_constraint=resource_constraint, - service_id=service_id, - ) - else: - guided_conversation_agent = GuidedConversation( - kernel=kernel, - artifact=artifact, # type: ignore - conversation_flow=conversation_flow, - context=context, - rules=rules, - resource_constraint=resource_constraint, - service_id=service_id, - ) - - # Get the latest message from the user - messages_response = await conversation_context.get_messages(limit=1, participant_role=ParticipantRole.user) - last_user_message = messages_response.messages[0].content if messages_response.messages else None - - # Step the conversation to start the conversation with the agent - result = await guided_conversation_agent.step_conversation(last_user_message) - - # Save the state of the guided conversation agent - _write_guided_conversation_state(conversation_context, guided_conversation_agent.to_json()) - - return result.ai_message - - # endregion - - # - # region Response - # - - # demonstrates how to respond to a conversation message using the guided conversation library - async def respond_to_conversation( - self, - context: ConversationContext, - metadata: dict[str, Any] = {}, - additional_messages: list[ChatCompletionMessageParam] | None = None, - ) -> None: - """ - Respond to a conversation message. - - This method uses the guided conversation agent to respond to a conversation message. The guided conversation - agent is designed to guide the conversation towards a specific goal as specified in its definition. 
- """ - - # define the metadata key for any metadata created within this method - method_metadata_key = "respond_to_conversation" - - # get the assistant configuration - assistant_config = await self.config_provider.get(context.assistant) - - # initialize variables for the response content - content: str | None = None - - try: - content = await self.step_conversation( - conversation_context=context, - openai_client=openai_client.create_client(assistant_config.service_config), - request_config=assistant_config.request_config, - agent_config=assistant_config.agents_config.guided_conversation_agent, - additional_messages=additional_messages, - ) - # add the completion to the metadata for debugging - deepmerge.always_merger.merge( - metadata, - { - "debug": { - f"{method_metadata_key}": {"response": content}, - } - }, - ) - except Exception as e: - logger.exception(f"exception occurred processing guided conversation: {e}") - content = "An error occurred while processing the guided conversation." - deepmerge.always_merger.merge( - metadata, - { - "debug": { - f"{method_metadata_key}": { - "error": str(e), - }, - } - }, - ) - - # add the state to the metadata for debugging - state = self.get_state(context) - deepmerge.always_merger.merge( - metadata, - { - "debug": { - f"{method_metadata_key}": { - "state": state, - }, - } - }, - ) - - # send the response to the conversation - await context.send_messages( - NewConversationMessage( - content=content or "[no response from assistant]", - message_type=MessageType.chat if content else MessageType.note, - metadata=metadata, - ) - ) - - await context.send_conversation_state_event( - AssistantStateEvent( - state_id="guided_conversation", - event="updated", - state=None, - ) - ) - - -# endregion - - -# -# region Inspector -# - - -class GuidedConversationConversationInspectorStateProvider: - display_name = "Guided Conversation" - description = "State of the guided conversation feature within the conversation." - - def __init__( - self, - config_provider: BaseModelAssistantConfig["AssistantConfigModel"], - ) -> None: - self.config_provider = config_provider - - async def get(self, context: ConversationContext) -> AssistantConversationInspectorStateDataModel: - """ - Get the state for the conversation. - """ - - state = _read_guided_conversation_state(context) - - return AssistantConversationInspectorStateDataModel(data=state or {"content": "No state available."}) - - -# endregion - - -# -# region Helpers -# - - -def _get_guided_conversation_storage_path(context: ConversationContext, filename: str | None = None) -> Path: - """ - Get the path to the directory for storing guided conversation files. - """ - path = storage_directory_for_context(context) / "guided-conversation" - if filename: - path /= filename - return path - - -def _write_guided_conversation_state(context: ConversationContext, state: dict) -> None: - """ - Write the state of the guided conversation agent to a file. - """ - json_data = json.dumps(state) - path = _get_guided_conversation_storage_path(context) - if not path.exists(): - path.mkdir(parents=True) - path = path / "state.json" - path.write_text(json_data) - - -def _read_guided_conversation_state(context: ConversationContext) -> dict | None: - """ - Read the state of the guided conversation agent from a file. 
- """ - path = _get_guided_conversation_storage_path(context, "state.json") - if path.exists(): - try: - json_data = path.read_text() - return json.loads(json_data) - except Exception: - pass - return None - - -# endregion diff --git a/assistants/prospector-assistant/assistant/chat.py b/assistants/prospector-assistant/assistant/chat.py index 0e5d9dea..827a6dec 100644 --- a/assistants/prospector-assistant/assistant/chat.py +++ b/assistants/prospector-assistant/assistant/chat.py @@ -99,6 +99,7 @@ async def content_evaluator_factory(context: ConversationContext) -> ContentSafe # - @assistant.events.conversation.message.on_created (event triggered when a new message of any type is created) # - @assistant.events.conversation.message.chat.on_created (event triggered when a new chat message is created) # +doc_agent_running = False @assistant.events.conversation.message.command.on_created @@ -108,11 +109,10 @@ async def on_command_message_created( config = await assistant_config.get(context.assistant) metadata: dict[str, Any] = {"debug": {"content_safety": event.data.get(content_safety.metadata_key, {})}} - # For now, handling only commands from Document Agent for exploration of implementation - # We assume Document Agent is available and future logic would determine which agent - # the command is intended for. Assumption made in order to make doc agent available asap. + # config.agents_config.document_agent.enabled = True # To do... tie into config. + global doc_agent_running + doc_agent_running = True - # if config.agents_config.document_agent.enabled: doc_agent = DocumentAgent(attachments_extension) await doc_agent.receive_command(config, context, message, metadata) @@ -148,6 +148,11 @@ async def on_message_created( # NOTE: we're experimenting with agents, if they are enabled, use them to respond to the conversation # + # if config.agents_config.document_agent.enabled: # To do... tie into config. + global doc_agent_running + if doc_agent_running: + return document_agent_respond_to_conversation(config, context, message, metadata) + # Prospector assistant response await respond_to_conversation(context, config, message, metadata) @@ -183,6 +188,20 @@ async def on_conversation_created(context: ConversationContext) -> None: # +def document_agent_respond_to_conversation( + config: AssistantConfigModel, + context: ConversationContext, + message: ConversationMessage, + metadata: dict[str, Any] = {}, +) -> None: + """ + Respond to a conversation message using the document agent. + """ + # create the document agent instance + document_agent = DocumentAgent(attachments_extension) + return document_agent.respond_to_conversation(config, context, message, metadata) + + # demonstrates how to respond to a conversation message using the OpenAI API. async def respond_to_conversation( context: ConversationContext, diff --git a/assistants/prospector-assistant/assistant/config.py b/assistants/prospector-assistant/assistant/config.py index b7987f02..e754649c 100644 --- a/assistants/prospector-assistant/assistant/config.py +++ b/assistants/prospector-assistant/assistant/config.py @@ -8,7 +8,6 @@ from . 
import helpers from .agents.artifact_agent import ArtifactAgentConfigModel -from .agents.guided_conversation.config import GuidedConversationAgentConfigModel # The semantic workbench app uses react-jsonschema-form for rendering # dynamic configuration forms based on the configuration model and UI schema @@ -42,15 +41,6 @@ class AgentsConfigModel(BaseModel): ), ] = AttachmentsConfigModel() - guided_conversation_agent: Annotated[ - GuidedConversationAgentConfigModel, - Field( - title="Guided Conversation Agent Configuration", - description="Configuration for the guided conversation agent.", - ), - UISchema(widget="hidden"), # Hide the guided conversation agent configuration for now, until we can remove it - ] = GuidedConversationAgentConfigModel() - class HighTokenUsageWarning(BaseModel): enabled: Annotated[