diff --git a/assistants/prospector-assistant/assistant/agents/document_agent.py b/assistants/prospector-assistant/assistant/agents/document_agent.py
index 2bd52674..5d5abe14 100644
--- a/assistants/prospector-assistant/assistant/agents/document_agent.py
+++ b/assistants/prospector-assistant/assistant/agents/document_agent.py
@@ -283,6 +283,7 @@ def _get_step_method(self, step: Step | None) -> Callable | None:
             return None
         return self._step_name_to_method.get(step.name)

+    # Not currently used
     async def receive_command(
         self,
         config: AssistantConfigModel,
@@ -311,7 +312,7 @@ def _set_mode_draft_outline(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> None:
         # Pre-requisites
@@ -328,11 +329,12 @@ def _set_mode_draft_outline(
         self._state.mode = Mode(name=ModeName.DRAFT_OUTLINE, status=Status.INITIATED)
         self._write_state(context)

+    # Not currently used
     def _set_mode_draft_paper(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> None:
         # Pre-requisites
@@ -349,11 +351,11 @@ def _set_mode_draft_paper(
         self._state.mode = Mode(name=ModeName.DRAFT_PAPER, status=Status.INITIATED)
         self._write_state(context)

-    async def respond_to_conversation(
+    async def create_document(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> bool:
         self._state = self._read_state(context)
@@ -364,15 +366,22 @@ async def respond_to_conversation(
             return False

         mode = self._state.mode
+        current_mode_name = mode.get_name()
+        correct_mode_name = ModeName.DRAFT_OUTLINE  # Will update
         if not mode.is_running():
+            self._set_mode_draft_outline(
+                config, context, message, metadata
+            )  # Will update this mode as implementation expands to full document.
+        elif current_mode_name is not correct_mode_name:
             logger.warning(
-                "Document Agent must be running in a mode to respond. Current mode: %s and status: %s",
-                mode.get_name(),
-                mode.get_status(),
+                "Document Agent not in correct mode. Returning. Current mode: %s Correct mode: %s",
+                current_mode_name,
+                correct_mode_name,
             )
             return mode.is_running()

         # Run
+        mode = self._state.mode
         logger.info("Document Agent in mode %s", mode.get_name())
         mode_method = self._get_mode_method(mode)
         if mode_method:
@@ -404,7 +413,7 @@ async def _run_mode(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> Status:
         # Pre-requisites
@@ -518,7 +527,7 @@ async def _mode_draft_outline(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> Status:
         # Pre-requisites
@@ -576,7 +585,7 @@ async def _mode_draft_paper(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> Status:
         # Pre-requisites
@@ -634,7 +643,7 @@ async def _step_gc_attachment_check(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> tuple[Status, StepName | None]:
         next_step = None
@@ -675,7 +684,7 @@ async def _step_draft_outline(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> tuple[Status, StepName | None]:
         next_step = None
@@ -715,7 +724,7 @@ async def _step_gc_get_outline_feedback(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> tuple[Status, StepName | None]:
         next_step_name = None
@@ -763,7 +772,7 @@ async def _step_finish(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> tuple[Status, StepName | None]:
         # pretend completed
@@ -773,7 +782,7 @@ async def _step_draft_content(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> tuple[Status, StepName | None]:
         next_step = None
@@ -819,7 +828,7 @@ async def _gc_attachment_check(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> tuple[Status, StepName | None]:
         method_metadata_key = "document_agent_gc_attachment_check"
@@ -846,8 +855,12 @@ async def _gc_attachment_check(

         # run guided conversation step
         try:
+            if message is None:
+                user_message = None
+            else:
+                user_message = message.content
             response_message, conversation_status, next_step_name = await guided_conversation.step_conversation(
-                last_user_message=message.content,
+                last_user_message=user_message,
             )

             # add the completion to the metadata for debugging
@@ -891,16 +904,18 @@ async def _draft_outline(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> tuple[Status, StepName | None]:
         method_metadata_key = "draft_outline"

-        # get conversation related info
-        conversation = await context.get_messages(before=message.id)
-        if message.message_type == MessageType.chat:
-            conversation.messages.append(message)
-        participants_list = await context.get_participants(include_inactive=True)
+        # get conversation related info -- for now, if no message, assuming no prior conversation
+        conversation = None
+        if message is not None:
+            conversation = await context.get_messages(before=message.id)
+            if message.message_type == MessageType.chat:
+                conversation.messages.append(message)
+        participants_list = await context.get_participants(include_inactive=True)

         # get attachments related info
         attachment_messages = await self._attachments_extension.get_completion_messages_for_attachments(
@@ -916,9 +931,10 @@ async def _draft_outline(
         # create chat completion messages
         chat_completion_messages: list[ChatCompletionMessageParam] = []
         chat_completion_messages.append(_draft_outline_main_system_message())
-        chat_completion_messages.append(
-            _chat_history_system_message(conversation.messages, participants_list.participants)
-        )
+        if conversation is not None:
+            chat_completion_messages.append(
+                _chat_history_system_message(conversation.messages, participants_list.participants)
+            )
         chat_completion_messages.extend(attachment_messages)
         if outline is not None:
             chat_completion_messages.append(_outline_system_message(outline))
@@ -946,9 +962,9 @@ async def _draft_outline(
             # store only latest version for now (will keep all versions later as need arises)
             (storage_directory_for_context(context) / "document_agent/outline.txt").write_text(message_content)

-            # send the response to the conversation only if from a command. Otherwise return info to caller.
+            # send a command response to the conversation only if from a command. Otherwise return a normal chat message.
             message_type = MessageType.chat
-            if message.message_type == MessageType.command:
+            if message is not None and message.message_type == MessageType.command:
                 message_type = MessageType.command
             await context.send_messages(
                 NewConversationMessage(
@@ -976,11 +992,6 @@ async def _gc_outline_feedback(
             return Status.UNDEFINED, StepName.UNDEFINED

         # Run
-        if message is not None:
-            user_message = message.content
-        else:
-            user_message = None
-
         gc_outline_feedback_config: GuidedConversationConfigModel = GCDraftOutlineFeedbackConfigModel()

         guided_conversation = GuidedConversation(
@@ -1045,6 +1056,10 @@ async def _gc_outline_feedback(

         # run guided conversation step
         try:
+            if message is None:
+                user_message = None
+            else:
+                user_message = message.content
             response_message, conversation_status, next_step_name = await guided_conversation.step_conversation(
                 last_user_message=user_message,
             )
@@ -1090,16 +1105,18 @@ async def _draft_content(
         self,
         config: AssistantConfigModel,
         context: ConversationContext,
-        message: ConversationMessage,
+        message: ConversationMessage | None,
         metadata: dict[str, Any] = {},
     ) -> tuple[Status, StepName | None]:
         method_metadata_key = "draft_content"

-        # get conversation related info
-        conversation = await context.get_messages(before=message.id)
-        if message.message_type == MessageType.chat:
-            conversation.messages.append(message)
-        participants_list = await context.get_participants(include_inactive=True)
+        # get conversation related info -- for now, if no message, assuming no prior conversation
+        conversation = None
+        if message is not None:
+            conversation = await context.get_messages(before=message.id)
+            if message.message_type == MessageType.chat:
+                conversation.messages.append(message)
+        participants_list = await context.get_participants(include_inactive=True)

         # get attachments related info
         attachment_messages = await self._attachments_extension.get_completion_messages_for_attachments(
@@ -1109,9 +1126,10 @@ async def _draft_content(
         # create chat completion messages
         chat_completion_messages: list[ChatCompletionMessageParam] = []
         chat_completion_messages.append(_draft_content_main_system_message())
-        chat_completion_messages.append(
-            _chat_history_system_message(conversation.messages, participants_list.participants)
-        )
+        if conversation is not None:
+            chat_completion_messages.append(
+                _chat_history_system_message(conversation.messages, participants_list.participants)
+            )
         chat_completion_messages.extend(attachment_messages)

         # get outline related info
@@ -1135,12 +1153,12 @@ async def _draft_content(
                 "response_format": {"type": "text"},
             }
             completion = await client.chat.completions.create(**completion_args)
-            content = completion.choices[0].message.content
+            message_content = completion.choices[0].message.content
            _on_success_metadata_update(metadata, method_metadata_key, config, chat_completion_messages, completion)

         except Exception as e:
             logger.exception(f"exception occurred calling openai chat completion: {e}")
-            content = (
+            message_content = (
                 "An error occurred while calling the OpenAI API. Is it configured correctly?"
                 "View the debug inspector for more information."
             )
@@ -1148,16 +1166,16 @@ async def _draft_content(

         if content is not None:
             # store only latest version for now (will keep all versions later as need arises)
-            (storage_directory_for_context(context) / "document_agent/content.txt").write_text(content)
+            (storage_directory_for_context(context) / "document_agent/content.txt").write_text(message_content)

-            # send the response to the conversation only if from a command. Otherwise return info to caller.
+            # send a command response to the conversation only if from a command. Otherwise return a normal chat message.
             message_type = MessageType.chat
-            if message.message_type == MessageType.command:
+            if message is not None and message.message_type == MessageType.command:
                 message_type = MessageType.command

             await context.send_messages(
                 NewConversationMessage(
-                    content=content,
+                    content=message_content,
                     message_type=message_type,
                     metadata=metadata,
                 )
diff --git a/assistants/prospector-assistant/assistant/chat.py b/assistants/prospector-assistant/assistant/chat.py
index 2e90cd1e..c0226407 100644
--- a/assistants/prospector-assistant/assistant/chat.py
+++ b/assistants/prospector-assistant/assistant/chat.py
@@ -114,24 +114,6 @@ async def on_message_created(
         await legacy.provide_guidance_if_necessary(context)


-is_doc_agent_running = False
-
-
-@assistant.events.conversation.message.command.on_created
-async def on_command_message_created(
-    context: ConversationContext, event: ConversationEvent, message: ConversationMessage
-) -> None:
-    config = await assistant_config.get(context.assistant)
-    metadata: dict[str, Any] = {"debug": {"content_safety": event.data.get(content_safety.metadata_key, {})}}
-
-    # config.agents_config.document_agent.enabled = True  # To do... tie into config.
-    global is_doc_agent_running
-    is_doc_agent_running = True
-
-    doc_agent = DocumentAgent(attachments_extension)
-    await doc_agent.receive_command(config, context, message, metadata)
-
-
 @assistant.events.conversation.message.chat.on_created
 async def on_chat_message_created(
     context: ConversationContext, event: ConversationEvent, message: ConversationMessage
 ) -> None:
@@ -151,24 +133,19 @@ async def on_chat_message_created(

     # update the participant status to indicate the assistant is thinking
     async with send_error_message_on_exception(context), context.set_status("thinking..."):
-        config = await assistant_config.get(context.assistant)
-
-        metadata: dict[str, Any] = {"debug": {"content_safety": event.data.get(content_safety.metadata_key, {})}}
-
         #
         # NOTE: we're experimenting with agents, if they are enabled, use them to respond to the conversation
         #
+        config = await assistant_config.get(context.assistant)
+        metadata: dict[str, Any] = {"debug": {"content_safety": event.data.get(content_safety.metadata_key, {})}}

-        # if config.agents_config.document_agent.enabled:  # To do... tie into config.
-        global is_doc_agent_running
-        if is_doc_agent_running:
-            is_doc_agent_running = await document_agent_respond_to_conversation(config, context, message, metadata)
-            return
-
-        await form_fill_execute(context, message)
-
-        # # Prospector assistant response
-        # await respond_to_conversation(context, config, message, metadata)
+        match config.guided_workflow:
+            case "Form Completion":
+                await form_fill_execute(context, message)
+            case "Document Creation":
+                await create_document_execute(config, context, message, metadata)
+            case _:
+                logger.error("Guided workflow unknown or not supported.")


 background_tasks: set[asyncio.Task] = set()
@@ -179,32 +156,46 @@ async def on_conversation_created(context: ConversationContext) -> None:
     """
     Handle the event triggered when the assistant is added to a conversation.
     """
-
     assistant_sent_messages = await context.get_messages(participant_ids=[context.assistant.id], limit=1)
     welcome_sent_before = len(assistant_sent_messages.messages) > 0
     if welcome_sent_before:
         return

-    task = asyncio.create_task(welcome_message(context))
+    #
+    # NOTE: we're experimenting with agents, if they are enabled, use them to respond to the conversation
+    #
+    config = await assistant_config.get(context.assistant)
+    metadata: dict[str, Any] = {"debug": {}}
+
+    match config.guided_workflow:
+        case "Form Completion":
+            task = asyncio.create_task(welcome_message_form_fill(context))
+        case "Document Creation":
+            task = asyncio.create_task(
+                welcome_message_create_document(config, context, message=None, metadata=metadata)
+            )
+        case _:
+            logger.error("Guided workflow unknown or not supported.")
+
     background_tasks.add(task)
     task.add_done_callback(background_tasks.remove)

-    # send a welcome message to the conversation
-    # welcome_message = config.welcome_message
-    # await context.send_messages(
-    #     NewConversationMessage(
-    #         content=welcome_message,
-    #         message_type=MessageType.chat,
-    #         metadata={"generated_content": False},
-    #     )
-    # )
-

-async def welcome_message(context: ConversationContext) -> None:
+async def welcome_message_form_fill(context: ConversationContext) -> None:
     async with send_error_message_on_exception(context), context.set_status("thinking..."):
         await form_fill_execute(context, None)


+async def welcome_message_create_document(
+    config: AssistantConfigModel,
+    context: ConversationContext,
+    message: ConversationMessage | None,
+    metadata: dict[str, Any],
+) -> None:
+    async with send_error_message_on_exception(context), context.set_status("thinking..."):
+        await create_document_execute(config, context, message, metadata)
+
+
 @asynccontextmanager
 async def send_error_message_on_exception(context: ConversationContext):
     try:
@@ -271,23 +262,22 @@ async def get(filename: str) -> str:


 #
-# region Response
+# region document agent extension helpers
 #


-async def document_agent_respond_to_conversation(
+async def create_document_execute(
     config: AssistantConfigModel,
     context: ConversationContext,
-    message: ConversationMessage,
+    message: ConversationMessage | None,
     metadata: dict[str, Any] = {},
-) -> bool:
+) -> None:
     """
     Respond to a conversation message using the document agent.
     """
     # create the document agent instance
     document_agent = DocumentAgent(attachments_extension)
-    is_doc_agent_running = await document_agent.respond_to_conversation(config, context, message, metadata)
-    return is_doc_agent_running
+    await document_agent.create_document(config, context, message, metadata)


 # demonstrates how to respond to a conversation message using the OpenAI API.
diff --git a/assistants/prospector-assistant/assistant/config.py b/assistants/prospector-assistant/assistant/config.py
index 354cca29..f751eb78 100644
--- a/assistants/prospector-assistant/assistant/config.py
+++ b/assistants/prospector-assistant/assistant/config.py
@@ -1,4 +1,4 @@
-from typing import Annotated
+from typing import Annotated, Literal

 import openai_client
 from assistant_extensions.attachments import AttachmentsConfigModel
@@ -117,6 +117,14 @@ class RequestConfig(BaseModel):

 # the workbench app builds dynamic forms based on the configuration model and UI schema
 class AssistantConfigModel(BaseModel):
+    guided_workflow: Annotated[
+        Literal["Form Completion", "Document Creation"],
+        Field(
+            title="Guided Workflow",
+            description="The workflow extension to guide this conversation.",
+        ),
+    ] = "Form Completion"
+
     enable_debug_output: Annotated[
         bool,
         Field(