Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

global handling of unhandled exceptions in app, graceful handling of service failures getting speech token #236

Merged
merged 4 commits into the base branch on Nov 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
56 changes: 28 additions & 28 deletions assistants/prospector-assistant/assistant/agents/document_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -761,21 +761,21 @@ async def _gc_attachment_check(
) -> tuple[Status, StepName | None]:
method_metadata_key = "document_agent_gc_response"

gc_convo_config: GuidedConversationAgentConfigModel = GCAttachmentCheckConfigModel()
gc_conversation_config: GuidedConversationAgentConfigModel = GCAttachmentCheckConfigModel()
# get attachment filenames for context
filenames = await self._attachments_extension.get_attachment_filenames(
context, config=config.agents_config.attachment_agent
)

filenames_str = ", ".join(filenames)
filenames_str = "Filenames already attached: " + filenames_str
gc_convo_config.context = gc_convo_config.context + "\n\n" + filenames_str
gc_conversation_config.context = gc_conversation_config.context + "\n\n" + filenames_str

try:
response_message, conversation_status, next_step_name = await GuidedConversationAgent.step_conversation(
config=config,
openai_client=openai_client.create_client(config.service_config),
agent_config=gc_convo_config,
agent_config=gc_conversation_config,
conversation_context=context,
last_user_message=message.content,
)
Expand Down Expand Up @@ -990,28 +990,27 @@ async def _draft_content(
context, config=config.agents_config.attachment_agent
)

# get outline related info
outline: str | None = None
content: str | None = None
# path = _get_document_agent_conversation_storage_path(context)
if path.exists(storage_directory_for_context(context) / "document_agent/outline.txt"):
outline = (storage_directory_for_context(context) / "document_agent/outline.txt").read_text()
if path.exists(storage_directory_for_context(context) / "document_agent/content.txt"):
content = (storage_directory_for_context(context) / "document_agent/content.txt").read_text()

# create chat completion messages
chat_completion_messages: list[ChatCompletionMessageParam] = []
chat_completion_messages.append(_draft_content_main_system_message())
chat_completion_messages.append(
_chat_history_system_message(conversation.messages, participants_list.participants)
)
chat_completion_messages.extend(attachment_messages)
if outline is not None:
chat_completion_messages.append(_outline_system_message(outline))
if content is not None: # only grabs previously written content, not all yet.
chat_completion_messages.append(_content_system_message(content))

# get outline related info
if path.exists(storage_directory_for_context(context) / "document_agent/outline.txt"):
document_outline = (storage_directory_for_context(context) / "document_agent/outline.txt").read_text()
if document_outline is not None:
chat_completion_messages.append(_outline_system_message(document_outline))

if path.exists(storage_directory_for_context(context) / "document_agent/content.txt"):
document_content = (storage_directory_for_context(context) / "document_agent/content.txt").read_text()
if document_content is not None: # only grabs previously written content, not all yet.
chat_completion_messages.append(_content_system_message(document_content))

# make completion call to openai
content: str | None = None
async with openai_client.create_client(config.service_config) as client:
try:
completion_args = {
Expand All @@ -1031,21 +1030,22 @@ async def _draft_content(
)
_on_error_metadata_update(metadata, method_metadata_key, config, chat_completion_messages, e)

# store only latest version for now (will keep all versions later as need arises)
(storage_directory_for_context(context) / "document_agent/content.txt").write_text(content)
if content is not None:
# store only latest version for now (will keep all versions later as need arises)
(storage_directory_for_context(context) / "document_agent/content.txt").write_text(content)

# send the response to the conversation only if from a command. Otherwise return info to caller.
message_type = MessageType.chat
if message.message_type == MessageType.command:
message_type = MessageType.command
# send the response to the conversation only if from a command. Otherwise return info to caller.
message_type = MessageType.chat
if message.message_type == MessageType.command:
message_type = MessageType.command

await context.send_messages(
NewConversationMessage(
content=content,
message_type=message_type,
metadata=metadata,
await context.send_messages(
NewConversationMessage(
content=content,
message_type=message_type,
metadata=metadata,
)
)
)

return Status.USER_COMPLETED, None

Expand Down
14 changes: 7 additions & 7 deletions workbench-app/pnpm-lock.yaml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

37 changes: 35 additions & 2 deletions workbench-app/src/Root.tsx
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
import { Toaster } from '@fluentui/react-components';
import { Link, Popover, PopoverSurface, PopoverTrigger, Toaster } from '@fluentui/react-components';
import debug from 'debug';
import React from 'react';
import { Outlet } from 'react-router-dom';
import { Constants } from './Constants';
import useDragAndDrop from './libs/useDragAndDrop';
import { useKeySequence } from './libs/useKeySequence';
import { useNotify } from './libs/useNotify';
import { useAppDispatch, useAppSelector } from './redux/app/hooks';
import { setIsDraggingOverBody, toggleDevMode } from './redux/features/app/appSlice';

Expand All @@ -29,6 +30,38 @@ export const Root: React.FC = () => {
],
() => dispatch(toggleDevMode()),
);
const { notifyError } = useNotify();

const globalErrorHandler = React.useCallback(
(event: PromiseRejectionEvent) => {
log('Unhandled promise rejection', event.reason);
notifyError({
id: ['unhandledrejection', event.reason.message, event.reason.stack].join(':'),
title: 'Unhandled error',
message: event.reason.message,
additionalActions: [
<Popover key="popover">
<PopoverTrigger disableButtonEnhancement>
<Link>More info</Link>
</PopoverTrigger>
<PopoverSurface>
<pre>{event.reason.stack}</pre>
</PopoverSurface>
</Popover>,
],
});
},
[notifyError],
);

React.useEffect(() => {
// add a global error handler to catch unhandled promise rejections
window.addEventListener('unhandledrejection', globalErrorHandler);

return () => {
window.removeEventListener('unhandledrejection', globalErrorHandler);
};
}, [globalErrorHandler]);

// ignore file drop events at the document level as this prevents the browser from
// opening the file in the window if the drop event is not handled or the user misses
Expand All @@ -44,7 +77,7 @@ export const Root: React.FC = () => {
return (
<>
<Outlet />
<Toaster toasterId={Constants.app.globalToasterId} />
<Toaster toasterId={Constants.app.globalToasterId} pauseOnHover pauseOnWindowBlur />
</>
);
};
18 changes: 10 additions & 8 deletions workbench-app/src/components/Conversations/SpeechButton.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ interface SpeechButtonProps {
export const SpeechButton: React.FC<SpeechButtonProps> = (props) => {
const { disabled, onListeningChange, onSpeechRecognizing, onSpeechRecognized } = props;
const [recognizer, setRecognizer] = React.useState<speechSdk.SpeechRecognizer>();
const [isFetching, setIsFetching] = React.useState(false);
const [isInitialized, setIsInitialized] = React.useState(false);
const [isListening, setIsListening] = React.useState(false);
const [lastSpeechResultTimestamp, setLastSpeechResultTimestamp] = React.useState(0);

Expand Down Expand Up @@ -115,15 +115,17 @@ export const SpeechButton: React.FC<SpeechButtonProps> = (props) => {
}, [getAzureSpeechTokenAsync, onSpeechRecognized, onSpeechRecognizing]);

React.useEffect(() => {
// If the recognizer is already available or we are fetching it, do nothing
if (recognizer || isFetching) return;
// If the recognizer is already initialized, return
if (isInitialized) return;

// Indicate that we are fetching the recognizer to prevent multiple fetches
setIsFetching(true);
// Set the recognizer as initialized
setIsInitialized(true);

// Fetch the recognizer, then indicate that we are no longer fetching even if the fetch fails
getRecognizer().finally(() => setIsFetching(false));
}, [getRecognizer, isFetching, recognizer]);
(async () => {
// Fetch the recognizer
await getRecognizer();
})();
}, [getRecognizer, isInitialized, recognizer]);

React.useEffect(() => {
onListeningChange(isListening);
Expand Down
6 changes: 3 additions & 3 deletions workbench-app/src/libs/useNotify.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ interface NotifyOptions {
id: string;
title?: string;
message: string;
details?: string;
subtitle?: string;
action?: Slot<'div'> | string;
additionalActions?: React.ReactElement[];
timeout?: number;
Expand All @@ -27,7 +27,7 @@ export const useNotify = (toasterId: string = Constants.app.globalToasterId) =>

const notify = React.useCallback(
(options: NotifyOptions) => {
const { id, title, message, details, action, additionalActions, timeout, intent } = options;
const { id, title, message, subtitle, action, additionalActions, timeout, intent } = options;

const getAction = () => {
if (typeof action === 'string') {
Expand All @@ -43,7 +43,7 @@ export const useNotify = (toasterId: string = Constants.app.globalToasterId) =>
dispatchToast(
<Toast>
<ToastTitle action={getAction()}>{title}</ToastTitle>
<ToastBody subtitle={details}>{message}</ToastBody>
<ToastBody subtitle={subtitle}>{message}</ToastBody>
{additionalActions && <ToastFooter>{additionalActions}</ToastFooter>}
</Toast>,
{
Expand Down
6 changes: 3 additions & 3 deletions workbench-app/src/main.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import { initializeFileTypeIcons } from '@fluentui/react-file-type-icons';
import debug from 'debug';
import React from 'react';
import ReactDOM from 'react-dom/client';
import { Provider } from 'react-redux';
import { Provider as ReduxProvider } from 'react-redux';
import { RouterProvider, createBrowserRouter } from 'react-router-dom';
import { Constants } from './Constants';
import { Root } from './Root';
Expand Down Expand Up @@ -154,7 +154,7 @@ document.addEventListener('DOMContentLoaded', () => {
const root = ReactDOM.createRoot(container);

const app = (
<Provider store={store}>
<ReduxProvider store={store}>
<MsalProvider instance={msalInstance}>
<FluentProvider className="app-container" theme={customTheme}>
<CopilotProvider mode="canvas">
Expand All @@ -167,7 +167,7 @@ document.addEventListener('DOMContentLoaded', () => {
</CopilotProvider>
</FluentProvider>
</MsalProvider>
</Provider>
</ReduxProvider>
);

// NOTE: React.StrictMode is used to help catch common issues in the app but will also double-render
Expand Down
10 changes: 9 additions & 1 deletion workbench-service/semantic_workbench_service/azure_speech.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,22 @@
import logging

from azure.identity import DefaultAzureCredential

from . import settings

logger = logging.getLogger(__name__)


def get_token() -> dict[str, str]:
if settings.azure_speech.resource_id == "" or settings.azure_speech.region == "":
return {}

credential = DefaultAzureCredential()
token = credential.get_token("https://cognitiveservices.azure.com/.default").token
try:
token = credential.get_token("https://cognitiveservices.azure.com/.default").token
except Exception as e:
logger.error(f"Failed to get token: {e}")
return {}

return {
"token": f"aad#{settings.azure_speech.resource_id}#{token}",
Expand Down