diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/models/__init__.py b/libraries/botbuilder-ai/botbuilder/ai/qna/models/__init__.py index baaa22063..018d40c95 100644 --- a/libraries/botbuilder-ai/botbuilder/ai/qna/models/__init__.py +++ b/libraries/botbuilder-ai/botbuilder/ai/qna/models/__init__.py @@ -9,7 +9,10 @@ from .feedback_records import FeedbackRecords from .generate_answer_request_body import GenerateAnswerRequestBody from .metadata import Metadata +from .prompt import Prompt from .qnamaker_trace_info import QnAMakerTraceInfo +from .qna_request_context import QnARequestContext +from .qna_response_context import QnAResponseContext from .query_result import QueryResult from .query_results import QueryResults from .train_request_body import TrainRequestBody @@ -19,7 +22,10 @@ "FeedbackRecords", "GenerateAnswerRequestBody", "Metadata", + "Prompt", "QnAMakerTraceInfo", + "QnARequestContext", + "QnAResponseContext", "QueryResult", "QueryResults", "TrainRequestBody", diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/models/generate_answer_request_body.py b/libraries/botbuilder-ai/botbuilder/ai/qna/models/generate_answer_request_body.py index 6dba9a124..285900aeb 100644 --- a/libraries/botbuilder-ai/botbuilder/ai/qna/models/generate_answer_request_body.py +++ b/libraries/botbuilder-ai/botbuilder/ai/qna/models/generate_answer_request_body.py @@ -6,6 +6,7 @@ from msrest.serialization import Model from .metadata import Metadata +from .qna_request_context import QnARequestContext class GenerateAnswerRequestBody(Model): @@ -16,6 +17,7 @@ class GenerateAnswerRequestBody(Model): "top": {"key": "top", "type": "int"}, "score_threshold": {"key": "scoreThreshold", "type": "float"}, "strict_filters": {"key": "strictFilters", "type": "[Metadata]"}, + "context": {"key": "context", "type": "QnARequestContext"}, } def __init__( @@ -24,6 +26,7 @@ def __init__( top: int, score_threshold: float, strict_filters: List[Metadata], + context: QnARequestContext = None, **kwargs 
): """ @@ -36,7 +39,10 @@ def __init__( score_threshold: Threshold for answers returned based on score. - strict_filters: Find only answers that contain these metadata. + strict_filters: Find answers that contain these metadata. + + context: The context from which the QnA was extracted. + """ super().__init__(**kwargs) @@ -45,3 +51,4 @@ def __init__( self.top = top self.score_threshold = score_threshold self.strict_filters = strict_filters + self.context = context diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/models/prompt.py b/libraries/botbuilder-ai/botbuilder/ai/qna/models/prompt.py new file mode 100644 index 000000000..d7f090c87 --- /dev/null +++ b/libraries/botbuilder-ai/botbuilder/ai/qna/models/prompt.py @@ -0,0 +1,45 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from msrest.serialization import Model + + +class Prompt(Model): + """ Prompt Object. """ + + _attribute_map = { + "display_order": {"key": "displayOrder", "type": "int"}, + "qna_id": {"key": "qnaId", "type": "int"}, + "qna": {"key": "qna", "type": "object"}, + "display_text": {"key": "displayText", "type": "str"}, + } + + def __init__( + self, + *, + display_order: int, + qna_id: int, + display_text: str, + qna: object = None, + **kwargs + ): + """ + Parameters: + ----------- + + display_order: Index of the prompt - used in ordering of the prompts. + + qna_id: QnA ID. + + display_text: Text displayed to represent a follow up question prompt. + + qna: The QnA object returned from the API (Optional). 
+ + """ + + super(Prompt, self).__init__(**kwargs) + + self.display_order = display_order + self.qna_id = qna_id + self.display_text = display_text + self.qna = qna diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/models/qna_request_context.py b/libraries/botbuilder-ai/botbuilder/ai/qna/models/qna_request_context.py new file mode 100644 index 000000000..ae3342a76 --- /dev/null +++ b/libraries/botbuilder-ai/botbuilder/ai/qna/models/qna_request_context.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from msrest.serialization import Model + + +class QnARequestContext(Model): + """ + The context associated with QnA. + Used to mark if the current prompt is relevant with a previous question or not. + """ + + _attribute_map = { + "previous_qna_id": {"key": "previousQnAId", "type": "int"}, + "prvious_user_query": {"key": "previousUserQuery", "type": "str"}, + } + + def __init__(self, previous_qna_id: int, prvious_user_query: str, **kwargs): + """ + Parameters: + ----------- + + previous_qna_id: The previous QnA Id that was returned. + + prvious_user_query: The previous user query/question. + """ + + super().__init__(**kwargs) + + self.previous_qna_id = previous_qna_id + self.prvious_user_query = prvious_user_query diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/models/qna_response_context.py b/libraries/botbuilder-ai/botbuilder/ai/qna/models/qna_response_context.py new file mode 100644 index 000000000..537bf09db --- /dev/null +++ b/libraries/botbuilder-ai/botbuilder/ai/qna/models/qna_response_context.py @@ -0,0 +1,35 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from typing import List +from msrest.serialization import Model +from .prompt import Prompt + + +class QnAResponseContext(Model): + """ + The context associated with QnA. + Used to mark if the qna response has related prompts. 
+ """ + + _attribute_map = { + "is_context_only": {"key": "isContextOnly", "type": "bool"}, + "prompts": {"key": "prompts", "type": "[Prompt]"}, + } + + def __init__( + self, *, is_context_only: bool = False, prompts: List[Prompt] = None, **kwargs + ): + """ + Parameters: + ----------- + + is_context_only: Whether this prompt is context only. + + prompts: The prompts collection of related prompts. + + """ + + super(QnAResponseContext, self).__init__(**kwargs) + self.is_context_only = is_context_only + self.prompts = prompts diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/models/qnamaker_trace_info.py b/libraries/botbuilder-ai/botbuilder/ai/qna/models/qnamaker_trace_info.py index 22119687d..1a503f07f 100644 --- a/libraries/botbuilder-ai/botbuilder/ai/qna/models/qnamaker_trace_info.py +++ b/libraries/botbuilder-ai/botbuilder/ai/qna/models/qnamaker_trace_info.py @@ -6,10 +6,11 @@ from botbuilder.schema import Activity from .metadata import Metadata from .query_result import QueryResult +from .qna_request_context import QnARequestContext class QnAMakerTraceInfo: - """ Represents all the trice info that we collect from the QnAMaker Middleware. """ + """ Represents all the trace info that we collect from the QnAMaker Middleware. """ def __init__( self, @@ -19,6 +20,7 @@ def __init__( score_threshold: float, top: int, strict_filters: List[Metadata], + context: QnARequestContext = None, ): """ Parameters: @@ -35,6 +37,8 @@ def __init__( top: Number of ranked results that are asked to be returned. strict_filters: Filters used on query. + + context: (Optional) The context from which the QnA was extracted. 
""" self.message = message self.query_results = query_results @@ -42,3 +46,4 @@ def __init__( self.score_threshold = score_threshold self.top = top self.strict_filters = strict_filters + self.context = context diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/models/query_result.py b/libraries/botbuilder-ai/botbuilder/ai/qna/models/query_result.py index 387eb8796..321ea64cf 100644 --- a/libraries/botbuilder-ai/botbuilder/ai/qna/models/query_result.py +++ b/libraries/botbuilder-ai/botbuilder/ai/qna/models/query_result.py @@ -2,21 +2,35 @@ # Licensed under the MIT License. from typing import List - +from msrest.serialization import Model from .metadata import Metadata +from .qna_response_context import QnAResponseContext -class QueryResult: +class QueryResult(Model): """ Represents an individual result from a knowledge base query. """ + _attribute_map = { + "questions": {"key": "questions", "type": "[str]"}, + "answer": {"key": "answer", "type": "str"}, + "score": {"key": "score", "type": "float"}, + "metadata": {"key": "metadata", "type": "object"}, + "source": {"key": "source", "type": "str"}, + "id": {"key": "id", "type": "int"}, + "context": {"key": "context", "type": "object"}, + } + def __init__( self, + *, questions: List[str], answer: str, score: float, metadata: object = None, source: str = None, id: int = None, # pylint: disable=invalid-name + context: QnAResponseContext = None, + **kwargs ): """ Parameters: @@ -33,10 +47,14 @@ def __init__( source: The source from which the QnA was extracted (if any). id: The index of the answer in the knowledge base. V3 uses 'qnaId', V4 uses 'id' (if any). + + context: The context from which the QnA was extracted. 
""" + super(QueryResult, self).__init__(**kwargs) self.questions = questions self.answer = answer self.score = score self.metadata = list(map(lambda meta: Metadata(**meta), metadata)) self.source = source + self.context = QnAResponseContext(**context) if context is not None else None self.id = id # pylint: disable=invalid-name diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker.py b/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker.py index cacdd7b79..c19c57154 100644 --- a/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker.py +++ b/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker.py @@ -47,12 +47,6 @@ def __init__( "QnAMaker.__init__(): endpoint is not an instance of QnAMakerEndpoint" ) - if endpoint.host.endswith("v2.0"): - raise ValueError( - "v2.0 of QnA Maker service is no longer supported in the Bot Framework. Please upgrade your QnA Maker" - " service at www.qnamaker.ai." - ) - self._endpoint: str = endpoint opt = options or QnAMakerOptions() diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker_options.py b/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker_options.py index 6387a4682..127b0aa3d 100644 --- a/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker_options.py +++ b/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker_options.py @@ -1,8 +1,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
-from .models import Metadata - +from .models import Metadata, QnARequestContext # figure out if 300 milliseconds is ok for python requests library...or 100000 class QnAMakerOptions: @@ -12,8 +11,10 @@ def __init__( timeout: int = 0, top: int = 0, strict_filters: [Metadata] = None, + context: QnARequestContext = None, ): self.score_threshold = score_threshold self.timeout = timeout self.top = top self.strict_filters = strict_filters or [] + self.context = context diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/utils/generate_answer_utils.py b/libraries/botbuilder-ai/botbuilder/ai/qna/utils/generate_answer_utils.py index 8651fb1ed..5831875cc 100644 --- a/libraries/botbuilder-ai/botbuilder/ai/qna/utils/generate_answer_utils.py +++ b/libraries/botbuilder-ai/botbuilder/ai/qna/utils/generate_answer_utils.py @@ -127,18 +127,22 @@ def _hydrate_options(self, query_options: QnAMakerOptions) -> QnAMakerOptions: ): hydrated_options.timeout = query_options.timeout + if query_options.context: + hydrated_options.context = query_options.context + return hydrated_options async def _query_qna_service( - self, context: TurnContext, options: QnAMakerOptions + self, turn_context: TurnContext, options: QnAMakerOptions ) -> List[QueryResult]: url = f"{ self._endpoint.host }/knowledgebases/{ self._endpoint.knowledge_base_id }/generateAnswer" question = GenerateAnswerRequestBody( - question=context.activity.text, + question=turn_context.activity.text, top=options.top, score_threshold=options.score_threshold, strict_filters=options.strict_filters, + context=options.context, ) http_request_helper = HttpRequestUtils(self._http_client) @@ -161,6 +165,7 @@ async def _emit_trace_info( score_threshold=options.score_threshold, top=options.top, strict_filters=options.strict_filters, + context=options.context, ) trace_activity = Activity( @@ -189,15 +194,6 @@ async def _format_qna_result( answers_within_threshold, key=lambda ans: ans["score"], reverse=True ) - # The old version of the 
protocol returns the id in a field called qnaId - # The following translates this old structure to the new - is_legacy_protocol: bool = self._endpoint.host.endswith( - "v2.0" - ) or self._endpoint.host.endswith("v3.0") - if is_legacy_protocol: - for answer in answers_within_threshold: - answer["id"] = answer.pop("qnaId", None) - answers_as_query_results = list( map(lambda answer: QueryResult(**answer), sorted_answers) ) diff --git a/libraries/botbuilder-ai/tests/qna/test_data/AnswerWithHighScoreProvidedContext.json b/libraries/botbuilder-ai/tests/qna/test_data/AnswerWithHighScoreProvidedContext.json new file mode 100644 index 000000000..e72863e10 --- /dev/null +++ b/libraries/botbuilder-ai/tests/qna/test_data/AnswerWithHighScoreProvidedContext.json @@ -0,0 +1,18 @@ +{ + "answers": [ + { + "questions": [ + "Where can I buy cleaning products?" + ], + "answer": "Any DIY store", + "score": 100, + "id": 55, + "source": "Editorial", + "metadata": [], + "context": { + "isContextOnly": true, + "prompts": [] + } + } + ] + } \ No newline at end of file diff --git a/libraries/botbuilder-ai/tests/qna/test_data/AnswerWithLowScoreProvidedWithoutContext.json b/libraries/botbuilder-ai/tests/qna/test_data/AnswerWithLowScoreProvidedWithoutContext.json new file mode 100644 index 000000000..1790ea662 --- /dev/null +++ b/libraries/botbuilder-ai/tests/qna/test_data/AnswerWithLowScoreProvidedWithoutContext.json @@ -0,0 +1,32 @@ +{ + "answers": [ + { + "questions": [ + "Where can I buy home appliances?" + ], + "answer": "Any Walmart store", + "score": 68, + "id": 56, + "source": "Editorial", + "metadata": [], + "context": { + "isContextOnly": false, + "prompts": [] + } + }, + { + "questions": [ + "Where can I buy cleaning products?" 
+ ], + "answer": "Any DIY store", + "score": 56, + "id": 55, + "source": "Editorial", + "metadata": [], + "context": { + "isContextOnly": false, + "prompts": [] + } + } + ] + } \ No newline at end of file diff --git a/libraries/botbuilder-ai/tests/qna/test_data/AnswerWithPrompts.json b/libraries/botbuilder-ai/tests/qna/test_data/AnswerWithPrompts.json new file mode 100644 index 000000000..da246deea --- /dev/null +++ b/libraries/botbuilder-ai/tests/qna/test_data/AnswerWithPrompts.json @@ -0,0 +1,25 @@ +{ + "answers": [ + { + "questions": [ + "how do I clean the stove?" + ], + "answer": "BaseCamp: You can use a damp rag to clean around the Power Pack", + "score": 100, + "id": 5, + "source": "Editorial", + "metadata": [], + "context": { + "isContextOnly": true, + "prompts": [ + { + "displayOrder": 0, + "qnaId": 55, + "qna": null, + "displayText": "Where can I buy?" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/libraries/botbuilder-ai/tests/qna/test_qna.py b/libraries/botbuilder-ai/tests/qna/test_qna.py index 148041ec1..24134fc9e 100644 --- a/libraries/botbuilder-ai/tests/qna/test_qna.py +++ b/libraries/botbuilder-ai/tests/qna/test_qna.py @@ -12,7 +12,12 @@ import aiounittest from botbuilder.ai.qna import QnAMakerEndpoint, QnAMaker, QnAMakerOptions -from botbuilder.ai.qna.models import FeedbackRecord, Metadata, QueryResult +from botbuilder.ai.qna.models import ( + FeedbackRecord, + Metadata, + QueryResult, + QnARequestContext, +) from botbuilder.ai.qna.utils import QnATelemetryConstants from botbuilder.core import BotAdapter, BotTelemetryClient, TurnContext from botbuilder.core.adapters import TestAdapter @@ -87,7 +92,6 @@ def test_qnamaker_with_none_endpoint(self): def test_set_default_options_with_no_options_arg(self): qna_without_options = QnAMaker(self.tests_endpoint) - options = qna_without_options._generate_answer_helper.options default_threshold = 0.3 @@ -683,6 +687,63 @@ async def test_should_filter_low_score_variation(self): "Should have 3 
filtered answers after low score variation.", ) + async def test_should_answer_with_prompts(self): + options = QnAMakerOptions(top=2) + qna = QnAMaker(QnaApplicationTest.tests_endpoint, options) + question: str = "how do I clean the stove?" + turn_context = QnaApplicationTest._get_context(question, TestAdapter()) + response_json = QnaApplicationTest._get_json_for_file("AnswerWithPrompts.json") + + with patch( + "aiohttp.ClientSession.post", + return_value=aiounittest.futurized(response_json), + ): + results = await qna.get_answers(turn_context, options) + self.assertEqual(1, len(results), "Should have received 1 answers.") + self.assertEqual( + 1, len(results[0].context.prompts), "Should have received 1 prompt." + ) + + async def test_should_answer_with_high_score_provided_context(self): + qna = QnAMaker(QnaApplicationTest.tests_endpoint) + question: str = "where can I buy?" + context = QnARequestContext( + previous_qna_id=5, prvious_user_query="how do I clean the stove?" + ) + options = QnAMakerOptions(top=2, context=context) + turn_context = QnaApplicationTest._get_context(question, TestAdapter()) + response_json = QnaApplicationTest._get_json_for_file( + "AnswerWithHighScoreProvidedContext.json" + ) + + with patch( + "aiohttp.ClientSession.post", + return_value=aiounittest.futurized(response_json), + ): + results = await qna.get_answers(turn_context, options) + self.assertEqual(1, len(results), "Should have received 1 answers.") + self.assertEqual(1, results[0].score, "Score should be high.") + + async def test_should_answer_with_low_score_without_provided_context(self): + qna = QnAMaker(QnaApplicationTest.tests_endpoint) + question: str = "where can I buy?" 
+ options = QnAMakerOptions(top=2, context=None) + + turn_context = QnaApplicationTest._get_context(question, TestAdapter()) + response_json = QnaApplicationTest._get_json_for_file( + "AnswerWithLowScoreProvidedWithoutContext.json" + ) + + with patch( + "aiohttp.ClientSession.post", + return_value=aiounittest.futurized(response_json), + ): + results = await qna.get_answers(turn_context, options) + self.assertEqual( + 2, len(results), "Should have received more than one answers." + ) + self.assertEqual(True, results[0].score < 1, "Score should be low.") + @classmethod async def _get_service_result( cls,