Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[QnA Maker] Multi-turn support for python #305

Merged
merged 8 commits into from
Aug 29, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions libraries/botbuilder-ai/botbuilder/ai/qna/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,10 @@
from .feedback_records import FeedbackRecords
from .generate_answer_request_body import GenerateAnswerRequestBody
from .metadata import Metadata
from .prompt import Prompt
from .qnamaker_trace_info import QnAMakerTraceInfo
from .qna_request_context import QnARequestContext
from .qna_response_context import QnAResponseContext
from .query_result import QueryResult
from .query_results import QueryResults
from .train_request_body import TrainRequestBody
Expand All @@ -19,7 +22,10 @@
"FeedbackRecords",
"GenerateAnswerRequestBody",
"Metadata",
"Prompt",
"QnAMakerTraceInfo",
"QnARequestContext",
"QnAResponseContext",
"QueryResult",
"QueryResults",
"TrainRequestBody",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from msrest.serialization import Model

from .metadata import Metadata
from .qna_request_context import QnARequestContext


class GenerateAnswerRequestBody(Model):
Expand All @@ -16,6 +17,7 @@ class GenerateAnswerRequestBody(Model):
"top": {"key": "top", "type": "int"},
"score_threshold": {"key": "scoreThreshold", "type": "float"},
"strict_filters": {"key": "strictFilters", "type": "[Metadata]"},
"context": {"key": "context", "type": "QnARequestContext"},
gurvsing marked this conversation as resolved.
Show resolved Hide resolved
}

def __init__(
Expand All @@ -24,6 +26,7 @@ def __init__(
top: int,
score_threshold: float,
strict_filters: List[Metadata],
context: QnARequestContext = None,
**kwargs
):
"""
Expand All @@ -36,7 +39,10 @@ def __init__(

score_threshold: Threshold for answers returned based on score.

strict_filters: Find only answers that contain these metadata.
strict_filters: Find only answers that contain these metadata.

context: The context from which the QnA was extracted.

"""

super().__init__(**kwargs)
Expand All @@ -45,3 +51,4 @@ def __init__(
self.top = top
self.score_threshold = score_threshold
self.strict_filters = strict_filters
self.context = context
45 changes: 45 additions & 0 deletions libraries/botbuilder-ai/botbuilder/ai/qna/models/prompt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

from msrest.serialization import Model


class Prompt(Model):
    """A follow-up prompt attached to a QnA Maker answer.

    Serialized/deserialized via msrest using ``_attribute_map``.
    """

    _attribute_map = {
        "display_order": {"key": "displayOrder", "type": "int"},
        "qna_id": {"key": "qnaId", "type": "int"},
        "qna": {"key": "qna", "type": "object"},
        "display_text": {"key": "displayText", "type": "str"},
    }

    def __init__(
        self,
        *,
        display_order: int,
        qna_id: int,
        display_text: str,
        qna: object = None,
        **kwargs
    ):
        """
        Parameters:
        -----------

        display_order: Index of the prompt - used in ordering of the prompts.

        qna_id: QnA ID.

        display_text: Text displayed to represent a follow up question prompt.

        qna: The QnA object returned from the API (Optional).

        """
        super().__init__(**kwargs)
        self.display_order = display_order
        self.qna_id = qna_id
        self.display_text = display_text
        self.qna = qna
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

from msrest.serialization import Model


class QnARequestContext(Model):
    """
    The context associated with QnA.

    Used to mark if the current prompt is relevant with a previous
    question or not.
    """

    _attribute_map = {
        "previous_qna_id": {"key": "previousQnAId", "type": "int"},
        # "str" is the msrest basic-type token; the previous value "string"
        # is not a recognized msrest type and breaks (de)serialization.
        "prvious_user_query": {"key": "previousUserQuery", "type": "str"},
    }

    def __init__(self, previous_qna_id: int, prvious_user_query: str, **kwargs):
        """
        Parameters:
        -----------

        previous_qna_id: The previous QnA Id that was returned.

        prvious_user_query: The previous user query/question.
            NOTE(review): name is misspelled ("prvious") — kept as-is for
            backward compatibility with existing callers and the attribute map.
        """

        super().__init__(**kwargs)

        self.previous_qna_id = previous_qna_id
        self.prvious_user_query = prvious_user_query
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

from typing import List
from msrest.serialization import Model
from .prompt import Prompt


class QnAResponseContext(Model):
    """The context attached to a QnA Maker response.

    Marks whether the answer carries related follow-up prompts.
    """

    _attribute_map = {
        "is_context_only": {"key": "isContextOnly", "type": "bool"},
        "prompts": {"key": "prompts", "type": "[Prompt]"},
    }

    def __init__(
        self,
        *,
        is_context_only: bool = False,
        prompts: List[Prompt] = None,
        **kwargs
    ):
        """
        Parameters:
        -----------

        is_context_only: Whether this prompt is context only.

        prompts: The prompts collection of related prompts.

        """
        super().__init__(**kwargs)
        self.prompts = prompts
        self.is_context_only = is_context_only
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,11 @@
from botbuilder.schema import Activity
from .metadata import Metadata
from .query_result import QueryResult
from .qna_request_context import QnARequestContext


class QnAMakerTraceInfo:
gurvsing marked this conversation as resolved.
Show resolved Hide resolved
""" Represents all the trice info that we collect from the QnAMaker Middleware. """
""" Represents all the trace info that we collect from the QnAMaker Middleware. """

def __init__(
self,
Expand All @@ -19,6 +20,7 @@ def __init__(
score_threshold: float,
top: int,
strict_filters: List[Metadata],
context: QnARequestContext = None,
):
"""
Parameters:
Expand All @@ -35,10 +37,13 @@ def __init__(
top: Number of ranked results that are asked to be returned.

strict_filters: Filters used on query.

context: (Optional) The context from which the QnA was extracted.
"""
self.message = message
self.query_results = query_results
self.knowledge_base_id = knowledge_base_id
self.score_threshold = score_threshold
self.top = top
self.strict_filters = strict_filters
self.context = context
22 changes: 20 additions & 2 deletions libraries/botbuilder-ai/botbuilder/ai/qna/models/query_result.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,21 +2,35 @@
# Licensed under the MIT License.

from typing import List

from msrest.serialization import Model
from .metadata import Metadata
from .qna_response_context import QnAResponseContext


class QueryResult:
class QueryResult(Model):
""" Represents an individual result from a knowledge base query. """

_attribute_map = {
"questions": {"key": "questions", "type": "[str]"},
"answer": {"key": "answer", "type": "str"},
"score": {"key": "score", "type": "float"},
"metadata": {"key": "metadata", "type": "object"},
"source": {"key": "source", "type": "str"},
"id": {"key": "id", "type": "int"},
"context": {"key": "context", "type": "object"},
}

def __init__(
self,
*,
questions: List[str],
answer: str,
score: float,
metadata: object = None,
source: str = None,
id: int = None, # pylint: disable=invalid-name
context: QnAResponseContext = None,
**kwargs
):
"""
Parameters:
Expand All @@ -33,10 +47,14 @@ def __init__(
source: The source from which the QnA was extracted (if any).

id: The index of the answer in the knowledge base. V3 uses 'qnaId', V4 uses 'id' (if any).

context: The context from which the QnA was extracted.
"""
super(QueryResult, self).__init__(**kwargs)
self.questions = questions
self.answer = answer
self.score = score
self.metadata = list(map(lambda meta: Metadata(**meta), metadata))
self.source = source
self.context = QnAResponseContext(**context) if context is not None else None
self.id = id # pylint: disable=invalid-name
6 changes: 0 additions & 6 deletions libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,12 +47,6 @@ def __init__(
"QnAMaker.__init__(): endpoint is not an instance of QnAMakerEndpoint"
)

if endpoint.host.endswith("v2.0"):
raise ValueError(
"v2.0 of QnA Maker service is no longer supported in the Bot Framework. Please upgrade your QnA Maker"
" service at www.qnamaker.ai."
)

self._endpoint: str = endpoint

opt = options or QnAMakerOptions()
Expand Down
5 changes: 3 additions & 2 deletions libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker_options.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

from .models import Metadata

from .models import Metadata, QnARequestContext

# figure out if 300 milliseconds is ok for python requests library...or 100000
class QnAMakerOptions:
Expand All @@ -12,8 +11,10 @@ def __init__(
timeout: int = 0,
top: int = 0,
strict_filters: [Metadata] = None,
context: [QnARequestContext] = None,
):
self.score_threshold = score_threshold
self.timeout = timeout
self.top = top
self.strict_filters = strict_filters or []
self.context = context
Original file line number Diff line number Diff line change
Expand Up @@ -127,18 +127,22 @@ def _hydrate_options(self, query_options: QnAMakerOptions) -> QnAMakerOptions:
):
hydrated_options.timeout = query_options.timeout

if query_options.context:
hydrated_options.context = query_options.context

return hydrated_options

async def _query_qna_service(
self, context: TurnContext, options: QnAMakerOptions
self, turn_context: TurnContext, options: QnAMakerOptions
) -> List[QueryResult]:
url = f"{ self._endpoint.host }/knowledgebases/{ self._endpoint.knowledge_base_id }/generateAnswer"

question = GenerateAnswerRequestBody(
question=context.activity.text,
question=turn_context.activity.text,
top=options.top,
score_threshold=options.score_threshold,
strict_filters=options.strict_filters,
context=options.context,
)

http_request_helper = HttpRequestUtils(self._http_client)
Expand All @@ -161,6 +165,7 @@ async def _emit_trace_info(
score_threshold=options.score_threshold,
top=options.top,
strict_filters=options.strict_filters,
context=options.context,
)

trace_activity = Activity(
Expand Down Expand Up @@ -189,15 +194,6 @@ async def _format_qna_result(
answers_within_threshold, key=lambda ans: ans["score"], reverse=True
)

# The old version of the protocol returns the id in a field called qnaId
# The following translates this old structure to the new
is_legacy_protocol: bool = self._endpoint.host.endswith(
"v2.0"
) or self._endpoint.host.endswith("v3.0")
if is_legacy_protocol:
for answer in answers_within_threshold:
answer["id"] = answer.pop("qnaId", None)

answers_as_query_results = list(
map(lambda answer: QueryResult(**answer), sorted_answers)
)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
{
"answers": [
{
"questions": [
"Where can I buy cleaning products?"
],
"answer": "Any DIY store",
"score": 100,
"id": 55,
"source": "Editorial",
"metadata": [],
"context": {
"isContextOnly": true,
"prompts": []
}
}
]
}
Loading