From 9a80481857eceebedf93f861d96a879b607985fe Mon Sep 17 00:00:00 2001 From: Joffrey Bienvenu Date: Sat, 6 Feb 2021 13:39:56 +0100 Subject: [PATCH 1/8] dev branch created --- .travis.yml | 27 ------------------ Dockerfile | 28 ------------------- LICENSE | 21 -------------- README.md | 80 ----------------------------------------------------- 4 files changed, 156 deletions(-) delete mode 100644 .travis.yml delete mode 100644 Dockerfile delete mode 100644 LICENSE delete mode 100644 README.md diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 1a1bf72..0000000 --- a/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ - -branches: - only: - - master - - frontend - -language: python -python: - - 3.8 -services: - - docker - -install: - - pip install -r requirements.txt - -before_script: - - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin - -script: - - docker build -t joffreybvn/resa-chatbot:latest . - -deploy: - provider: script - script: - docker push joffreybvn/resa-chatbot:latest; - on: - branch: master \ No newline at end of file diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 5a33e12..0000000 --- a/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -FROM python:3.8-slim-buster - -# Install the security updates. -RUN apt-get update -RUN apt-get -y upgrade - -# Dependencies to build requires packages -RUN apt-get -y install gcc - -# Remove all cached file. Get a smaller image. -RUN apt-get clean -RUN rm -rf /var/lib/apt/lists/* - -EXPOSE 3978 - -# Copy the application. -COPY . /opt/app -WORKDIR /opt/app - -# Install the app librairies. -RUN pip install -r requirements.txt - -# Install SpaCy small model -RUN python -m spacy download en_core_web_sm - -# Start the app. -ENTRYPOINT [ "python" ] -CMD [ "main.py" ] \ No newline at end of file diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 833a380..0000000 --- a/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2021 Vincent leurs - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/README.md b/README.md deleted file mode 100644 index 63a9fee..0000000 --- a/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# chatbot - - - - - -## Cross-plateforme implementation - - -## fonctionnalités du Bot : - -### Traitement des inputs "utilisateurs" - -- Bert ? 
- -### Possible réponses - -- Accueillire - - Décrire les fonctionnalités du bot - - Répondre -- Donner les heures d'ouvertures -- Afficher une liste d'objet, de produit - -### Nice to have - -- Réserver un service / un produit - - Gérer un agenda - - vérifier la disponibilité - - -- - - "**In English**, the bot should be able to :\n", - "\n", - "- Understand phrases related to a room reservation.\n", - "Example that the bot will have to understand: \n", - "\n", - "\t\t> I wish to reserve a room for 2 people.\n", - "\t\t> I wish to reserve a room for 4 days\n", - "\t\t> Do you have rooms available from July 23rd?\n", - "\t\t> I would like to reserve a room for two days and for two people\n", - "\n", - "- Understand phrases related to a table reservation for the restaurant. \n", - "\n", - "\t\t> I would like to make a reservation for tonight.\n", - "\t\t> I'd like to reserve a table for four people.\n", - "\n", - "- Must ensure a continuous and ongoing conversation. Example of a complete conversation : \n", - "\n", - "\t\t> Customer : Hello !\n", - "\t\t> Bot : Hello, how can I help you? \n", - "\t\t> Customer: I would like to reserve a table for 4 people ? \n", - "\t\t> Bot : For which date would you like to reserve your table?\n", - "\t\t> Customer : Today at 7:00 pm\n", - "\t\t> Bot : What name should I make the reservation under?\n", - "\t\t> Customer : My name is Mr. Dupont! \n", - "\t\t> Bot : Very well Mr Dupont, I confirm you the reservation of a table for 4 people tonight at 7:00 pm. \n", - "\t\t> Bot : Can I help you with something else?\n", - "\t\t> Customer : No thanks\n", - "\t\t> Bot: Have a nice day. \n", - "\n", - "- Understand when the client is angry. In this case, the bot will indicate that it is transmitting the conversation to a human. \n", - "\n", - "\t\t> You're incompetent!\n", - "\t\t> My room is dirty! This is outrageous!\n", - "\t\t> I want to talk to a human. 
\n", - "\n", - "### Nice-to-have features\n", - "- Create an API of your bot to make it cross-platform \n", - "- Use Docker\n", - - - - -## Hébergement du Bot - -Timeline: -- Etablir l'objectif (déployer bot cross-plateforme + créer propre modele) -- Trouver un framework >> MSBotFramework -- Créer un dataset -- Deployer dummy bot \ No newline at end of file From df1cdd528c45e8b465a310870f5006ae13bc12dd Mon Sep 17 00:00:00 2001 From: Joffrey Bienvenu Date: Sat, 6 Feb 2021 14:10:47 +0100 Subject: [PATCH 2/8] config corrected --- config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.py b/config.py index ad96a25..09437ea 100644 --- a/config.py +++ b/config.py @@ -18,7 +18,7 @@ class Config: MODEL_CLASSIFIER = "bert-base-uncased" # HuggingFace smallest BERT model - For tokenization and classifying # Remote files - s3_base_url = environ.get("S3_BASE_URL", ""), + s3_base_url = environ.get("S3_BASE_URL", "") weight_file = "resa_BERT_model.pt" MODEL_WEIGHT_URL = f"{s3_base_url}/{weight_file}" # Fine-tuned weights for BERT model From dbe44f761f46979e81618bd06a580ec621738c4d Mon Sep 17 00:00:00 2001 From: Joffrey Bienvenu Date: Sat, 6 Feb 2021 15:23:21 +0100 Subject: [PATCH 3/8] Imports rewritten --- notebooks/matcher_concept.ipynb | 436 -------------------- requirements.txt | 1 + src/__init__.py | 4 +- src/bot.py | 23 +- src/classifying/__init__.py | 2 - src/dialogs/__init__.py | 4 + src/dialogs/data_models/__init__.py | 4 + src/dialogs/data_models/user_profile.py | 14 + src/dialogs/example.py | 221 ++++++++++ src/matching/__init__.py | 3 - src/nlu/__init__.py | 4 + src/nlu/classifying/__init__.py | 4 + src/{ => nlu}/classifying/classifier.py | 2 +- src/nlu/matching/__init__.py | 5 + src/{ => nlu}/matching/filter.py | 0 src/{ => nlu}/matching/matcher.py | 2 +- src/nlu/nlu.py | 34 ++ src/nlu/preprocessing/__init__.py | 5 + src/{ => nlu}/preprocessing/preprocessor.py | 0 src/{ => nlu}/preprocessing/tokenizer.py | 0 src/preprocessing/__init__.py | 3 - 21 files changed, 304 insertions(+), 467 deletions(-) delete mode 100644 notebooks/matcher_concept.ipynb delete mode 100644 src/classifying/__init__.py create mode 100644 src/dialogs/__init__.py create mode 100644 src/dialogs/data_models/__init__.py create mode 100644 src/dialogs/data_models/user_profile.py create mode 100644 src/dialogs/example.py delete mode 100644 src/matching/__init__.py create mode 100644 src/nlu/__init__.py create mode 100644 src/nlu/classifying/__init__.py rename src/{ => nlu}/classifying/classifier.py (96%) create mode 100644 src/nlu/matching/__init__.py rename src/{ => nlu}/matching/filter.py (100%) rename src/{ => nlu}/matching/matcher.py (98%) create mode 100644 src/nlu/nlu.py create mode 100644 src/nlu/preprocessing/__init__.py rename src/{ => nlu}/preprocessing/preprocessor.py (100%) rename src/{ => nlu}/preprocessing/tokenizer.py (100%) delete mode 100644 src/preprocessing/__init__.py diff --git a/notebooks/matcher_concept.ipynb b/notebooks/matcher_concept.ipynb deleted file mode 100644 index f6b3d6c..0000000 --- a/notebooks/matcher_concept.ipynb +++ /dev/null @@ -1,436 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "BERT_matcher.ipynb", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "LE_1jA3iOuyS" - }, - "source": [ - "# Imports" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "wTAm9SiyQiH3" - 
}, - "source": [ - "### Installs" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 598 - }, - "id": "3DM4-q_lOg00", - "outputId": "d1c0ef9c-c067-4a8b-8a7e-eb263a4fde83" - }, - "source": [ - "!pip install polyfuzz" - ], - "execution_count": 1, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Collecting polyfuzz\n", - " Downloading https://files.pythonhosted.org/packages/f0/00/632ac43b4c51e78dec784995db49861f7d6ac7b8ff00bb40127bd3e48854/polyfuzz-0.2.2-py2.py3-none-any.whl\n", - "Requirement already satisfied: matplotlib>=3.2.2 in /usr/local/lib/python3.6/dist-packages (from polyfuzz) (3.2.2)\n", - "Requirement already satisfied: scikit-learn>=0.22.2.post1 in /usr/local/lib/python3.6/dist-packages (from polyfuzz) (0.22.2.post1)\n", - "Requirement already satisfied: tqdm>=4.41.1 in /usr/local/lib/python3.6/dist-packages (from polyfuzz) (4.41.1)\n", - "Requirement already satisfied: seaborn>=0.11.0 in /usr/local/lib/python3.6/dist-packages (from polyfuzz) (0.11.1)\n", - "Requirement already satisfied: joblib>=0.14.0 in /usr/local/lib/python3.6/dist-packages (from polyfuzz) (1.0.0)\n", - "Requirement already satisfied: scipy>=1.3.1 in /usr/local/lib/python3.6/dist-packages (from polyfuzz) (1.4.1)\n", - "Requirement already satisfied: pandas>=0.25.3 in /usr/local/lib/python3.6/dist-packages (from polyfuzz) (1.1.5)\n", - "Collecting rapidfuzz>=0.13.1\n", - "\u001b[?25l Downloading https://files.pythonhosted.org/packages/f3/50/c9c22779370cf3314af40bac33dc2fb42c08fc5bb3f430fefbecceb763e2/rapidfuzz-0.14.2-cp36-cp36m-manylinux2010_x86_64.whl (4.4MB)\n", - "\u001b[K |████████████████████████████████| 4.5MB 6.9MB/s \n", - "\u001b[?25hCollecting numpy<=1.19.4,>=1.18.5\n", - "\u001b[?25l Downloading https://files.pythonhosted.org/packages/87/86/753182c9085ba4936c0076269a571613387cdb77ae2bf537448bfd63472c/numpy-1.19.4-cp36-cp36m-manylinux2010_x86_64.whl (14.5MB)\n", - "\u001b[K |████████████████████████████████| 14.5MB 31.5MB/s \n", - "\u001b[?25hRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=3.2.2->polyfuzz) (1.3.1)\n", - "Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=3.2.2->polyfuzz) (2.8.1)\n", - "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=3.2.2->polyfuzz) (2.4.7)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib>=3.2.2->polyfuzz) (0.10.0)\n", - "Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.25.3->polyfuzz) (2018.9)\n", - "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.1->matplotlib>=3.2.2->polyfuzz) (1.15.0)\n", - "\u001b[31mERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.\u001b[0m\n", - "\u001b[31mERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.\u001b[0m\n", - "Installing collected packages: rapidfuzz, numpy, polyfuzz\n", - " Found existing installation: numpy 1.19.5\n", - " Uninstalling numpy-1.19.5:\n", - " Successfully uninstalled numpy-1.19.5\n", - "Successfully installed numpy-1.19.4 polyfuzz-0.2.2 rapidfuzz-0.14.2\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - 
"application/vnd.colab-display-data+json": { - "pip_warning": { - "packages": [ - "numpy" - ] - } - } - }, - "metadata": { - "tags": [] - } - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pWGzDozZQbed" - }, - "source": [ - "### Libraries" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "_FOXcuKaP-_h" - }, - "source": [ - "from polyfuzz import PolyFuzz\n", - "import re" - ], - "execution_count": 2, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qhw4bDx9QeMv" - }, - "source": [ - "# Matching" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "LvFGGaPCV-xF" - }, - "source": [ - "Instantiate the model" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "RiCNRQXnWAmg" - }, - "source": [ - "# TODO use a lighter model with lemmatization to fasten the inference\n", - "models = PolyFuzz(\"TF-IDF\")" - ], - "execution_count": 3, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "kQCET-PIRgfZ" - }, - "source": [ - "text = \"I wish to reserve a room for 2 peoples\"" - ], - "execution_count": 4, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "v-nPRwsBbaJj" - }, - "source": [ - "Match the phrase" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "DZEtXe3cP9JF", - "outputId": "9648174a-2157-438b-c6bb-d41b355eef29" - }, - "source": [ - "entries = text.split(\" \")\n", - "filter = [\"pearson\", \"people\"]\n", - "\n", - "models.match(entries, filter)" - ], - "execution_count": 5, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 5 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SESCxg6Ibcu7" - }, - "source": [ - "Extract the word" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 328 - }, - "id": "MyjH-V3Huemi", - "outputId": "0c287a3c-1849-41ee-ea43-9ac76b02522f" - }, - "source": [ - "matches" - ], - "execution_count": 12, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - " From To Similarity\n", - "0 I None 0.000000\n", - "1 wish None 0.000000\n", - "2 to None 0.000000\n", - "3 reserve None 0.000000\n", - "4 a None 0.000000\n", - "5 room None 0.000000\n", - "6 for None 0.000000\n", - "7 2 None 0.000000\n", - "8 peoples people 0.863168" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 12 - } - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "nCUoNtQdvkTM" - }, - "source": [ - "thresholded = matches[matches['Similarity'] >= 0.85]" - ], - "execution_count": 13, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "tfry_5nmQFas" - }, - "source": [ - "matches = models.get_matches()\n", - "word = thresholded[thresholded['Similarity'] == thresholded['Similarity'].max()].iloc[0, 0]" - ], - "execution_count": 14, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 35 - }, - "id": "pJ77yhlcUgJ9", - "outputId": "50e03113-b413-4c39-d397-f4d62b15c290" - }, - "source": [ - "word" - ], - "execution_count": 15, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "application/vnd.google.colaboratory.intrinsic+json": { - "type": "string" - }, - "text/plain": [ - "'peoples'" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 15 - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "U0oCHYOHbt0M" - }, - "source": [ - "Extract data" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "JGI3Oqy8cSeT" - }, - "source": [ - "result = re.search(f\"(?P\\d)\\W{word}\", text)" - ], - "execution_count": 8, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "vZZRmx7UdLyx", - "outputId": "14362d25-5d52-4cef-db36-cb59dd0ae9a7" - }, - "source": [ - "int(result.group('amount'))" - ], - "execution_count": 9, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "2" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 9 - } - ] - } - ] -} \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index d1b640b..26df4ad 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,7 @@ botbuilder-core==4.11.0 botbuilder-integration-aiohttp==4.11.0 botbuilder-schema==4.11.0 botframework-connector==4.11.0 +botbuilder-dialogs==4.11.0 aiohttp==3.6.2 # Preprocessing diff --git a/src/__init__.py b/src/__init__.py index 31bf79b..b00d0e0 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -1,2 +1,4 @@ -from src.bot import Bot +from .bot import Bot + +__all__ = ["Bot"] diff --git a/src/bot.py b/src/bot.py index fe1fa58..7a6c76c 100644 --- a/src/bot.py +++ b/src/bot.py @@ -2,17 +2,8 @@ from botbuilder.core import ActivityHandler, MessageFactory, TurnContext from botbuilder.schema import ChannelAccount -from src.matching import Matcher -from src.preprocessing import Preprocessor, Tokenizer -from src.classifying import Classifier - -# Preprocessing -preprocessor = Preprocessor() -tokenizer = Tokenizer() - -# Classifier -classifier = Classifier() -matcher = Matcher() +from .nlu import NLU +nlu = NLU() class Bot(ActivityHandler): @@ -25,16 +16,8 @@ async def on_members_added_activity(self, members_added: [ChannelAccount], turn_ async def on_message_activity(self, turn_context: TurnContext): - # Get the message - message = turn_context.activity.text - - # Clean the message and create a dataset of tokens - preprocessed_text = preprocessor.preprocess(message) - dataset = 
tokenizer.get_dataset(preprocessed_text) - # Get the intention - intent = classifier.predict(dataset) - keywords = matcher.get_keywords(preprocessed_text, intent) + intent, keywords = nlu.get_intent(turn_context.activity.text) return await turn_context.send_activity( MessageFactory.text(f""" diff --git a/src/classifying/__init__.py b/src/classifying/__init__.py deleted file mode 100644 index c2d1499..0000000 --- a/src/classifying/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ - -from src.classifying.classifier import Classifier diff --git a/src/dialogs/__init__.py b/src/dialogs/__init__.py new file mode 100644 index 0000000..40dfb8b --- /dev/null +++ b/src/dialogs/__init__.py @@ -0,0 +1,4 @@ + +from .example import Example + +__all__ = ["Example"] diff --git a/src/dialogs/data_models/__init__.py b/src/dialogs/data_models/__init__.py new file mode 100644 index 0000000..7171365 --- /dev/null +++ b/src/dialogs/data_models/__init__.py @@ -0,0 +1,4 @@ + +from .user_profile import UserProfile + +__all__ = ["UserProfile"] diff --git a/src/dialogs/data_models/user_profile.py b/src/dialogs/data_models/user_profile.py new file mode 100644 index 0000000..b079352 --- /dev/null +++ b/src/dialogs/data_models/user_profile.py @@ -0,0 +1,14 @@ + +from botbuilder.schema import Attachment + + +class UserProfile: + """ + This is our application state. Just a regular serializable Python class. + """ + + def __init__(self, name: str = None, transport: str = None, age: int = 0, picture: Attachment = None): + self.name = name + self.transport = transport + self.age = age + self.picture = picture diff --git a/src/dialogs/example.py b/src/dialogs/example.py new file mode 100644 index 0000000..b75fd0c --- /dev/null +++ b/src/dialogs/example.py @@ -0,0 +1,221 @@ + +from botbuilder.dialogs import ComponentDialog, WaterfallDialog, WaterfallStepContext, DialogTurnResult +from botbuilder.dialogs.prompts import TextPrompt, NumberPrompt, ChoicePrompt, ConfirmPrompt, AttachmentPrompt, PromptOptions, PromptValidatorContext +from botbuilder.dialogs.choices import Choice +from botbuilder.core import MessageFactory, UserState + +from .data_models import UserProfile + + +class UserProfileDialog(ComponentDialog): + + def __init__(self, user_state: UserState): + super(UserProfileDialog, self).__init__(UserProfileDialog.__name__) + + self.user_profile_accessor = user_state.create_property("UserProfile") + + self.add_dialog( + WaterfallDialog( + WaterfallDialog.__name__, + [ + self.transport_step, + self.name_step, + self.name_confirm_step, + self.age_step, + self.picture_step, + self.confirm_step, + self.summary_step, + ], + ) + ) + self.add_dialog(TextPrompt(TextPrompt.__name__)) + self.add_dialog( + NumberPrompt(NumberPrompt.__name__, UserProfileDialog.age_prompt_validator) + ) + self.add_dialog(ChoicePrompt(ChoicePrompt.__name__)) + self.add_dialog(ConfirmPrompt(ConfirmPrompt.__name__)) + self.add_dialog( + AttachmentPrompt( + AttachmentPrompt.__name__, UserProfileDialog.picture_prompt_validator + ) + ) + + self.initial_dialog_id = WaterfallDialog.__name__ + + async def transport_step( + self, step_context: WaterfallStepContext + ) -> DialogTurnResult: + # WaterfallStep always finishes with the end of the Waterfall or with another dialog; + # here it is a Prompt Dialog. Running a prompt here means the next WaterfallStep will + # be run when the users response is received. 
+ return await step_context.prompt( + ChoicePrompt.__name__, + PromptOptions( + prompt=MessageFactory.text("Please enter your mode of transport."), + choices=[Choice("Car"), Choice("Bus"), Choice("Bicycle")], + ), + ) + + async def name_step(self, step_context: WaterfallStepContext) -> DialogTurnResult: + step_context.values["transport"] = step_context.result.value + + return await step_context.prompt( + TextPrompt.__name__, + PromptOptions(prompt=MessageFactory.text("Please enter your name.")), + ) + + async def name_confirm_step( + self, step_context: WaterfallStepContext + ) -> DialogTurnResult: + step_context.values["name"] = step_context.result + + # We can send messages to the user at any point in the WaterfallStep. + await step_context.context.send_activity( + MessageFactory.text(f"Thanks {step_context.result}") + ) + + # WaterfallStep always finishes with the end of the Waterfall or + # with another dialog; here it is a Prompt Dialog. + return await step_context.prompt( + ConfirmPrompt.__name__, + PromptOptions( + prompt=MessageFactory.text("Would you like to give your age?") + ), + ) + + async def age_step(self, step_context: WaterfallStepContext) -> DialogTurnResult: + if step_context.result: + # User said "yes" so we will be prompting for the age. + # WaterfallStep always finishes with the end of the Waterfall or with another dialog, + # here it is a Prompt Dialog. + return await step_context.prompt( + NumberPrompt.__name__, + PromptOptions( + prompt=MessageFactory.text("Please enter your age."), + retry_prompt=MessageFactory.text( + "The value entered must be greater than 0 and less than 150." + ), + ), + ) + + # User said "no" so we will skip the next step. Give -1 as the age. + return await step_context.next(-1) + + async def picture_step( + self, step_context: WaterfallStepContext + ) -> DialogTurnResult: + age = step_context.result + step_context.values["age"] = age + + msg = ( + "No age given." + if step_context.result == -1 + else f"I have your age as {age}." + ) + + # We can send messages to the user at any point in the WaterfallStep. + await step_context.context.send_activity(MessageFactory.text(msg)) + + if step_context.context.activity.channel_id == "msteams": + # This attachment prompt example is not designed to work for Teams attachments, so skip it in this case + await step_context.context.send_activity( + "Skipping attachment prompt in Teams channel..." + ) + return await step_context.next(None) + + # WaterfallStep always finishes with the end of the Waterfall or with another dialog; here it is a Prompt + # Dialog. + prompt_options = PromptOptions( + prompt=MessageFactory.text( + "Please attach a profile picture (or type any message to skip)." + ), + retry_prompt=MessageFactory.text( + "The attachment must be a jpeg/png image file." + ), + ) + return await step_context.prompt(AttachmentPrompt.__name__, prompt_options) + + async def confirm_step( + self, step_context: WaterfallStepContext + ) -> DialogTurnResult: + step_context.values["picture"] = ( + None if not step_context.result else step_context.result[0] + ) + + # WaterfallStep always finishes with the end of the Waterfall or + # with another dialog; here it is a Prompt Dialog. + return await step_context.prompt( + ConfirmPrompt.__name__, + PromptOptions(prompt=MessageFactory.text("Is this ok?")), + ) + + async def summary_step( + self, step_context: WaterfallStepContext + ) -> DialogTurnResult: + if step_context.result: + # Get the current profile object from user state. 
Changes to it + # will saved during Bot.on_turn. + user_profile = await self.user_profile_accessor.get( + step_context.context, UserProfile + ) + + user_profile.transport = step_context.values["transport"] + user_profile.name = step_context.values["name"] + user_profile.age = step_context.values["age"] + user_profile.picture = step_context.values["picture"] + + msg = f"I have your mode of transport as {user_profile.transport} and your name as {user_profile.name}." + if user_profile.age != -1: + msg += f" And age as {user_profile.age}." + + await step_context.context.send_activity(MessageFactory.text(msg)) + + if user_profile.picture: + await step_context.context.send_activity( + MessageFactory.attachment( + user_profile.picture, "This is your profile picture." + ) + ) + else: + await step_context.context.send_activity( + "A profile picture was saved but could not be displayed here." + ) + else: + await step_context.context.send_activity( + MessageFactory.text("Thanks. Your profile will not be kept.") + ) + + # WaterfallStep always finishes with the end of the Waterfall or with another + # dialog, here it is the end. + return await step_context.end_dialog() + + @staticmethod + async def age_prompt_validator(prompt_context: PromptValidatorContext) -> bool: + # This condition is our validation rule. You can also change the value at this point. + return ( + prompt_context.recognized.succeeded + and 0 < prompt_context.recognized.value < 150 + ) + + @staticmethod + async def picture_prompt_validator(prompt_context: PromptValidatorContext) -> bool: + if not prompt_context.recognized.succeeded: + await prompt_context.context.send_activity( + "No attachments received. Proceeding without a profile picture..." + ) + + # We can return true from a validator function even if recognized.succeeded is false. + return True + + attachments = prompt_context.recognized.value + + valid_images = [ + attachment + for attachment in attachments + if attachment.content_type in ["image/jpeg", "image/png"] + ] + + prompt_context.recognized.value = valid_images + + # If none of the attachments are valid images, the retry prompt should be sent. 
+ return len(valid_images) > 0 \ No newline at end of file diff --git a/src/matching/__init__.py b/src/matching/__init__.py deleted file mode 100644 index 044afdd..0000000 --- a/src/matching/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ - -from src.matching.filter import Filter -from src.matching.matcher import Matcher diff --git a/src/nlu/__init__.py b/src/nlu/__init__.py new file mode 100644 index 0000000..d17c5f8 --- /dev/null +++ b/src/nlu/__init__.py @@ -0,0 +1,4 @@ + +from .nlu import NLU + +__all__ = ["NLU"] diff --git a/src/nlu/classifying/__init__.py b/src/nlu/classifying/__init__.py new file mode 100644 index 0000000..453d49a --- /dev/null +++ b/src/nlu/classifying/__init__.py @@ -0,0 +1,4 @@ + +from .classifier import Classifier + +__all__ = ["Classifier"] diff --git a/src/classifying/classifier.py b/src/nlu/classifying/classifier.py similarity index 96% rename from src/classifying/classifier.py rename to src/nlu/classifying/classifier.py index 62f994f..d187328 100644 --- a/src/classifying/classifier.py +++ b/src/nlu/classifying/classifier.py @@ -50,7 +50,7 @@ def _load_model(self) -> BertForSequenceClassification: """ # Download and save the weights locally - self.__load_remote_file(config.MODEL_WEIGHT_URL, config.MODEL_WEIGHT_LOCAL_COPY) + # self.__load_remote_file(config.MODEL_WEIGHT_URL, config.MODEL_WEIGHT_LOCAL_COPY) # Instantiate the model model = BertForSequenceClassification.from_pretrained( diff --git a/src/nlu/matching/__init__.py b/src/nlu/matching/__init__.py new file mode 100644 index 0000000..fa2ea14 --- /dev/null +++ b/src/nlu/matching/__init__.py @@ -0,0 +1,5 @@ + +from .filter import Filter +from .matcher import Matcher + +__all__ = ["Filter", "Matcher"] diff --git a/src/matching/filter.py b/src/nlu/matching/filter.py similarity index 100% rename from src/matching/filter.py rename to src/nlu/matching/filter.py diff --git a/src/matching/matcher.py b/src/nlu/matching/matcher.py similarity index 98% rename from src/matching/matcher.py rename to src/nlu/matching/matcher.py index 47063d4..1f39ccc 100644 --- a/src/matching/matcher.py +++ b/src/nlu/matching/matcher.py @@ -6,7 +6,7 @@ import pandas as pd from polyfuzz import PolyFuzz -from src.matching import Filter +from . import Filter from config import Config config = Config() diff --git a/src/nlu/nlu.py b/src/nlu/nlu.py new file mode 100644 index 0000000..c59370a --- /dev/null +++ b/src/nlu/nlu.py @@ -0,0 +1,34 @@ + +from typing import Tuple + +from src.nlu.matching import Matcher +from src.nlu.preprocessing import Preprocessor, Tokenizer +from src.nlu.classifying import Classifier + + +class NLU: + + def __init__(self): + + # Preprocessing + self.preprocessor = Preprocessor() + self.tokenizer = Tokenizer() + + # Classifier + self.classifier = Classifier() + self.matcher = Matcher() + + def get_intent(self, message: str) -> Tuple[str, dict]: + """ + Return the intention and the keywords of a given message. 
+ """ + + # Clean the message and create a dataset of tokens + preprocessed_text = self.preprocessor.preprocess(message) + dataset = self.tokenizer.get_dataset(preprocessed_text) + + # Get the intention + intent = self.classifier.predict(dataset) + keywords = self.matcher.get_keywords(preprocessed_text, intent) + + return intent, keywords diff --git a/src/nlu/preprocessing/__init__.py b/src/nlu/preprocessing/__init__.py new file mode 100644 index 0000000..59813be --- /dev/null +++ b/src/nlu/preprocessing/__init__.py @@ -0,0 +1,5 @@ + +from .tokenizer import Tokenizer +from .preprocessor import Preprocessor + +__all__ = ["Tokenizer", "Preprocessor"] diff --git a/src/preprocessing/preprocessor.py b/src/nlu/preprocessing/preprocessor.py similarity index 100% rename from src/preprocessing/preprocessor.py rename to src/nlu/preprocessing/preprocessor.py diff --git a/src/preprocessing/tokenizer.py b/src/nlu/preprocessing/tokenizer.py similarity index 100% rename from src/preprocessing/tokenizer.py rename to src/nlu/preprocessing/tokenizer.py diff --git a/src/preprocessing/__init__.py b/src/preprocessing/__init__.py deleted file mode 100644 index 2c7c3f0..0000000 --- a/src/preprocessing/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ - -from src.preprocessing.tokenizer import Tokenizer -from src.preprocessing.preprocessor import Preprocessor From 9fbc892653e21743a7d8af004d2ec24a2e9ab3e1 Mon Sep 17 00:00:00 2001 From: Joffrey Bienvenu Date: Sat, 6 Feb 2021 15:54:22 +0100 Subject: [PATCH 4/8] Example dialog implemented --- main.py | 24 +++++++++++++++++++----- src/bot.py | 34 ++++++++++++++++++++-------------- src/dialogs/__init__.py | 5 +++-- src/dialogs/dialogs_helper.py | 18 ++++++++++++++++++ 4 files changed, 60 insertions(+), 21 deletions(-) create mode 100644 src/dialogs/dialogs_helper.py diff --git a/main.py b/main.py index 3175024..f5d8569 100644 --- a/main.py +++ b/main.py @@ -6,20 +6,20 @@ from aiohttp import web from aiohttp.web import Request, Response, json_response -from botbuilder.core import BotFrameworkAdapterSettings, TurnContext, BotFrameworkAdapter +from botbuilder.core import BotFrameworkAdapterSettings, TurnContext, BotFrameworkAdapter, ConversationState, MemoryStorage, UserState from botbuilder.core.integration import aiohttp_error_middleware from botbuilder.schema import Activity, ActivityTypes +from src.dialogs import UserProfileDialog from src import Bot from config import Config # Load the config and create the bot config = Config() -bot = Bot() # Init a Bot adapter https://aka.ms/about-bot-adapter settings = BotFrameworkAdapterSettings(config.APP_ID, config.APP_PASSWORD) -adapter = BotFrameworkAdapter(settings) +ADAPTER = BotFrameworkAdapter(settings) # Catch-all for errors @@ -51,7 +51,21 @@ async def on_error(context: TurnContext, error_: Exception): ) await context.send_activity(trace_activity) -adapter.on_turn_error = on_error + # Clear out state + await CONVERSATION_STATE.delete(context) + + +# Set the error handler on the Adapter. 
+ADAPTER.on_turn_error = on_error + +# Create MemoryStorage, UserState and ConversationState +MEMORY = MemoryStorage() +CONVERSATION_STATE = ConversationState(MEMORY) +USER_STATE = UserState(MEMORY) + +# Create main dialog and bot +DIALOG = UserProfileDialog(USER_STATE) +bot = Bot(CONVERSATION_STATE, USER_STATE, DIALOG) # Direct message API @@ -76,7 +90,7 @@ async def messages(req: Request) -> Response: auth_header = req.headers["Authorization"] # Call the bot and send back its response - response = await adapter.process_activity(activity, auth_header, bot.on_turn) + response = await ADAPTER.process_activity(activity, auth_header, bot.on_turn) if response: return json_response(data=response.body, status=response.status) diff --git a/src/bot.py b/src/bot.py index 7a6c76c..a8de2f3 100644 --- a/src/bot.py +++ b/src/bot.py @@ -1,27 +1,33 @@ -from botbuilder.core import ActivityHandler, MessageFactory, TurnContext -from botbuilder.schema import ChannelAccount +from botbuilder.core import ActivityHandler, TurnContext, ConversationState, UserState +from botbuilder.dialogs import Dialog +from .dialogs import DialogHelper from .nlu import NLU + nlu = NLU() class Bot(ActivityHandler): - async def on_members_added_activity(self, members_added: [ChannelAccount], turn_context: TurnContext): + def __init__(self, conversation_state: ConversationState, user_state: UserState, dialog: Dialog): - for member in members_added: - if member.id != turn_context.activity.recipient.id: - await turn_context.send_activity("Hello and welcome!") + self.conversation_state = conversation_state + self.user_state = user_state + self.dialog = dialog - async def on_message_activity(self, turn_context: TurnContext): + async def on_turn(self, turn_context: TurnContext): + + await super().on_turn(turn_context) - # Get the intention - intent, keywords = nlu.get_intent(turn_context.activity.text) + # Save any state changes that might have occurred during the turn. 
+ await self.conversation_state.save_changes(turn_context) + await self.user_state.save_changes(turn_context) + + async def on_message_activity(self, turn_context: TurnContext): - return await turn_context.send_activity( - MessageFactory.text(f""" - intent: {intent}, - keywords: {keywords} - """) + await DialogHelper.run_dialog( + self.dialog, + turn_context, + self.conversation_state.create_property("DialogState"), ) diff --git a/src/dialogs/__init__.py b/src/dialogs/__init__.py index 40dfb8b..74e9a06 100644 --- a/src/dialogs/__init__.py +++ b/src/dialogs/__init__.py @@ -1,4 +1,5 @@ -from .example import Example +from .dialogs_helper import DialogHelper +from .example import UserProfileDialog -__all__ = ["Example"] +__all__ = ["DialogHelper", "UserProfileDialog"] diff --git a/src/dialogs/dialogs_helper.py b/src/dialogs/dialogs_helper.py new file mode 100644 index 0000000..531b325 --- /dev/null +++ b/src/dialogs/dialogs_helper.py @@ -0,0 +1,18 @@ + +from botbuilder.core import StatePropertyAccessor, TurnContext +from botbuilder.dialogs import Dialog, DialogSet, DialogTurnStatus + + +class DialogHelper: + + @staticmethod + async def run_dialog(dialog: Dialog, turn_context: TurnContext, accessor: StatePropertyAccessor): + + dialog_set = DialogSet(accessor) + dialog_set.add(dialog) + + dialog_context = await dialog_set.create_context(turn_context) + results = await dialog_context.continue_dialog() + + if results.status == DialogTurnStatus.Empty: + await dialog_context.begin_dialog(dialog.id) From 9ca51926368de2b68e2ee1a1c0633e7b468624ea Mon Sep 17 00:00:00 2001 From: Joffrey Bienvenu Date: Sat, 6 Feb 2021 17:25:52 +0100 Subject: [PATCH 5/8] Simple waterfall hotel conversation implemented --- src/dialogs/example.py | 226 +++++++++++------------------------------ 1 file changed, 57 insertions(+), 169 deletions(-) diff --git a/src/dialogs/example.py b/src/dialogs/example.py index b75fd0c..653ddae 100644 --- a/src/dialogs/example.py +++ b/src/dialogs/example.py @@ -12,210 +12,98 @@ class UserProfileDialog(ComponentDialog): def __init__(self, user_state: UserState): super(UserProfileDialog, self).__init__(UserProfileDialog.__name__) + # Load the UserProfile class self.user_profile_accessor = user_state.create_property("UserProfile") - self.add_dialog( - WaterfallDialog( - WaterfallDialog.__name__, - [ - self.transport_step, - self.name_step, - self.name_confirm_step, - self.age_step, - self.picture_step, - self.confirm_step, - self.summary_step, - ], - ) - ) - self.add_dialog(TextPrompt(TextPrompt.__name__)) - self.add_dialog( - NumberPrompt(NumberPrompt.__name__, UserProfileDialog.age_prompt_validator) - ) + # Setup the waterfall dialog + self.add_dialog(WaterfallDialog(WaterfallDialog.__name__, [ + self.people_step, + self.nights_step, + self.breakfast_step, + self.summary_step, + ])) + + # Append the prompts and custom prompts + # self.add_dialog(TextPrompt(TextPrompt.__name__)) + self.add_dialog(NumberPrompt(NumberPrompt.__name__)) self.add_dialog(ChoicePrompt(ChoicePrompt.__name__)) self.add_dialog(ConfirmPrompt(ConfirmPrompt.__name__)) - self.add_dialog( - AttachmentPrompt( - AttachmentPrompt.__name__, UserProfileDialog.picture_prompt_validator - ) - ) self.initial_dialog_id = WaterfallDialog.__name__ - async def transport_step( - self, step_context: WaterfallStepContext - ) -> DialogTurnResult: - # WaterfallStep always finishes with the end of the Waterfall or with another dialog; - # here it is a Prompt Dialog. 
Running a prompt here means the next WaterfallStep will - # be run when the users response is received. + @staticmethod + async def people_step(step_context: WaterfallStepContext) -> DialogTurnResult: + + # ChoicePrompt - How many people ? return await step_context.prompt( ChoicePrompt.__name__, PromptOptions( - prompt=MessageFactory.text("Please enter your mode of transport."), - choices=[Choice("Car"), Choice("Bus"), Choice("Bicycle")], + prompt=MessageFactory.text("What size room will you need?"), + choices=[ + Choice("2 peoples"), + Choice("4 peoples"), + ], ), ) - async def name_step(self, step_context: WaterfallStepContext) -> DialogTurnResult: - step_context.values["transport"] = step_context.result.value - - return await step_context.prompt( - TextPrompt.__name__, - PromptOptions(prompt=MessageFactory.text("Please enter your name.")), - ) + @staticmethod + async def nights_step(step_context: WaterfallStepContext) -> DialogTurnResult: - async def name_confirm_step( - self, step_context: WaterfallStepContext - ) -> DialogTurnResult: - step_context.values["name"] = step_context.result + # Save the number of people + step_context.values["peoples"] = step_context.result.value - # We can send messages to the user at any point in the WaterfallStep. + # Confirm the number of people await step_context.context.send_activity( - MessageFactory.text(f"Thanks {step_context.result}") + MessageFactory.text(f"Okay, for {step_context.result.value}") ) - # WaterfallStep always finishes with the end of the Waterfall or - # with another dialog; here it is a Prompt Dialog. + # NumberPrompt - How many nights ? return await step_context.prompt( - ConfirmPrompt.__name__, + NumberPrompt.__name__, PromptOptions( - prompt=MessageFactory.text("Would you like to give your age?") + prompt=MessageFactory.text("How long do you want to stay?") ), ) - async def age_step(self, step_context: WaterfallStepContext) -> DialogTurnResult: - if step_context.result: - # User said "yes" so we will be prompting for the age. - # WaterfallStep always finishes with the end of the Waterfall or with another dialog, - # here it is a Prompt Dialog. - return await step_context.prompt( - NumberPrompt.__name__, - PromptOptions( - prompt=MessageFactory.text("Please enter your age."), - retry_prompt=MessageFactory.text( - "The value entered must be greater than 0 and less than 150." - ), - ), - ) - - # User said "no" so we will skip the next step. Give -1 as the age. - return await step_context.next(-1) - - async def picture_step( - self, step_context: WaterfallStepContext - ) -> DialogTurnResult: - age = step_context.result - step_context.values["age"] = age - - msg = ( - "No age given." - if step_context.result == -1 - else f"I have your age as {age}." - ) - - # We can send messages to the user at any point in the WaterfallStep. - await step_context.context.send_activity(MessageFactory.text(msg)) - - if step_context.context.activity.channel_id == "msteams": - # This attachment prompt example is not designed to work for Teams attachments, so skip it in this case - await step_context.context.send_activity( - "Skipping attachment prompt in Teams channel..." - ) - return await step_context.next(None) + @staticmethod + async def breakfast_step(step_context: WaterfallStepContext) -> DialogTurnResult: - # WaterfallStep always finishes with the end of the Waterfall or with another dialog; here it is a Prompt - # Dialog. 
- prompt_options = PromptOptions( - prompt=MessageFactory.text( - "Please attach a profile picture (or type any message to skip)." - ), - retry_prompt=MessageFactory.text( - "The attachment must be a jpeg/png image file." - ), - ) - return await step_context.prompt(AttachmentPrompt.__name__, prompt_options) + # Save the number of nights + step_context.values["nights"] = step_context.result - async def confirm_step( - self, step_context: WaterfallStepContext - ) -> DialogTurnResult: - step_context.values["picture"] = ( - None if not step_context.result else step_context.result[0] - ) - - # WaterfallStep always finishes with the end of the Waterfall or - # with another dialog; here it is a Prompt Dialog. + # ConfirmPrompt - Is taking breakfast ? return await step_context.prompt( ConfirmPrompt.__name__, - PromptOptions(prompt=MessageFactory.text("Is this ok?")), + PromptOptions( + prompt=MessageFactory.text("Will you be having breakfast?") + ), ) - async def summary_step( - self, step_context: WaterfallStepContext - ) -> DialogTurnResult: + async def summary_step(self, step_context: WaterfallStepContext) -> DialogTurnResult: + + # If the user said True: if step_context.result: - # Get the current profile object from user state. Changes to it - # will saved during Bot.on_turn. - user_profile = await self.user_profile_accessor.get( - step_context.context, UserProfile - ) - user_profile.transport = step_context.values["transport"] - user_profile.name = step_context.values["name"] - user_profile.age = step_context.values["age"] - user_profile.picture = step_context.values["picture"] - - msg = f"I have your mode of transport as {user_profile.transport} and your name as {user_profile.name}." - if user_profile.age != -1: - msg += f" And age as {user_profile.age}." - - await step_context.context.send_activity(MessageFactory.text(msg)) - - if user_profile.picture: - await step_context.context.send_activity( - MessageFactory.attachment( - user_profile.picture, "This is your profile picture." - ) - ) - else: - await step_context.context.send_activity( - "A profile picture was saved but could not be displayed here." - ) - else: + # Confirm breakfast hour await step_context.context.send_activity( - MessageFactory.text("Thanks. Your profile will not be kept.") + MessageFactory.text(f"Perfect, breakfast is from 6am to 10am.") ) - # WaterfallStep always finishes with the end of the Waterfall or with another - # dialog, here it is the end. - return await step_context.end_dialog() - - @staticmethod - async def age_prompt_validator(prompt_context: PromptValidatorContext) -> bool: - # This condition is our validation rule. You can also change the value at this point. - return ( - prompt_context.recognized.succeeded - and 0 < prompt_context.recognized.value < 150 + # Save information to Reservation object + user_profile = await self.user_profile_accessor.get( + step_context.context, UserProfile ) - @staticmethod - async def picture_prompt_validator(prompt_context: PromptValidatorContext) -> bool: - if not prompt_context.recognized.succeeded: - await prompt_context.context.send_activity( - "No attachments received. Proceeding without a profile picture..." - ) - - # We can return true from a validator function even if recognized.succeeded is false. 
- return True + """ + user_profile.transport = step_context.values["transport"] + user_profile.name = step_context.values["name"] + user_profile.age = step_context.values["age"] + user_profile.picture = step_context.values["picture"] + """ - attachments = prompt_context.recognized.value - - valid_images = [ - attachment - for attachment in attachments - if attachment.content_type in ["image/jpeg", "image/png"] - ] - - prompt_context.recognized.value = valid_images + # End the dialog + await step_context.context.send_activity( + MessageFactory.text("Thanks. See you !") + ) - # If none of the attachments are valid images, the retry prompt should be sent. - return len(valid_images) > 0 \ No newline at end of file + return await step_context.end_dialog() From 692e8437568f1a78370c969330740ba65d513a57 Mon Sep 17 00:00:00 2001 From: Joffrey Bienvenu Date: Sat, 6 Feb 2021 17:36:26 +0100 Subject: [PATCH 6/8] Data model for room reservation implemented --- main.py | 4 +-- src/bot.py | 2 +- src/dialogs/__init__.py | 5 ++- src/dialogs/data_models/__init__.py | 4 +-- src/dialogs/data_models/room_reservation.py | 9 ++++++ src/dialogs/data_models/user_profile.py | 14 -------- src/dialogs/helpers/__init__.py | 4 +++ src/dialogs/{ => helpers}/dialogs_helper.py | 0 src/dialogs/helpers/nlu_helper.py | 0 ...{example.py => room_reservation_dialog.py} | 32 +++++++++---------- 10 files changed, 36 insertions(+), 38 deletions(-) create mode 100644 src/dialogs/data_models/room_reservation.py delete mode 100644 src/dialogs/data_models/user_profile.py create mode 100644 src/dialogs/helpers/__init__.py rename src/dialogs/{ => helpers}/dialogs_helper.py (100%) create mode 100644 src/dialogs/helpers/nlu_helper.py rename src/dialogs/{example.py => room_reservation_dialog.py} (76%) diff --git a/main.py b/main.py index f5d8569..0073228 100644 --- a/main.py +++ b/main.py @@ -10,7 +10,7 @@ from botbuilder.core.integration import aiohttp_error_middleware from botbuilder.schema import Activity, ActivityTypes -from src.dialogs import UserProfileDialog +from src.dialogs import RoomReservationDialog from src import Bot from config import Config @@ -64,7 +64,7 @@ async def on_error(context: TurnContext, error_: Exception): USER_STATE = UserState(MEMORY) # Create main dialog and bot -DIALOG = UserProfileDialog(USER_STATE) +DIALOG = RoomReservationDialog(USER_STATE) bot = Bot(CONVERSATION_STATE, USER_STATE, DIALOG) diff --git a/src/bot.py b/src/bot.py index a8de2f3..e7fc5f3 100644 --- a/src/bot.py +++ b/src/bot.py @@ -2,7 +2,7 @@ from botbuilder.core import ActivityHandler, TurnContext, ConversationState, UserState from botbuilder.dialogs import Dialog -from .dialogs import DialogHelper +from .dialogs.helpers import DialogHelper from .nlu import NLU nlu = NLU() diff --git a/src/dialogs/__init__.py b/src/dialogs/__init__.py index 74e9a06..4c695f0 100644 --- a/src/dialogs/__init__.py +++ b/src/dialogs/__init__.py @@ -1,5 +1,4 @@ -from .dialogs_helper import DialogHelper -from .example import UserProfileDialog +from .room_reservation_dialog import RoomReservationDialog -__all__ = ["DialogHelper", "UserProfileDialog"] +__all__ = ["RoomReservationDialog"] diff --git a/src/dialogs/data_models/__init__.py b/src/dialogs/data_models/__init__.py index 7171365..6b200e2 100644 --- a/src/dialogs/data_models/__init__.py +++ b/src/dialogs/data_models/__init__.py @@ -1,4 +1,4 @@ -from .user_profile import UserProfile +from .room_reservation import RoomReservation -__all__ = ["UserProfile"] +__all__ = ["RoomReservation"] diff --git 
a/src/dialogs/data_models/room_reservation.py b/src/dialogs/data_models/room_reservation.py new file mode 100644 index 0000000..1c31644 --- /dev/null +++ b/src/dialogs/data_models/room_reservation.py @@ -0,0 +1,9 @@ + +class RoomReservation: + """Hotel's room reservation state.""" + + def __init__(self, people: int = None, duration: int = None, breakfast: bool = None): + + self.people: int = people # Number of people + self.duration: int = duration # Number of nights + self.breakfast: bool = breakfast # If they take breakfast diff --git a/src/dialogs/data_models/user_profile.py b/src/dialogs/data_models/user_profile.py deleted file mode 100644 index b079352..0000000 --- a/src/dialogs/data_models/user_profile.py +++ /dev/null @@ -1,14 +0,0 @@ - -from botbuilder.schema import Attachment - - -class UserProfile: - """ - This is our application state. Just a regular serializable Python class. - """ - - def __init__(self, name: str = None, transport: str = None, age: int = 0, picture: Attachment = None): - self.name = name - self.transport = transport - self.age = age - self.picture = picture diff --git a/src/dialogs/helpers/__init__.py b/src/dialogs/helpers/__init__.py new file mode 100644 index 0000000..cb98e27 --- /dev/null +++ b/src/dialogs/helpers/__init__.py @@ -0,0 +1,4 @@ + +from .dialogs_helper import DialogHelper + +__all__ = ["DialogHelper"] diff --git a/src/dialogs/dialogs_helper.py b/src/dialogs/helpers/dialogs_helper.py similarity index 100% rename from src/dialogs/dialogs_helper.py rename to src/dialogs/helpers/dialogs_helper.py diff --git a/src/dialogs/helpers/nlu_helper.py b/src/dialogs/helpers/nlu_helper.py new file mode 100644 index 0000000..e69de29 diff --git a/src/dialogs/example.py b/src/dialogs/room_reservation_dialog.py similarity index 76% rename from src/dialogs/example.py rename to src/dialogs/room_reservation_dialog.py index 653ddae..bda5a33 100644 --- a/src/dialogs/example.py +++ b/src/dialogs/room_reservation_dialog.py @@ -4,16 +4,16 @@ from botbuilder.dialogs.choices import Choice from botbuilder.core import MessageFactory, UserState -from .data_models import UserProfile +from .data_models import RoomReservation -class UserProfileDialog(ComponentDialog): +class RoomReservationDialog(ComponentDialog): def __init__(self, user_state: UserState): - super(UserProfileDialog, self).__init__(UserProfileDialog.__name__) + super(RoomReservationDialog, self).__init__(RoomReservationDialog.__name__) # Load the UserProfile class - self.user_profile_accessor = user_state.create_property("UserProfile") + self.room_reservation_accessor = user_state.create_property("RoomReservation") # Setup the waterfall dialog self.add_dialog(WaterfallDialog(WaterfallDialog.__name__, [ @@ -50,14 +50,14 @@ async def people_step(step_context: WaterfallStepContext) -> DialogTurnResult: async def nights_step(step_context: WaterfallStepContext) -> DialogTurnResult: # Save the number of people - step_context.values["peoples"] = step_context.result.value + step_context.values["people"] = step_context.result.value # Confirm the number of people await step_context.context.send_activity( MessageFactory.text(f"Okay, for {step_context.result.value}") ) - # NumberPrompt - How many nights ? + # NumberPrompt - How many nights ? 
(duration) return await step_context.prompt( NumberPrompt.__name__, PromptOptions( @@ -69,7 +69,7 @@ async def nights_step(step_context: WaterfallStepContext) -> DialogTurnResult: async def breakfast_step(step_context: WaterfallStepContext) -> DialogTurnResult: # Save the number of nights - step_context.values["nights"] = step_context.result + step_context.values["duration"] = step_context.result # ConfirmPrompt - Is taking breakfast ? return await step_context.prompt( @@ -81,7 +81,10 @@ async def breakfast_step(step_context: WaterfallStepContext) -> DialogTurnResult async def summary_step(self, step_context: WaterfallStepContext) -> DialogTurnResult: - # If the user said True: + # Save if the user take the breakfast (bool) + step_context.values["breakfast"] = step_context.result + + # If the user said "Yes": if step_context.result: # Confirm breakfast hour @@ -90,16 +93,13 @@ async def summary_step(self, step_context: WaterfallStepContext) -> DialogTurnRe ) # Save information to Reservation object - user_profile = await self.user_profile_accessor.get( - step_context.context, UserProfile + room_reservation = await self.room_reservation_accessor.get( + step_context.context, RoomReservation ) - """ - user_profile.transport = step_context.values["transport"] - user_profile.name = step_context.values["name"] - user_profile.age = step_context.values["age"] - user_profile.picture = step_context.values["picture"] - """ + room_reservation.people = step_context.values["people"] + room_reservation.duration = step_context.values["duration"] + room_reservation.breakfast = step_context.values["breakfast"] # End the dialog await step_context.context.send_activity( From 54b88129db4dedb5656838bd59876cba565bae63 Mon Sep 17 00:00:00 2001 From: Joffrey Bienvenu Date: Sun, 7 Feb 2021 02:41:44 +0100 Subject: [PATCH 7/8] Simple dialogs for hotel's room booking fully implemented --- main.py | 15 ++- src/bot.py | 12 +- src/dialogs/__init__.py | 5 +- src/dialogs/booking_room_dialog.py | 179 +++++++++++++++++++++++++ src/dialogs/helpers/__init__.py | 3 +- src/dialogs/helpers/nlu_helper.py | 10 ++ src/dialogs/main_dialog.py | 98 ++++++++++++++ src/dialogs/room_reservation_dialog.py | 109 --------------- src/dialogs/utils/__init__.py | 4 + src/dialogs/utils/emoji.py | 7 + src/nlu/__init__.py | 3 +- src/nlu/classifying/classifier.py | 7 +- src/nlu/intent.py | 15 +++ src/nlu/nlu.py | 13 +- 14 files changed, 350 insertions(+), 130 deletions(-) create mode 100644 src/dialogs/booking_room_dialog.py create mode 100644 src/dialogs/main_dialog.py delete mode 100644 src/dialogs/room_reservation_dialog.py create mode 100644 src/dialogs/utils/__init__.py create mode 100644 src/dialogs/utils/emoji.py create mode 100644 src/nlu/intent.py diff --git a/main.py b/main.py index 0073228..6a24ddc 100644 --- a/main.py +++ b/main.py @@ -10,7 +10,8 @@ from botbuilder.core.integration import aiohttp_error_middleware from botbuilder.schema import Activity, ActivityTypes -from src.dialogs import RoomReservationDialog +from src.dialogs import MainDialog, BookingRoomDialog +from src.nlu import NLU from src import Bot from config import Config @@ -63,9 +64,15 @@ async def on_error(context: TurnContext, error_: Exception): CONVERSATION_STATE = ConversationState(MEMORY) USER_STATE = UserState(MEMORY) -# Create main dialog and bot -DIALOG = RoomReservationDialog(USER_STATE) -bot = Bot(CONVERSATION_STATE, USER_STATE, DIALOG) +# Load the NLU recognizer +nlu = NLU() + +# Create the dialogs +dialog_room_reservation = BookingRoomDialog(nlu, USER_STATE) 
+dialog_main = MainDialog(nlu, USER_STATE, dialog_room_reservation) + +# Create the bot +bot = Bot(CONVERSATION_STATE, USER_STATE, dialog_main) # Direct message API diff --git a/src/bot.py b/src/bot.py index e7fc5f3..e486862 100644 --- a/src/bot.py +++ b/src/bot.py @@ -1,11 +1,10 @@ +from botbuilder.schema import ChannelAccount from botbuilder.core import ActivityHandler, TurnContext, ConversationState, UserState from botbuilder.dialogs import Dialog +from .dialogs.utils import Emoji from .dialogs.helpers import DialogHelper -from .nlu import NLU - -nlu = NLU() class Bot(ActivityHandler): @@ -16,6 +15,13 @@ def __init__(self, conversation_state: ConversationState, user_state: UserState, self.user_state = user_state self.dialog = dialog + async def on_members_added_activity(self, members_added: [ChannelAccount], turn_context: TurnContext): + + # Send an "Hello" to any new user connected to the bot + for member in members_added: + if member.id != turn_context.activity.recipient.id: + await turn_context.send_activity(f"Hello {Emoji.WAVING_HAND.value}") + async def on_turn(self, turn_context: TurnContext): await super().on_turn(turn_context) diff --git a/src/dialogs/__init__.py b/src/dialogs/__init__.py index 4c695f0..a928908 100644 --- a/src/dialogs/__init__.py +++ b/src/dialogs/__init__.py @@ -1,4 +1,5 @@ -from .room_reservation_dialog import RoomReservationDialog +from .booking_room_dialog import BookingRoomDialog +from .main_dialog import MainDialog -__all__ = ["RoomReservationDialog"] +__all__ = ["BookingRoomDialog", "MainDialog"] diff --git a/src/dialogs/booking_room_dialog.py b/src/dialogs/booking_room_dialog.py new file mode 100644 index 0000000..d24867e --- /dev/null +++ b/src/dialogs/booking_room_dialog.py @@ -0,0 +1,179 @@ + +from botbuilder.schema import ChannelAccount, CardAction, ActionTypes, SuggestedActions, Activity, ActivityTypes +from botbuilder.dialogs import ComponentDialog, WaterfallDialog, WaterfallStepContext, DialogTurnResult +from botbuilder.dialogs.prompts import TextPrompt, NumberPrompt, ChoicePrompt, ConfirmPrompt, AttachmentPrompt, PromptOptions, PromptValidatorContext +from botbuilder.dialogs.choices import Choice +from botbuilder.core import MessageFactory, UserState + +from src.nlu import Intent, NLU +from .utils import Emoji +from .helpers import NLUHelper +from .data_models import RoomReservation + + +class BookingRoomDialog(ComponentDialog): + + def __init__(self, nlu_recognizer: NLU, user_state: UserState): + super(BookingRoomDialog, self).__init__(BookingRoomDialog.__name__) + + # Load the NLU module + self._nlu_recognizer = nlu_recognizer + + # Load the RoomReservation class + self.room_reservation_accessor = user_state.create_property("RoomReservation") + + # Setup the waterfall dialog + self.add_dialog(WaterfallDialog("WFBookingDialog", [ + self.people_step, + self.duration_step, + self.breakfast_step, + self.summary_step, + ])) + + # Append the prompts and custom prompts + self.add_dialog(NumberPrompt("PeoplePrompt", BookingRoomDialog.people_prompt_validator)) + self.add_dialog(NumberPrompt("DurationPrompt", BookingRoomDialog.duration_prompt_validator)) + self.add_dialog(ConfirmPrompt("IsTakingBreakfastPrompt")) + + self.initial_dialog_id = "WFBookingDialog" + + @staticmethod + async def people_step(step_context: WaterfallStepContext) -> DialogTurnResult: + """Ask the user: how many people to make the reservation?""" + + # Retrieve the booking keywords + booking_keywords: dict = step_context.options + step_context.values['booking_keywords'] = 
booking_keywords
+
+        # If the keyword 'people' exists and is filled, pass the question
+        if 'people' in booking_keywords and booking_keywords['people'] is not None:
+            return await step_context.next(booking_keywords['people'])
+
+        # Give user suggestions (1 or 2 people).
+        # The user can still write a custom number of people [1, 4].
+        options = PromptOptions(
+            prompt=Activity(
+
+                type=ActivityTypes.message,
+                text="Would you like a single or a double room?",
+
+                suggested_actions=SuggestedActions(
+                    actions=[
+                        CardAction(
+                            title="Single",
+                            type=ActionTypes.im_back,
+                            value="Single room (1 person)"
+                        ),
+                        CardAction(
+                            title="Double",
+                            type=ActionTypes.im_back,
+                            value="Double room (2 people)"
+                        )
+                    ]
+                )
+            ),
+            retry_prompt=MessageFactory.text(
+                "Reservations can be made for one to four people only."
+            )
+        )
+
+        # NumberPrompt - How many people?
+        return await step_context.prompt(
+            "PeoplePrompt",
+            options
+        )
+
+    @staticmethod
+    async def duration_step(step_context: WaterfallStepContext) -> DialogTurnResult:
+        """Ask the user how many nights to reserve."""
+
+        # Save the number of people
+        step_context.values["people"] = step_context.result
+
+        # Retrieve the keywords
+        booking_keywords: dict = step_context.values["booking_keywords"]
+
+        # If the keyword 'duration' exists and is filled, pass the question
+        if 'duration' in booking_keywords and booking_keywords['duration'] is not None:
+            return await step_context.next(booking_keywords['duration'])
+
+        # NumberPrompt - How many nights? (duration)
+        return await step_context.prompt(
+            "DurationPrompt",
+            PromptOptions(
+                prompt=MessageFactory.text("How long do you want to stay?"),
+                retry_prompt=MessageFactory.text(
+                    "It is only possible to book from 1 to 7 nights."
+                ),
+            ),
+        )
+
+    @staticmethod
+    async def breakfast_step(step_context: WaterfallStepContext) -> DialogTurnResult:
+
+        # Save the number of nights
+        step_context.values["duration"] = step_context.result
+
+        # Confirm people and duration
+        await step_context.context.send_activity(
+            MessageFactory.text(
+                f"Okay, so {step_context.values['people']} people for {step_context.values['duration']} nights"
+            )
+        )
+
+        # ConfirmPrompt - Is the user taking breakfast?
+        return await step_context.prompt(
+            "IsTakingBreakfastPrompt",
+            PromptOptions(
+                prompt=MessageFactory.text("Will you be having breakfast?")
+            ),
+        )
+
+    async def summary_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
+
+        # Save whether the user is taking breakfast (bool)
+        step_context.values["breakfast"] = step_context.result
+
+        # If the user said "Yes":
+        if step_context.result:
+
+            # Confirm breakfast hour
+            await step_context.context.send_activity(
+                MessageFactory.text("Perfect, breakfast is from 6am to 10am")
+            )
+
+        # Save information to Reservation object
+        room_reservation = await self.room_reservation_accessor.get(
+            step_context.context, RoomReservation
+        )
+
+        room_reservation.people = step_context.values["people"]
+        room_reservation.duration = step_context.values["duration"]
+        room_reservation.breakfast = step_context.values["breakfast"]
+
+        # End the dialog
+        await step_context.context.send_activity(
+            MessageFactory.text("Your booking has been made!")
+        )
+
+        return await step_context.end_dialog()
+
+    @staticmethod
+    async def people_prompt_validator(prompt_context: PromptValidatorContext) -> bool:
+        """Validate the number of people entered by the user."""
+
+        # Restrict people between [1 and 4].
+        return (
+            prompt_context.recognized.succeeded
+            and 1 <= prompt_context.recognized.value <= 4
+        )
+
+    @staticmethod
+    async def duration_prompt_validator(prompt_context: PromptValidatorContext) -> bool:
+        """Validate the number of nights entered by the user."""
+
+        # Restrict nights between [1 and 7].
+        return (
+            prompt_context.recognized.succeeded
+            and 1 <= prompt_context.recognized.value <= 7
+        )
diff --git a/src/dialogs/helpers/__init__.py b/src/dialogs/helpers/__init__.py
index cb98e27..62e19ca 100644
--- a/src/dialogs/helpers/__init__.py
+++ b/src/dialogs/helpers/__init__.py
@@ -1,4 +1,5 @@
 
 from .dialogs_helper import DialogHelper
+from .nlu_helper import NLUHelper
 
-__all__ = ["DialogHelper"]
+__all__ = ["DialogHelper", "NLUHelper"]
 
diff --git a/src/dialogs/helpers/nlu_helper.py b/src/dialogs/helpers/nlu_helper.py
index e69de29..0ece14a 100644
--- a/src/dialogs/helpers/nlu_helper.py
+++ b/src/dialogs/helpers/nlu_helper.py
@@ -0,0 +1,10 @@
+
+from src.nlu import Intent, NLU
+
+
+class NLUHelper:
+
+    @staticmethod
+    async def execute_nlu_query(nlu_recognizer: NLU, message: str) -> (Intent, dict):
+
+        return nlu_recognizer.get_intent(message)
diff --git a/src/dialogs/main_dialog.py b/src/dialogs/main_dialog.py
new file mode 100644
index 0000000..51706ac
--- /dev/null
+++ b/src/dialogs/main_dialog.py
@@ -0,0 +1,98 @@
+
+from botbuilder.schema import InputHints
+from botbuilder.dialogs import ComponentDialog, WaterfallDialog, WaterfallStepContext, DialogTurnResult
+from botbuilder.dialogs.prompts import TextPrompt, PromptOptions
+from botbuilder.core import MessageFactory, UserState
+
+from src.nlu import Intent, NLU
+from . import BookingRoomDialog
+from .utils import Emoji
+from .helpers import NLUHelper
+
+
+class MainDialog(ComponentDialog):
+
+    def __init__(self, nlu_recognizer: NLU, user_state: UserState,
+                 booking_room_dialog: BookingRoomDialog):
+
+        super(MainDialog, self).__init__(MainDialog.__name__)
+
+        # Load the NLU module
+        self._nlu_recognizer = nlu_recognizer
+
+        # Load the sub-dialogs
+        self._booking_dialog_id = booking_room_dialog.id
+
+        # Setup the waterfall dialog
+        self.add_dialog(WaterfallDialog(WaterfallDialog.__name__, [
+            self.intro_step,
+            self.act_step,
+            self.final_step
+        ]))
+
+        # Append the prompts and custom dialogs, used in the waterfall
+        self.add_dialog(TextPrompt("ActPrompt"))
+        self.add_dialog(booking_room_dialog)
+
+        self.initial_dialog_id = WaterfallDialog.__name__
+
+    @staticmethod
+    async def intro_step(step_context: WaterfallStepContext) -> DialogTurnResult:
+        """
+        Intro step. Triggered by any interaction from the user with this bot.
+        """
+
+        # Ask what to do
+        message = (
+            str(step_context.options)
+            if step_context.options
+            else "What can I help you with today?"
+        )
+
+        # TextPrompt - How can I help you?
+        return await step_context.prompt(
+            "ActPrompt",
+            PromptOptions(
+                prompt=MessageFactory.text(message)
+            ),
+        )
+
+    async def act_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
+        """
+        Act step. Take the user's response and infer their intention.
+        Dispatch to the matching sub-dialog.
+        """
+
+        intent, keywords = await NLUHelper.execute_nlu_query(
+            self._nlu_recognizer, step_context.result
+        )
+
+        # Run the BookingRoomDialog, passing it the keywords from the NLU
+        if intent == Intent.BOOK_ROOM:
+            return await step_context.begin_dialog(self._booking_dialog_id, keywords)
+
+        # If no intent was understood, send a "didn't understand" message
+        else:
+            didnt_understand_text = (
+                "Sorry, I didn't get that. 
Please try asking in a different way" + ) + + await step_context.context.send_activity( + MessageFactory.text( + didnt_understand_text, didnt_understand_text, InputHints.ignoring_input + ) + ) + + return await step_context.next(None) + + async def final_step(self, step_context: WaterfallStepContext) -> DialogTurnResult: + """ + Final step. Triggered upon sub-dialog completion. Replace the current + dialog by the main dialog to start a new loop of conversation. + """ + + # Replace the current dialog back to main dialog + return await step_context.replace_dialog( + self.id, + "What else can I do for you?" + ) diff --git a/src/dialogs/room_reservation_dialog.py b/src/dialogs/room_reservation_dialog.py deleted file mode 100644 index bda5a33..0000000 --- a/src/dialogs/room_reservation_dialog.py +++ /dev/null @@ -1,109 +0,0 @@ - -from botbuilder.dialogs import ComponentDialog, WaterfallDialog, WaterfallStepContext, DialogTurnResult -from botbuilder.dialogs.prompts import TextPrompt, NumberPrompt, ChoicePrompt, ConfirmPrompt, AttachmentPrompt, PromptOptions, PromptValidatorContext -from botbuilder.dialogs.choices import Choice -from botbuilder.core import MessageFactory, UserState - -from .data_models import RoomReservation - - -class RoomReservationDialog(ComponentDialog): - - def __init__(self, user_state: UserState): - super(RoomReservationDialog, self).__init__(RoomReservationDialog.__name__) - - # Load the UserProfile class - self.room_reservation_accessor = user_state.create_property("RoomReservation") - - # Setup the waterfall dialog - self.add_dialog(WaterfallDialog(WaterfallDialog.__name__, [ - self.people_step, - self.nights_step, - self.breakfast_step, - self.summary_step, - ])) - - # Append the prompts and custom prompts - # self.add_dialog(TextPrompt(TextPrompt.__name__)) - self.add_dialog(NumberPrompt(NumberPrompt.__name__)) - self.add_dialog(ChoicePrompt(ChoicePrompt.__name__)) - self.add_dialog(ConfirmPrompt(ConfirmPrompt.__name__)) - - self.initial_dialog_id = WaterfallDialog.__name__ - - @staticmethod - async def people_step(step_context: WaterfallStepContext) -> DialogTurnResult: - - # ChoicePrompt - How many people ? - return await step_context.prompt( - ChoicePrompt.__name__, - PromptOptions( - prompt=MessageFactory.text("What size room will you need?"), - choices=[ - Choice("2 peoples"), - Choice("4 peoples"), - ], - ), - ) - - @staticmethod - async def nights_step(step_context: WaterfallStepContext) -> DialogTurnResult: - - # Save the number of people - step_context.values["people"] = step_context.result.value - - # Confirm the number of people - await step_context.context.send_activity( - MessageFactory.text(f"Okay, for {step_context.result.value}") - ) - - # NumberPrompt - How many nights ? (duration) - return await step_context.prompt( - NumberPrompt.__name__, - PromptOptions( - prompt=MessageFactory.text("How long do you want to stay?") - ), - ) - - @staticmethod - async def breakfast_step(step_context: WaterfallStepContext) -> DialogTurnResult: - - # Save the number of nights - step_context.values["duration"] = step_context.result - - # ConfirmPrompt - Is taking breakfast ? 
- return await step_context.prompt( - ConfirmPrompt.__name__, - PromptOptions( - prompt=MessageFactory.text("Will you be having breakfast?") - ), - ) - - async def summary_step(self, step_context: WaterfallStepContext) -> DialogTurnResult: - - # Save if the user take the breakfast (bool) - step_context.values["breakfast"] = step_context.result - - # If the user said "Yes": - if step_context.result: - - # Confirm breakfast hour - await step_context.context.send_activity( - MessageFactory.text(f"Perfect, breakfast is from 6am to 10am.") - ) - - # Save information to Reservation object - room_reservation = await self.room_reservation_accessor.get( - step_context.context, RoomReservation - ) - - room_reservation.people = step_context.values["people"] - room_reservation.duration = step_context.values["duration"] - room_reservation.breakfast = step_context.values["breakfast"] - - # End the dialog - await step_context.context.send_activity( - MessageFactory.text("Thanks. See you !") - ) - - return await step_context.end_dialog() diff --git a/src/dialogs/utils/__init__.py b/src/dialogs/utils/__init__.py new file mode 100644 index 0000000..d43a4a5 --- /dev/null +++ b/src/dialogs/utils/__init__.py @@ -0,0 +1,4 @@ + +from .emoji import Emoji + +__all__ = ['Emoji'] diff --git a/src/dialogs/utils/emoji.py b/src/dialogs/utils/emoji.py new file mode 100644 index 0000000..3d1dae2 --- /dev/null +++ b/src/dialogs/utils/emoji.py @@ -0,0 +1,7 @@ + +from enum import Enum + + +class Emoji(Enum): + + WAVING_HAND = "\U0001F44B" diff --git a/src/nlu/__init__.py b/src/nlu/__init__.py index d17c5f8..8e8fbe6 100644 --- a/src/nlu/__init__.py +++ b/src/nlu/__init__.py @@ -1,4 +1,5 @@ +from .intent import Intent from .nlu import NLU -__all__ = ["NLU"] +__all__ = ["NLU", "Intent"] diff --git a/src/nlu/classifying/classifier.py b/src/nlu/classifying/classifier.py index d187328..ab63a37 100644 --- a/src/nlu/classifying/classifier.py +++ b/src/nlu/classifying/classifier.py @@ -5,6 +5,7 @@ import torch from transformers import BertTokenizer, BertForSequenceClassification +from src.nlu import Intent from config import Config config = Config() @@ -68,7 +69,7 @@ def _load_model(self) -> BertForSequenceClassification: return model - def predict(self, dataset: BertTokenizer): + def predict(self, dataset: BertTokenizer) -> Intent: """Make a prediction and return the class.""" # Make the prediction, get an array of probabilities @@ -81,5 +82,5 @@ def predict(self, dataset: BertTokenizer): # Get the predicted class index _, predicted_index = torch.max(probabilities[0], dim=1) - # Return the class name - return self.labels[predicted_index.data[0].item()] + # Return the intent + return Intent(self.labels[predicted_index[0].item()]) diff --git a/src/nlu/intent.py b/src/nlu/intent.py new file mode 100644 index 0000000..adce4a1 --- /dev/null +++ b/src/nlu/intent.py @@ -0,0 +1,15 @@ + +from enum import Enum + + +class Intent(Enum): + + # Yes/No + YES = "smalltalk_confirmation_yes" + NO = "smalltalk_confirmation_no" + + # Small talks + GREETINGS = "smalltalk_greetings_hello" + + # Hotel long talks + BOOK_ROOM = "longtalk_make_reservation" diff --git a/src/nlu/nlu.py b/src/nlu/nlu.py index c59370a..c46a4e8 100644 --- a/src/nlu/nlu.py +++ b/src/nlu/nlu.py @@ -1,9 +1,8 @@ -from typing import Tuple - -from src.nlu.matching import Matcher -from src.nlu.preprocessing import Preprocessor, Tokenizer -from src.nlu.classifying import Classifier +from . 
import Intent +from .matching import Matcher +from .preprocessing import Preprocessor, Tokenizer +from .classifying import Classifier class NLU: @@ -18,7 +17,7 @@ def __init__(self): self.classifier = Classifier() self.matcher = Matcher() - def get_intent(self, message: str) -> Tuple[str, dict]: + def get_intent(self, message: str) -> (Intent, dict): """ Return the intention and the keywords of a given message. """ @@ -29,6 +28,6 @@ def get_intent(self, message: str) -> Tuple[str, dict]: # Get the intention intent = self.classifier.predict(dataset) - keywords = self.matcher.get_keywords(preprocessed_text, intent) + keywords = self.matcher.get_keywords(preprocessed_text, intent.value) return intent, keywords From 89cfa81d771c122dabe20ddba09b133d598279a8 Mon Sep 17 00:00:00 2001 From: Joffrey Bienvenu Date: Sun, 7 Feb 2021 03:18:45 +0100 Subject: [PATCH 8/8] Model loader uncommented --- src/nlu/classifying/classifier.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nlu/classifying/classifier.py b/src/nlu/classifying/classifier.py index ab63a37..26c0278 100644 --- a/src/nlu/classifying/classifier.py +++ b/src/nlu/classifying/classifier.py @@ -51,7 +51,7 @@ def _load_model(self) -> BertForSequenceClassification: """ # Download and save the weights locally - # self.__load_remote_file(config.MODEL_WEIGHT_URL, config.MODEL_WEIGHT_LOCAL_COPY) + self.__load_remote_file(config.MODEL_WEIGHT_URL, config.MODEL_WEIGHT_LOCAL_COPY) # Instantiate the model model = BertForSequenceClassification.from_pretrained(