Skip to content

Commit

Permalink
Expecting input should only turn on microphone if via microphone (#2166)
Browse files Browse the repository at this point in the history
* Expecting input should only turn on microphone if via microphone

* Update entry

* Remove a test
  • Loading branch information
compulim authored Jul 10, 2019
1 parent b0c803e commit d1b9b97
Show file tree
Hide file tree
Showing 7 changed files with 189 additions and 45 deletions.
2 changes: 1 addition & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Fix [#2134](https://github.com/microsoft/BotFramework-WebChat/issues/2134). Added `azure-pipelines.yml` for embed package, by [@compulim](https://github.com/compulim) in PR [#2135](https://github.com/microsoft/BotFramework-WebChat/pull/2135)
- Fix [#2106](https://github.com/microsoft/BotFramework-WebChat/issues/2106). Fix `AdaptiveCardHostConfig` warning associated with the `CommonCard` component, by [@tdurnford](https://github.com/tdurnford) in PR [#2108](https://github.com/microsoft/BotFramework-WebChat/pull/2108)
- Fix [#1872](https://github.com/microsoft/BotFramework-WebChat/issues/1872). Fixed `observeOnce` to unsubscribe properly, by [@compulim](https://github.com/compulim) in PR [#2140](https://github.com/microsoft/BotFramework-WebChat/pull/2140)
- Fix [#2022](https://github.com/microsoft/BotFramework-WebChat/issues/2022). Fixed `"expectingInput"` in `inputHint` is not respected, by [@compulim](https://github.com/compulim) and [@corinagum](https://github.com/corinagum) in PR [#2149](https://github.com/microsoft/BotFramework-WebChat/pull/2149)
- Fix [#2022](https://github.com/microsoft/BotFramework-WebChat/issues/2022). Fixed `"expectingInput"` in `inputHint` is not respected, by [@compulim](https://github.com/compulim) and [@corinagum](https://github.com/corinagum) in PR [#2149](https://github.com/microsoft/BotFramework-WebChat/pull/2149) and PR [#2166](https://github.com/microsoft/BotFramework-WebChat/pull/2166)

### Samples

Expand Down
165 changes: 165 additions & 0 deletions __tests__/inputHint.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,165 @@
import { timeouts } from './constants.json';

import isRecognizingSpeech from './setup/pageObjects/isRecognizingSpeech';
import minNumActivitiesShown from './setup/conditions/minNumActivitiesShown';
import speechSynthesisPending from './setup/conditions/speechSynthesisPending';
import uiConnected from './setup/conditions/uiConnected';

// selenium-webdriver API doc:
// https://seleniumhq.github.io/selenium/docs/api/javascript/module/selenium-webdriver/index_exports_WebDriver.html

jest.setTimeout(timeouts.test);

describe('input hint', () => {
// Bot replies with inputHint "expectingInput": the microphone should re-open
// after synthesis completes, but only when the conversation was speech-initiated.
describe('of expectingInput', () => {
  test('should turn on microphone if initiated via microphone', async () => {
    const { driver, pageObjects } = await setupWebDriver({
      props: {
        webSpeechPonyfillFactory: () => window.WebSpeechMock
      }
    });

    await driver.wait(uiConnected(), timeouts.directLine);

    await pageObjects.sendMessageViaMicrophone('hint expecting input');

    await driver.wait(minNumActivitiesShown(2), timeouts.directLine);

    // Drain the queued synthesis; dictation should only start after the bot finished speaking.
    await driver.wait(speechSynthesisPending(), timeouts.ui);
    await pageObjects.startSpeechSynthesize();
    await pageObjects.endSpeechSynthesize();

    // Must await the .resolves assertion, otherwise a failure is silently swallowed.
    await expect(isRecognizingSpeech(driver)).resolves.toBeTruthy();
  });

  test('should not turn on microphone if initiated via typing', async () => {
    const { driver, pageObjects } = await setupWebDriver({
      props: {
        webSpeechPonyfillFactory: () => window.WebSpeechMock
      }
    });

    await driver.wait(uiConnected(), timeouts.directLine);

    // This scenario is keyboard-initiated: send through the send box, not the microphone.
    await pageObjects.sendMessageViaSendBox('hint expecting input');

    await driver.wait(minNumActivitiesShown(2), timeouts.directLine);

    // Typed messages are not spoken back, so no synthesis to drain; mic must stay off.
    await expect(isRecognizingSpeech(driver)).resolves.toBeFalsy();
  });
});

// Bot replies with inputHint "acceptingInput": input is welcome but the
// microphone should never be turned on automatically.
describe('of acceptingInput', () => {
  test('should not turn on microphone if initiated via microphone', async () => {
    const { driver, pageObjects } = await setupWebDriver({
      props: {
        webSpeechPonyfillFactory: () => window.WebSpeechMock
      }
    });

    await driver.wait(uiConnected(), timeouts.directLine);

    await pageObjects.sendMessageViaMicrophone('hint accepting input');

    await driver.wait(minNumActivitiesShown(2), timeouts.directLine);

    // Speech-initiated, so the bot response is synthesized; drain it first.
    await driver.wait(speechSynthesisPending(), timeouts.ui);
    await pageObjects.startSpeechSynthesize();
    await pageObjects.endSpeechSynthesize();

    // Must await the .resolves assertion, otherwise a failure is silently swallowed.
    await expect(isRecognizingSpeech(driver)).resolves.toBeFalsy();
  });

  test('should not turn on microphone if initiated via typing', async () => {
    const { driver, pageObjects } = await setupWebDriver({
      props: {
        webSpeechPonyfillFactory: () => window.WebSpeechMock
      }
    });

    await driver.wait(uiConnected(), timeouts.directLine);

    await pageObjects.sendMessageViaSendBox('hint accepting input');

    await driver.wait(minNumActivitiesShown(2), timeouts.directLine);

    await expect(isRecognizingSpeech(driver)).resolves.toBeFalsy();
  });
});

// Bot replies with inputHint "ignoringInput": the microphone should be
// turned off regardless of how the message was initiated.
describe('of ignoringInput', () => {
  test('should turn off microphone if initiated via microphone', async () => {
    const { driver, pageObjects } = await setupWebDriver({
      props: {
        webSpeechPonyfillFactory: () => window.WebSpeechMock
      }
    });

    await driver.wait(uiConnected(), timeouts.directLine);

    await pageObjects.sendMessageViaMicrophone('hint ignoring input');

    await driver.wait(minNumActivitiesShown(2), timeouts.directLine);

    // Speech-initiated, so the bot response is synthesized; drain it first.
    await driver.wait(speechSynthesisPending(), timeouts.ui);
    await pageObjects.startSpeechSynthesize();
    await pageObjects.endSpeechSynthesize();

    // Must await the .resolves assertion, otherwise a failure is silently swallowed.
    await expect(isRecognizingSpeech(driver)).resolves.toBeFalsy();
  });

  test('should turn off microphone if initiated via typing', async () => {
    const { driver, pageObjects } = await setupWebDriver({
      props: {
        webSpeechPonyfillFactory: () => window.WebSpeechMock
      }
    });

    await driver.wait(uiConnected(), timeouts.directLine);

    await pageObjects.sendMessageViaSendBox('hint ignoring input');

    await driver.wait(minNumActivitiesShown(2), timeouts.directLine);

    await expect(isRecognizingSpeech(driver)).resolves.toBeFalsy();
  });
});

// Bot replies without an inputHint: treat like "acceptingInput" — never
// turn the microphone on automatically.
describe('of undefined', () => {
  test('should not turn on microphone if initiated via microphone', async () => {
    const { driver, pageObjects } = await setupWebDriver({
      props: {
        webSpeechPonyfillFactory: () => window.WebSpeechMock
      }
    });

    await driver.wait(uiConnected(), timeouts.directLine);

    await pageObjects.sendMessageViaMicrophone('hint undefined');

    await driver.wait(minNumActivitiesShown(2), timeouts.directLine);

    // Speech-initiated, so the bot response is synthesized; drain it first.
    await driver.wait(speechSynthesisPending(), timeouts.ui);
    await pageObjects.startSpeechSynthesize();
    await pageObjects.endSpeechSynthesize();

    // Must await the .resolves assertion, otherwise a failure is silently swallowed.
    await expect(isRecognizingSpeech(driver)).resolves.toBeFalsy();
  });

  test('should not turn on microphone if initiated via typing', async () => {
    const { driver, pageObjects } = await setupWebDriver({
      props: {
        webSpeechPonyfillFactory: () => window.WebSpeechMock
      }
    });

    await driver.wait(uiConnected(), timeouts.directLine);

    await pageObjects.sendMessageViaSendBox('hint undefined');

    await driver.wait(minNumActivitiesShown(2), timeouts.directLine);

    await expect(isRecognizingSpeech(driver)).resolves.toBeFalsy();
  });
});
});
2 changes: 2 additions & 0 deletions __tests__/setup/pageObjects/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import hasPendingSpeechSynthesisUtterance from './hasPendingSpeechSynthesisUtter
import isRecognizingSpeech from './isRecognizingSpeech';
import pingBot from './pingBot';
import putSpeechRecognitionResult from './putSpeechRecognitionResult';
import sendMessageViaMicrophone from './sendMessageViaMicrophone';
import sendMessageViaSendBox from './sendMessageViaSendBox';
import startSpeechSynthesize from './startSpeechSynthesize';

Expand All @@ -30,6 +31,7 @@ export default function pageObjects(driver) {
isRecognizingSpeech,
pingBot,
putSpeechRecognitionResult,
sendMessageViaMicrophone,
sendMessageViaSendBox,
startSpeechSynthesize
},
Expand Down
16 changes: 16 additions & 0 deletions __tests__/setup/pageObjects/sendMessageViaMicrophone.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
import { timeouts } from '../../constants.json';
import allOutgoingActivitiesSent from '../conditions/allOutgoingActivitiesSent';
import getMicrophoneButton from './getMicrophoneButton';
import putSpeechRecognitionResult from './putSpeechRecognitionResult';
import speechRecognitionStarted from '../conditions/speechRecognitionStarted';

/**
 * Sends a message through the (mocked) speech recognition pipeline: clicks the
 * microphone button, waits for recognition to start, then injects `text` as the
 * recognized phrase.
 *
 * @param {WebDriver} driver - selenium-webdriver instance for the page
 * @param {string} text - phrase to inject as the speech recognition result
 * @param {Object} [options]
 * @param {boolean} [options.waitForSend=true] - when true, blocks until every
 *   outgoing activity has been acknowledged as sent
 */
export default async function sendMessageViaMicrophone(driver, text, { waitForSend = true } = {}) {
  const micButton = await getMicrophoneButton(driver);

  await micButton.click();
  await driver.wait(speechRecognitionStarted(), timeouts.ui);

  // Feed the mock recognizer the final "recognize" result for this utterance.
  await putSpeechRecognitionResult(driver, 'recognize', text);

  if (waitForSend) {
    await driver.wait(allOutgoingActivitiesSent(), timeouts.directLine);
  }
}
2 changes: 1 addition & 1 deletion __tests__/setup/pageObjects/sendMessageViaSendBox.js
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ import { timeouts } from '../../constants.json';
import allOutgoingActivitiesSent from '../conditions/allOutgoingActivitiesSent';
import getSendBoxTextBox from './getSendBoxTextBox';

export default async function sendMessageViaSendBox(driver, text, { waitForSend = true }) {
export default async function sendMessageViaSendBox(driver, text, { waitForSend = true } = {}) {
const input = await getSendBoxTextBox(driver);

await input.sendKeys(text, Key.RETURN);
Expand Down
45 changes: 3 additions & 42 deletions __tests__/speech.js
Original file line number Diff line number Diff line change
@@ -1,64 +1,25 @@
import { imageSnapshotOptions, timeouts } from './constants.json';
import { timeouts } from './constants.json';

import allOutgoingActivitiesSent from './setup/conditions/allOutgoingActivitiesSent.js';
import minNumActivitiesShown from './setup/conditions/minNumActivitiesShown';
import speechRecognitionStarted, {
negate as speechRecognitionNotStarted
} from './setup/conditions/speechRecognitionStarted';
import { negate as speechRecognitionNotStarted } from './setup/conditions/speechRecognitionStarted';
import speechSynthesisPending, { negate as speechSynthesisNotPending } from './setup/conditions/speechSynthesisPending';
import uiConnected from './setup/conditions/uiConnected';

// selenium-webdriver API doc:
// https://seleniumhq.github.io/selenium/docs/api/javascript/module/selenium-webdriver/index_exports_WebDriver.html

jest.setTimeout(timeouts.test);

describe('speech recognition', () => {
test('should send on successful recognition', async () => {
const { driver, pageObjects } = await setupWebDriver({
props: {
webSpeechPonyfillFactory: () => window.WebSpeechMock
}
});

await driver.wait(uiConnected(), timeouts.directLine);

const microphoneButton = await pageObjects.getMicrophoneButton();

await microphoneButton.click();

await driver.wait(speechRecognitionStarted(), timeouts.ui);
await pageObjects.putSpeechRecognitionResult('recognize', 'Hello, World!');
await driver.wait(minNumActivitiesShown(2), timeouts.directLine);
await driver.wait(allOutgoingActivitiesSent(), timeouts.directLine);
await driver.wait(speechSynthesisPending(), timeouts.ui);

const utterance = await pageObjects.startSpeechSynthesize();

expect(utterance).toHaveProperty(
'text',
`Unknown command: I don't know Hello, World!. You can say \"help\" to learn more.`
);

await pageObjects.endSpeechSynthesize();
await driver.wait(speechRecognitionStarted(), timeouts.ui);
});

test('should not start recognition after typing on keyboard while synthesizing', async () => {
const { driver, pageObjects } = await setupWebDriver({
props: {
webSpeechPonyfillFactory: () => window.WebSpeechMock
}
});

const microphoneButton = await pageObjects.getMicrophoneButton();

await microphoneButton.click();
await pageObjects.sendMessageViaMicrophone('Hello, World!');

await driver.wait(speechRecognitionStarted(), timeouts.ui);
await pageObjects.putSpeechRecognitionResult('recognize', 'Hello, World!');
await driver.wait(minNumActivitiesShown(2), timeouts.directLine);
await driver.wait(allOutgoingActivitiesSent(), timeouts.directLine);
await driver.wait(speechSynthesisPending(), timeouts.ui);

const utterance = await pageObjects.startSpeechSynthesize();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ function* speakActivityAndStartDictateOnIncomingActivityFromOthers({ userID }) {
yield put(markActivity(activity, 'speak', true));
}

if (activity.inputHint === 'expectingInput' || (shouldSpeak && activity.inputHint !== 'ignoringInput')) {
if (shouldSpeak && activity.inputHint === 'expectingInput') {
yield put(startDictate());
} else if (activity.inputHint === 'ignoringInput') {
yield put(stopDictate());
Expand Down

0 comments on commit d1b9b97

Please sign in to comment.