Skip to content

Commit

Permalink
Should not synthesize without speak property (#3059)
Browse files Browse the repository at this point in the history
* Should not synthesize without speak property

* Add entry

* Update CHANGELOG.md

Co-Authored-By: TJ Durnford <[email protected]>

Co-authored-by: TJ Durnford <[email protected]>
  • Loading branch information
compulim and tdurnford authored Apr 2, 2020
1 parent 903f181 commit 1855c73
Show file tree
Hide file tree
Showing 6 changed files with 44 additions and 11 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Fixes [#2818](https://github.com/microsoft/BotFramework-WebChat/issues/2818). Fix user ID is not set when passing to embed as query parameter, by [@p-nagpal](https://github.com/p-nagpal) in PR [#3031](https://github.com/microsoft/BotFramework-WebChat/pull/3031)
- Fixes [#3026](https://github.com/microsoft/BotFramework-WebChat/issues/3026). Fix link `rel` attribute in the `renderMarkdown` function, by [@tdurnford](https://github.com/tdurnford) in PR [#3033](https://github.com/microsoft/BotFramework-WebChat/pull/3033)
- Fixes [#2933](https://github.com/microsoft/BotFramework-WebChat/issues/2933). Fix `text` should not be ignored in `messageBack` action in hero card, by [@geea-develop](https://github.com/geea-develop) and [@compulim](https://github.com/compulim) in PR [#3003](https://github.com/microsoft/BotFramework-WebChat/pull/3003)
- Fixes [#2953](https://github.com/microsoft/BotFramework-WebChat/issues/2953). Direct Line Speech should not synthesize when the `speak` property is falsy, by [@compulim](https://github.com/compulim) in PR [#3059](https://github.com/microsoft/BotFramework-WebChat/pull/3059)

### Changed

Expand Down
5 changes: 5 additions & 0 deletions packages/directlinespeech/__tests__/constants.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"timeouts": {
"test": 15000
}
}
21 changes: 20 additions & 1 deletion packages/directlinespeech/__tests__/sendSpeechActivity.js
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,15 @@

import 'global-agent/bootstrap';

import { timeouts } from './constants.json';
import createTestHarness from './utilities/createTestHarness';
import MockAudioContext from './utilities/MockAudioContext';
import recognizeActivityAsText from './utilities/recognizeActivityAsText';
import subscribeAll from './utilities/observable/subscribeAll';
import take from './utilities/observable/take';
import waitForConnected from './utilities/waitForConnected';

jest.setTimeout(15000);
jest.setTimeout(timeouts.test);

beforeEach(() => {
global.AudioContext = MockAudioContext;
Expand Down Expand Up @@ -58,3 +59,21 @@ test('should echo back "Bellevue" when saying "bellview"', async () => {
]
`);
});

test('should not synthesis when "speak" is empty', async () => {
  const { directLine, sendTextAsSpeech } = await createTestHarness();

  // Kick off both waits up front: connection status, and the first incoming activity.
  const connected = waitForConnected(directLine);
  const firstActivityPromise = subscribeAll(take(directLine.activity$, 1));

  await connected;

  // The bot responds to a "Don't speak XXX" command without a "speak" property.
  await sendTextAsSpeech("Don't speak anything.");

  const activities = await firstActivityPromise;
  const utterances = await Promise.all(activities.map(activity => recognizeActivityAsText(activity)));

  // Exactly one activity should arrive, and with no "speak" property there is
  // no synthesized audio to recognize.
  expect(utterances).toHaveProperty('length', 1);
  expect(utterances[0]).toBeFalsy();
});
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@ import { EventSource } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/
import { PromiseHelper } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Promise';
import { Stream } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Stream';

const CHUNK_SIZE = 4096;

class QueuedArrayBufferAudioSource {
constructor(audioFormat, audioSourceId = createNoDashGuid()) {
this._audioFormat = audioFormat;
Expand Down Expand Up @@ -114,11 +116,15 @@ class QueuedArrayBufferAudioSource {

const arrayBuffer = this._queue.shift();

stream.writeStreamChunk({
buffer: arrayBuffer,
isEnd: false,
timeReceived: Date.now()
});
const { byteLength } = arrayBuffer;

for (let i = 0; i < byteLength; i += CHUNK_SIZE) {
stream.writeStreamChunk({
buffer: arrayBuffer.slice(i, Math.min(i + CHUNK_SIZE, byteLength)),
isEnd: false,
timeReceived: Date.now()
});
}

stream.close();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,11 @@ import readCognitiveServicesAudioStreamAsWAVArrayBuffer from './readCognitiveSer
import recognizeRiffWaveArrayBuffer from './recognizeRiffWaveArrayBuffer';

// Recognizes the synthesized speech audio of an activity back into text.
// When the activity carries no audio stream (e.g. the bot sent no "speak"
// property, so Direct Line Speech did not synthesize), resolves to undefined.
export default async function recognizeActivityAsText(activity) {
  const { audioStream } = activity.channelData.speechSynthesisUtterance;

  // Guard clause: nothing was synthesized, nothing to recognize.
  if (!audioStream) {
    return;
  }

  const riffWAVBuffer = await readCognitiveServicesAudioStreamAsWAVArrayBuffer(audioStream);

  return await recognizeRiffWaveArrayBuffer(riffWAVBuffer);
}
2 changes: 1 addition & 1 deletion packages/directlinespeech/src/DirectLineSpeech.js
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ export default class DirectLineSpeech {
...activity,
channelData: {
...activity.channelData,
...(audioStream ? { speechSynthesisUtterance: new SpeechSynthesisAudioStreamUtterance(audioStream) } : {})
speechSynthesisUtterance: new SpeechSynthesisAudioStreamUtterance(audioStream)
},
from: {
...activity.from,
Expand Down

0 comments on commit 1855c73

Please sign in to comment.