-
+
+
{localize(dictating ? 'SPEECH_INPUT_MICROPHONE_BUTTON_OPEN_ALT' : 'SPEECH_INPUT_MICROPHONE_BUTTON_CLOSE_ALT')}
diff --git a/packages/component/src/Styles/StyleSet/MicrophoneButton.js b/packages/component/src/Styles/StyleSet/MicrophoneButton.js
index 544c44dae7..c261f45c46 100644
--- a/packages/component/src/Styles/StyleSet/MicrophoneButton.js
+++ b/packages/component/src/Styles/StyleSet/MicrophoneButton.js
@@ -1,10 +1,9 @@
export default function createMicrophoneButtonStyle({ microphoneButtonColorOnDictate }) {
return {
- // TODO: [P3] This path should not know anything about the DOM tree of
- '&.dictating > .webchat__icon-button': {
- '&:not(:disabled):not([aria-disabled="true"])': {
+ '&.webchat__microphone-button': {
+ '&.webchat__microphone-button--dictating .webchat__microphone-button__button': {
'&, &:focus, &:hover': {
- '& svg': {
+ '& .webchat__microphone-button__icon': {
fill: microphoneButtonColorOnDictate
}
}
diff --git a/packages/directlinespeech/__tests__/refreshToken.directLineToken.js b/packages/directlinespeech/__tests__/refreshToken.directLineToken.js
index 2d8b725bc5..3f01a3a57f 100644
--- a/packages/directlinespeech/__tests__/refreshToken.directLineToken.js
+++ b/packages/directlinespeech/__tests__/refreshToken.directLineToken.js
@@ -39,7 +39,7 @@ test('should refresh Direct Line token', async () => {
jest.useFakeTimers('modern');
const { directLine } = await createTestHarness({ enableInternalHTTPSupport: true });
- const initialToken = directLine.dialogServiceConnector.properties.getProperty(PropertyId.Conversation_ApplicationId);
+ const initialToken = directLine.dialogServiceConnector.properties.getProperty(PropertyId.Conversation_Agent_Connection_Id);
// Wait until 2 seconds in real-time clock, to make sure the token renewed is different (JWT has a per-second timestamp).
await sleep(2000);
@@ -51,6 +51,6 @@ test('should refresh Direct Line token', async () => {
await waitUntil(
() =>
initialToken !==
- directLine.dialogServiceConnector.properties.getProperty(PropertyId.Conversation_ApplicationId, 5000)
+ directLine.dialogServiceConnector.properties.getProperty(PropertyId.Conversation_Agent_Connection_Id, 5000)
);
});
diff --git a/packages/directlinespeech/__tests__/utilities/createQueuedArrayBufferAudioSource.js b/packages/directlinespeech/__tests__/utilities/createQueuedArrayBufferAudioSource.js
index bf67251624..9efff5b6cc 100644
--- a/packages/directlinespeech/__tests__/utilities/createQueuedArrayBufferAudioSource.js
+++ b/packages/directlinespeech/__tests__/utilities/createQueuedArrayBufferAudioSource.js
@@ -10,14 +10,14 @@ import {
AudioStreamNodeDetachedEvent
} from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/AudioSourceEvents';
+import { ChunkedArrayBufferStream } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/ChunkedArrayBufferStream';
import { createNoDashGuid } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Guid';
import { Events } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Events';
import { EventSource } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/EventSource';
-import { PromiseHelper } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Promise';
-import { Stream } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Stream';
-
const CHUNK_SIZE = 4096;
+// This is copied from MicAudioSource, but instead of retrieving from MediaStream, we dump the ArrayBuffer directly.
+
class QueuedArrayBufferAudioSource {
constructor(audioFormat, audioSourceId = createNoDashGuid()) {
this._audioFormat = audioFormat;
@@ -35,10 +35,10 @@ class QueuedArrayBufferAudioSource {
this.attach = this.attach.bind(this);
this.detach = this.detach.bind(this);
this.id = this.id.bind(this);
+ this.listen = this.listen.bind(this);
this.push = this.push.bind(this);
this.turnOff = this.turnOff.bind(this);
this.turnOn = this.turnOn.bind(this);
- this.upload = this.upload.bind(this);
}
push(arrayBuffer) {
@@ -51,7 +51,7 @@ class QueuedArrayBufferAudioSource {
this.onEvent(new AudioSourceErrorEvent(errorMsg, ''));
- return PromiseHelper.fromError(errorMsg);
+ return Promise.reject(errorMsg);
}
this._queue.push(arrayBuffer);
@@ -61,32 +61,33 @@ class QueuedArrayBufferAudioSource {
this.onEvent(new AudioSourceInitializingEvent(this._id)); // no stream id
this.onEvent(new AudioSourceReadyEvent(this._id));
- return PromiseHelper.fromResult(true);
+ return true;
}
id() {
return this._id;
}
- // Returns an IAudioSourceNode asynchronously.
- // Reference at node_modules/microsoft-cognitiveservices-speech-sdk/distrib/es2015/src/common/IAudioSource.d.ts
- attach(audioNodeId) {
+ async attach(audioNodeId) {
this.onEvent(new AudioStreamNodeAttachingEvent(this._id, audioNodeId));
- return this.upload(audioNodeId).onSuccessContinueWith(stream => {
- this.onEvent(new AudioStreamNodeAttachedEvent(this._id, audioNodeId));
+ const stream = await this.listen(audioNodeId);
+
+ this.onEvent(new AudioStreamNodeAttachedEvent(this._id, audioNodeId));
+
+ return {
+ detach: () => {
+ stream.readEnded();
- return {
- detach: () => {
- delete this._streams[audioNodeId];
+ delete this._streams[audioNodeId];
- this.onEvent(new AudioStreamNodeDetachedEvent(this._id, audioNodeId));
- this.turnOff();
- },
- id: () => audioNodeId,
- read: stream.read.bind(stream)
- };
- });
+ this.onEvent(new AudioStreamNodeDetachedEvent(this._id, audioNodeId));
+
+ return this.turnOff();
+ },
+ id: () => audioNodeId,
+ read: () => stream.read()
+ };
}
detach(audioNodeId) {
@@ -104,37 +105,34 @@ class QueuedArrayBufferAudioSource {
this.onEvent(new AudioSourceOffEvent(this._id)); // no stream now
- return PromiseHelper.fromResult(true);
+ return true;
}
- // Creates a new Stream with bytes from the first queued ArrayBuffer.
- upload(audioNodeId) {
- return this.turnOn().onSuccessContinueWith(() => {
- const stream = new Stream(audioNodeId);
+ async listen(audioNodeId) {
+ await this.turnOn();
- this._streams[audioNodeId] = stream;
+ const stream = new ChunkedArrayBufferStream(this.format.avgBytesPerSec / 10, audioNodeId);
- const arrayBuffer = this._queue.shift();
+ this._streams[audioNodeId] = stream;
- const { byteLength } = arrayBuffer;
+ const arrayBuffer = this._queue.shift();
+ const { byteLength } = arrayBuffer;
- for (let i = 0; i < byteLength; i += CHUNK_SIZE) {
- stream.writeStreamChunk({
- buffer: arrayBuffer.slice(i, Math.min(i + CHUNK_SIZE, byteLength)),
- isEnd: false,
- timeReceived: Date.now()
- });
- }
+ for (let i = 0; i < byteLength; i += CHUNK_SIZE) {
+ stream.writeStreamChunk({
+ buffer: arrayBuffer.slice(i, Math.min(i + CHUNK_SIZE, byteLength)),
+ isEnd: false,
+ timeReceived: Date.now()
+ });
+ }
- // Stream will only close the internal stream writer.
- stream.close();
+ stream.close();
- return stream;
- });
+ return stream;
}
get format() {
- return PromiseHelper.fromResult(this._audioFormat);
+ return this._audioFormat;
}
get events() {
@@ -142,7 +140,7 @@ class QueuedArrayBufferAudioSource {
}
get deviceInfo() {
- return PromiseHelper.fromResult({
+ return {
bitspersample: this._audioFormat.bitsPerSample,
channelcount: this._audioFormat.channels,
connectivity: 'Unknown',
@@ -150,7 +148,7 @@ class QueuedArrayBufferAudioSource {
model: 'File',
samplerate: this._audioFormat.samplesPerSec,
type: 'File'
- });
+ };
}
}
diff --git a/packages/directlinespeech/__tests__/utilities/readCognitiveServicesAudioStreamAsRiffWaveArrayBuffer.js b/packages/directlinespeech/__tests__/utilities/readCognitiveServicesAudioStreamAsRiffWaveArrayBuffer.js
index 2030862469..7ccdeb996a 100644
--- a/packages/directlinespeech/__tests__/utilities/readCognitiveServicesAudioStreamAsRiffWaveArrayBuffer.js
+++ b/packages/directlinespeech/__tests__/utilities/readCognitiveServicesAudioStreamAsRiffWaveArrayBuffer.js
@@ -1,19 +1,14 @@
import { AudioStreamFormat } from 'microsoft-cognitiveservices-speech-sdk';
import { RiffPcmEncoder } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/RiffPcmEncoder';
-function cognitiveServicesPromiseToESPromise(promise) {
- return new Promise((resolve, reject) => promise.on(resolve, reject));
-}
-
async function readAudioStreamAsPCMArrayBuffer(stream) {
- const read = (...args) => cognitiveServicesPromiseToESPromise(stream.read(...args));
const buffers = [];
let numBytes = 0;
for (let maxChunks = 0; maxChunks < 1000; maxChunks++) {
const buffer = new ArrayBuffer(4096);
- const bytesRead = await read(buffer);
+ const bytesRead = await stream.read(buffer);
if (bytesRead) {
buffers.push(new Uint8Array(buffer, 0, bytesRead));
diff --git a/packages/directlinespeech/package-lock.json b/packages/directlinespeech/package-lock.json
index ab562fdb8b..8f3e9ed98c 100644
--- a/packages/directlinespeech/package-lock.json
+++ b/packages/directlinespeech/package-lock.json
@@ -2836,9 +2836,9 @@
"dev": true
},
"agent-base": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.1.tgz",
- "integrity": "sha512-01q25QQDwLSsyfhrKbn8yuur+JNw0H+0Y4JiGIKd3z9aYk/w/2kxD/Upc+t2ZBBSUNff50VjPsSW2YxM8QYKVg==",
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
+ "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
"requires": {
"debug": "4"
}
@@ -6086,7 +6086,8 @@
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/growly/-/growly-1.3.0.tgz",
"integrity": "sha1-8QdIy+dq+WS3yWyTxrzCivEgwIE=",
- "dev": true
+ "dev": true,
+ "optional": true
},
"har-schema": {
"version": "2.0.0",
@@ -6281,9 +6282,9 @@
}
},
"debug": {
- "version": "3.2.6",
- "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz",
- "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==",
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
"requires": {
"ms": "^2.1.1"
}
@@ -6624,6 +6625,7 @@
"resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz",
"integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==",
"dev": true,
+ "optional": true,
"requires": {
"is-docker": "^2.0.0"
}
@@ -8794,18 +8796,31 @@
}
},
"microsoft-cognitiveservices-speech-sdk": {
- "version": "1.13.1",
- "resolved": "https://registry.npmjs.org/microsoft-cognitiveservices-speech-sdk/-/microsoft-cognitiveservices-speech-sdk-1.13.1.tgz",
- "integrity": "sha512-8Jis9HFYwWxaEpxwnFF6kEBZmEvGxfl3VUUdBXObJ0/WzkbvdTsyORRnHxztdSehiuS1BCq+XpmdpzI67KgZ9g==",
+ "version": "1.15.1",
+ "resolved": "https://registry.npmjs.org/microsoft-cognitiveservices-speech-sdk/-/microsoft-cognitiveservices-speech-sdk-1.15.1.tgz",
+ "integrity": "sha512-tSocYB0o5f8vKI9wJmykpdJA5kOcQCOuQbbUj6nZ9CD9UBu5HBsBoqOmNmIWrjkX+s/g+A8+bYLD0bWHJgoXZg==",
"requires": {
- "agent-base": "^6.0.0",
- "asn1.js-rfc2560": "^5.0.0",
+ "agent-base": "^6.0.2",
+ "asn1.js-rfc2560": "^5.0.1",
"asn1.js-rfc5280": "^3.0.0",
"async-disk-cache": "^2.1.0",
"https-proxy-agent": "^3.0.1",
"simple-lru-cache": "0.0.2",
- "ws": "^7.2.0",
+ "uuid": "^8.3.2",
+ "ws": "^7.4.3",
"xmlhttprequest-ts": "^1.0.1"
+ },
+ "dependencies": {
+ "uuid": {
+ "version": "8.3.2",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz",
+ "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg=="
+ },
+ "ws": {
+ "version": "7.4.3",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-7.4.3.tgz",
+ "integrity": "sha512-hr6vCR76GsossIRsr8OLR9acVVm1jyfEWvhbNjtgPOrfvAlKzvyeg/P6r8RuDjRyrcQoPQT7K0DGEPc7Ae6jzA=="
+ }
}
},
"miller-rabin": {
@@ -9036,6 +9051,7 @@
"resolved": "https://registry.npmjs.org/node-notifier/-/node-notifier-8.0.1.tgz",
"integrity": "sha512-BvEXF+UmsnAfYfoapKM9nGxnP+Wn7P91YfXmrKnfcYCx6VBeoN5Ez5Ogck6I8Bi5k4RlpqRYaw75pAwzX9OphA==",
"dev": true,
+ "optional": true,
"requires": {
"growly": "^1.3.0",
"is-wsl": "^2.2.0",
@@ -9050,6 +9066,7 @@
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
"dev": true,
+ "optional": true,
"requires": {
"yallist": "^4.0.0"
}
@@ -9059,6 +9076,7 @@
"resolved": "https://registry.npmjs.org/semver/-/semver-7.3.4.tgz",
"integrity": "sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw==",
"dev": true,
+ "optional": true,
"requires": {
"lru-cache": "^6.0.0"
}
@@ -9068,6 +9086,7 @@
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"dev": true,
+ "optional": true,
"requires": {
"isexe": "^2.0.0"
}
@@ -9076,7 +9095,8 @@
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
"integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
- "dev": true
+ "dev": true,
+ "optional": true
}
}
},
@@ -10700,7 +10720,8 @@
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/shellwords/-/shellwords-0.1.1.tgz",
"integrity": "sha512-vFwSUfQvqybiICwZY5+DAWIPLKsWO31Q91JSKl3UYv+K5c2QRPzn0qzec6QPu1Qc9eHYItiP3NdJqNVqetYAww==",
- "dev": true
+ "dev": true,
+ "optional": true
},
"signal-exit": {
"version": "3.0.3",
@@ -10716,8 +10737,7 @@
"simple-update-in": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/simple-update-in/-/simple-update-in-2.2.0.tgz",
- "integrity": "sha512-FrW41lLiOs82jKxwq39UrE1HDAHOvirKWk4Nv8tqnFFFknVbTxcHZzDS4vt02qqdU/5+KNsQHWzhKHznDBmrww==",
- "dev": true
+ "integrity": "sha512-FrW41lLiOs82jKxwq39UrE1HDAHOvirKWk4Nv8tqnFFFknVbTxcHZzDS4vt02qqdU/5+KNsQHWzhKHznDBmrww=="
},
"sisteransi": {
"version": "1.0.5",
@@ -11740,7 +11760,8 @@
"version": "8.3.1",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.1.tgz",
"integrity": "sha512-FOmRr+FmWEIG8uhZv6C2bTgEVXsHk08kE7mPlrBbEe+c3r9pjceVPgupIfNIhc4yx55H69OXANrUaSuu9eInKg==",
- "dev": true
+ "dev": true,
+ "optional": true
},
"v8-compile-cache": {
"version": "2.2.0",
@@ -11869,11 +11890,11 @@
}
},
"web-speech-cognitive-services": {
- "version": "7.0.2-master.6004e4b",
- "resolved": "https://registry.npmjs.org/web-speech-cognitive-services/-/web-speech-cognitive-services-7.0.2-master.6004e4b.tgz",
- "integrity": "sha512-EsVqaetECwBdTtc/sxKYfIZIMA+eOkcS+PplxgGA1tYm1KYmu/adftSTxQTWDxiQdYVywxHl2smJMDbC4SHU7w==",
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/web-speech-cognitive-services/-/web-speech-cognitive-services-7.1.0.tgz",
+ "integrity": "sha512-U2O9IWkRM3rRggdpLnCBEFQZM5XZ9OKUfaueztNyjoQTw/Fe1gMJOx3GjLWF644eRBxObHMIhgc3YPp6wKsfUQ==",
"requires": {
- "@babel/runtime": "7.9.6",
+ "@babel/runtime": "7.12.5",
"base64-arraybuffer": "0.2.0",
"event-as-promise": "1.0.5",
"event-target-shim": "5.0.1",
@@ -11882,22 +11903,7 @@
"on-error-resume-next": "1.1.0",
"p-defer": "3.0.0",
"p-defer-es5": "1.2.1",
- "simple-update-in": "2.1.1"
- },
- "dependencies": {
- "@babel/runtime": {
- "version": "7.9.6",
- "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.9.6.tgz",
- "integrity": "sha512-64AF1xY3OAkFHqOb9s4jpgk1Mm5vDZ4L3acHvAml+53nO1XbXLuDodsVpO4OIUsmemlUHMxNdYMNJmsvOwLrvQ==",
- "requires": {
- "regenerator-runtime": "^0.13.4"
- }
- },
- "simple-update-in": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/simple-update-in/-/simple-update-in-2.1.1.tgz",
- "integrity": "sha512-Iw4tMvOoibV6XqOqKgKgpMnFdgEtafhZv2KxNhHPAgXBtKrCTY6QFxMpvmSSuRHMK5uJ9hb6X+zniiTHVD7Sig=="
- }
+ "simple-update-in": "2.2.0"
}
},
"webidl-conversions": {
@@ -12268,7 +12274,8 @@
"ws": {
"version": "7.3.1",
"resolved": "https://registry.npmjs.org/ws/-/ws-7.3.1.tgz",
- "integrity": "sha512-D3RuNkynyHmEJIpD2qrgVkc9DQ23OrN/moAwZX4L8DfvszsJxpjQuUq3LMx6HoYji9fbIOBY18XWBsAux1ZZUA=="
+ "integrity": "sha512-D3RuNkynyHmEJIpD2qrgVkc9DQ23OrN/moAwZX4L8DfvszsJxpjQuUq3LMx6HoYji9fbIOBY18XWBsAux1ZZUA==",
+ "dev": true
},
"xml-name-validator": {
"version": "3.0.0",
diff --git a/packages/directlinespeech/package.json b/packages/directlinespeech/package.json
index ec17ca6f05..75bd0d2c92 100644
--- a/packages/directlinespeech/package.json
+++ b/packages/directlinespeech/package.json
@@ -61,10 +61,10 @@
"event-target-shim": "5.0.1",
"event-target-shim-es5": "1.2.0",
"math-random": "2.0.1",
- "microsoft-cognitiveservices-speech-sdk": "1.13.1",
+ "microsoft-cognitiveservices-speech-sdk": "1.15.1",
"p-defer": "3.0.0",
"p-defer-es5": "1.2.1",
- "web-speech-cognitive-services": "7.0.2-master.6004e4b"
+ "web-speech-cognitive-services": "7.1.0"
},
"engines": {
"node": ">=14.0.0"
diff --git a/packages/directlinespeech/src/DirectLineSpeech.js b/packages/directlinespeech/src/DirectLineSpeech.js
index 5560304937..2c6044463d 100644
--- a/packages/directlinespeech/src/DirectLineSpeech.js
+++ b/packages/directlinespeech/src/DirectLineSpeech.js
@@ -7,9 +7,7 @@ import shareObservable from './shareObservable';
import SpeechSynthesisAudioStreamUtterance from './SpeechSynthesisAudioStreamUtterance';
function randomActivityId() {
- return random()
- .toString(36)
- .substr(2);
+ return random().toString(36).substr(2);
}
export default class DirectLineSpeech {
@@ -24,7 +22,17 @@ export default class DirectLineSpeech {
connectionStatusObserver.next(0);
connectionStatusObserver.next(1);
- connectionStatusObserver.next(2);
+
+ dialogServiceConnector.connect(
+ () => {
+ connectionStatusObserver.next(2);
+ },
+ error => {
+ connectionStatusObserver.next(4);
+
+ console.warn('botframework-directlinespeech-sdk: Failed to connect', { error });
+ }
+ );
})
);
diff --git a/packages/directlinespeech/src/cognitiveServicesAsyncFunctionToESAsyncFunction.js b/packages/directlinespeech/src/cognitiveServicesAsyncFunctionToESAsyncFunction.js
deleted file mode 100644
index 40ae7908ea..0000000000
--- a/packages/directlinespeech/src/cognitiveServicesAsyncFunctionToESAsyncFunction.js
+++ /dev/null
@@ -1,5 +0,0 @@
-import cognitiveServicesPromiseToESPromise from './cognitiveServicesPromiseToESPromise';
-
-export default function cognitiveServicesAsyncFunctionToESAsyncFunction(fn) {
- return (...args) => cognitiveServicesPromiseToESPromise(fn(...args));
-}
diff --git a/packages/directlinespeech/src/createAdapters.js b/packages/directlinespeech/src/createAdapters.js
index c0fe56753e..4577091f33 100644
--- a/packages/directlinespeech/src/createAdapters.js
+++ b/packages/directlinespeech/src/createAdapters.js
@@ -2,7 +2,6 @@
import { AudioConfig } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/sdk/Audio/AudioConfig';
import { BotFrameworkConfig, DialogServiceConnector, PropertyId } from 'microsoft-cognitiveservices-speech-sdk';
-import { MicAudioSource } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common.browser/MicAudioSource';
import createWebSpeechPonyfillFactory from './createWebSpeechPonyfillFactory';
import DirectLineSpeech from './DirectLineSpeech';
@@ -35,9 +34,13 @@ export default async function create({
throw new Error('"fetchCredentials" must be specified.');
}
- const { authorizationToken, directLineToken, region, subscriptionKey } = await resolveFunctionOrReturnValue(
- fetchCredentials
- );
+ const {
+ authorizationToken,
+ directLineToken,
+ directLineSpeechHostname,
+ region,
+ subscriptionKey
+ } = await resolveFunctionOrReturnValue(fetchCredentials);
if (
(!authorizationToken && !subscriptionKey) ||
@@ -57,8 +60,26 @@ export default async function create({
);
}
- if (!region || typeof region !== 'string') {
- throw new Error('"fetchCredentials" must return "region" as a non-empty string.');
+ if ((directLineSpeechHostname && region) || (!directLineSpeechHostname && !region)) {
+ throw new Error(
+ '"fetchCredentials" must return either "directLineSpeechHostname" or "region" and it must not be an empty string.'
+ );
+ }
+
+ if (directLineSpeechHostname) {
+ if (typeof directLineSpeechHostname !== 'string') {
+ throw new Error('"fetchCredentials" must return "directLineSpeechHostname" as a string.');
+ }
+
+ if (enableInternalHTTPSupport) {
+ throw new Error(
+ '"fetchCredentials" must not return "directLineSpeechHostname" if "enableInternalHTTPSupport" is set.'
+ );
+ }
+ } else {
+ if (typeof region !== 'string') {
+ throw new Error('"fetchCredentials" must return "region" as a string.');
+ }
}
if (audioConfig && audioInputDeviceId) {
@@ -71,31 +92,6 @@ export default async function create({
} else {
audioConfig = AudioConfig.fromDefaultMicrophoneInput();
}
-
- // WORKAROUND: In Speech SDK 1.12.0-1.13.1, it dropped support of macOS/iOS Safari.
- // This code is adopted from microsoft-cognitiveservices-speech-sdk/src/common.browser/MicAudioSource.ts.
- // We will not need this code when using Speech SDK 1.14.0 or up.
- // TODO: [P1] #3575 Remove the following lines when bumping to Speech SDK 1.14.0 or higher
- const { privSource: source } = audioConfig;
-
- source.createAudioContext = () => {
- // eslint-disable-next-line no-extra-boolean-cast
- if (!!source.privContext) {
- return;
- }
-
- const AudioContext = window.AudioContext || window.webkitAudioContext;
-
- if (typeof AudioContext === 'undefined') {
- throw new Error('Browser does not support Web Audio API (AudioContext/webkitAudioContext is not available).');
- }
-
- if (navigator.mediaDevices.getSupportedConstraints().sampleRate) {
- source.privContext = new AudioContext({ sampleRate: MicAudioSource.AUDIOFORMAT.samplesPerSec });
- } else {
- source.privContext = new AudioContext();
- }
- };
}
if (speechRecognitionEndpointId) {
@@ -130,20 +126,35 @@ export default async function create({
let config;
- if (authorizationToken) {
- config = BotFrameworkConfig.fromAuthorizationToken(authorizationToken, region);
+ if (directLineSpeechHostname) {
+ if (authorizationToken) {
+ config = BotFrameworkConfig.fromHost(new URL(`wss://${directLineSpeechHostname}`));
+
+ config.setProperty(PropertyId.SpeechServiceAuthorization_Token, authorizationToken);
+ } else {
+ config = BotFrameworkConfig.fromHost(new URL(`wss://${directLineSpeechHostname}`), subscriptionKey);
+ }
+
+ // TODO: [P1] #3693 In Speech SDK 1.15.0, there is a bug that wrongly constructs the endpoint.
+ // https://github.com/microsoft/cognitive-services-speech-sdk-js/issues/315
+ // Remove the following line after the bug is resolved.
+ config.setProperty(PropertyId.SpeechServiceConnection_Host, `wss://${directLineSpeechHostname}`);
} else {
- config = BotFrameworkConfig.fromSubscription(subscriptionKey, region);
- }
+ if (authorizationToken) {
+ config = BotFrameworkConfig.fromAuthorizationToken(authorizationToken, region);
+ } else {
+ config = BotFrameworkConfig.fromSubscription(subscriptionKey, region);
+ }
- // If internal HTTP support is enabled, switch the endpoint to Direct Line on Direct Line Speech service.
- if (enableInternalHTTPSupport) {
- config.setProperty(
- PropertyId.SpeechServiceConnection_Endpoint,
- `wss://${encodeURI(region)}.convai.speech.microsoft.com/directline/api/v1`
- );
+ // If internal HTTP support is enabled, switch the endpoint to Direct Line on Direct Line Speech service.
+ if (enableInternalHTTPSupport) {
+ config.setProperty(
+ PropertyId.SpeechServiceConnection_Endpoint,
+ `wss://${encodeURI(region)}.convai.speech.microsoft.com/directline/api/v1`
+ );
- config.setProperty(PropertyId.Conversation_ApplicationId, directLineToken);
+ config.setProperty(PropertyId.Conversation_Agent_Connection_Id, directLineToken);
+ }
}
// Supported options can be found in DialogConnectorFactory.js.
@@ -167,8 +178,6 @@ export default async function create({
const dialogServiceConnector = patchDialogServiceConnectorInline(new DialogServiceConnector(config, audioConfig));
- dialogServiceConnector.connect();
-
// Renew token per interval.
if (authorizationToken) {
const interval = setInterval(async () => {
@@ -179,7 +188,11 @@ export default async function create({
clearInterval(interval);
}
- const { authorizationToken, region: nextRegion } = await resolveFunctionOrReturnValue(fetchCredentials);
+ const {
+ authorizationToken,
+ directLineSpeechHostname: nextDirectLineSpeechHostname,
+ region: nextRegion
+ } = await resolveFunctionOrReturnValue(fetchCredentials);
if (!authorizationToken) {
return console.warn(
@@ -187,10 +200,18 @@ export default async function create({
);
}
- if (region !== nextRegion) {
- return console.warn(
- 'botframework-directlinespeech-sdk: Region change is not supported for renewed token. Authorization token is not renewed.'
- );
+ if (directLineSpeechHostname) {
+ if (directLineSpeechHostname !== nextDirectLineSpeechHostname) {
+ return console.warn(
+ 'botframework-directlinespeech-sdk: "directLineSpeechHostname" change is not supported for renewed token. Authorization token is not renewed.'
+ );
+ }
+ } else {
+ if (region !== nextRegion) {
+ return console.warn(
+ 'botframework-directlinespeech-sdk: Region change is not supported for renewed token. Authorization token is not renewed.'
+ );
+ }
}
dialogServiceConnector.authorizationToken = authorizationToken; // eslint-disable-line require-atomic-updates
@@ -215,9 +236,9 @@ export default async function create({
);
}
- config.setProperty(PropertyId.Conversation_ApplicationId, refreshedDirectLineToken);
+ config.setProperty(PropertyId.Conversation_Agent_Connection_Id, refreshedDirectLineToken);
- dialogServiceConnector.properties.setProperty(PropertyId.Conversation_ApplicationId, refreshedDirectLineToken);
+ dialogServiceConnector.properties.setProperty(PropertyId.Conversation_Agent_Connection_Id, refreshedDirectLineToken);
dialogServiceConnector.connect();
}, DIRECT_LINE_TOKEN_RENEWAL_INTERVAL);
}
diff --git a/packages/directlinespeech/src/patchDialogServiceConnectorInline.js b/packages/directlinespeech/src/patchDialogServiceConnectorInline.js
index 9edffb82f8..917af064fd 100644
--- a/packages/directlinespeech/src/patchDialogServiceConnectorInline.js
+++ b/packages/directlinespeech/src/patchDialogServiceConnectorInline.js
@@ -38,7 +38,8 @@ export default function patchDialogServiceConnectorInline(dialogServiceConnector
}
);
- // TODO: startContinuousRecognitionAsync is not working yet, use listenOnceAsync instead.
+ // TODO: [P1] #2664 startContinuousRecognitionAsync is not working yet in Speech SDK 1.15.0.
+ // We need to polyfill to use listenOnceAsync instead, and disable stopContinuousRecognitionAsync.
dialogServiceConnector.startContinuousRecognitionAsync = (resolve, reject) => {
dialogServiceConnector.listenOnceAsync(
() => {
diff --git a/packages/directlinespeech/src/playCognitiveServicesStream.js b/packages/directlinespeech/src/playCognitiveServicesStream.js
index 39a7b8f52f..c1295e8dda 100644
--- a/packages/directlinespeech/src/playCognitiveServicesStream.js
+++ b/packages/directlinespeech/src/playCognitiveServicesStream.js
@@ -2,7 +2,6 @@
/* eslint no-await-in-loop: "off" */
/* eslint prefer-destructuring: "off" */
-import cognitiveServicesAsyncFunctionToESAsyncFunction from './cognitiveServicesAsyncFunctionToESAsyncFunction';
import createMultiBufferingPlayer from './createMultiBufferingPlayer';
// Safari requires an audio buffer with a sample rate of 22050 Hz.
@@ -118,23 +117,34 @@ export default async function playCognitiveServicesStream(audioContext, stream,
const { format } = stream;
const abortPromise = abortToReject(signal);
const array = new Uint8Array(DEFAULT_BUFFER_SIZE);
- const streamRead = cognitiveServicesAsyncFunctionToESAsyncFunction(stream.read.bind(stream));
const read = () =>
Promise.race([
// Abort will gracefully end the queue. We will check signal.aborted later to throw abort exception.
// eslint-disable-next-line no-empty-function
abortPromise.catch(() => {}),
- streamRead(array.buffer).then(numBytes =>
- numBytes === array.byteLength ? array : numBytes ? array.slice(0, numBytes) : undefined
- )
+ stream
+ .read(array.buffer)
+ .then(numBytes => (numBytes === array.byteLength ? array : numBytes ? array.slice(0, numBytes) : undefined))
]);
if (signal.aborted) {
throw new Error('aborted');
}
- let newSamplesPerSec = format.samplesPerSec;
+ let { samplesPerSec } = format;
+
+ // TODO: [P0] #3692 Remove the following if-condition block when the underlying bugs are resolved.
+ // There is a bug in Speech SDK 1.15.0 that returns 24kHz instead of 16kHz.
+ // Even if we explicitly specify the output audio format to 16kHz, there is another bug that ignores it.
+ // In short, DLSpeech service currently always streams in RIFF WAV format, instead of MP3.
+ // https://github.com/microsoft/cognitive-services-speech-sdk-js/issues/313
+ // https://github.com/microsoft/cognitive-services-speech-sdk-js/issues/314
+ if (format.requestAudioFormatString === 'audio-24khz-48kbitrate-mono-mp3') {
+ samplesPerSec = 16000;
+ }
+
+ let newSamplesPerSec = samplesPerSec;
let sampleRateMultiplier = 1;
// Safari requires a minimum sample rate of 22100 Hz.
@@ -143,7 +153,7 @@ export default async function playCognitiveServicesStream(audioContext, stream,
// For security, data will only be upsampled up to 96000 Hz.
while (newSamplesPerSec < MIN_SAMPLE_RATE && newSamplesPerSec < 96000) {
sampleRateMultiplier++;
- newSamplesPerSec = format.samplesPerSec * sampleRateMultiplier;
+ newSamplesPerSec = samplesPerSec * sampleRateMultiplier;
}
// The third parameter is the sample size in bytes.
diff --git a/packages/directlinespeech/src/playCognitiveServicesStream.spec.js b/packages/directlinespeech/src/playCognitiveServicesStream.spec.js
index 7fa873aacf..b5cbb51319 100644
--- a/packages/directlinespeech/src/playCognitiveServicesStream.spec.js
+++ b/packages/directlinespeech/src/playCognitiveServicesStream.spec.js
@@ -3,7 +3,6 @@
*/
import hasResolved from 'has-resolved';
-import { PromiseHelper } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Promise';
import playCognitiveServicesStream from './playCognitiveServicesStream';
@@ -72,9 +71,9 @@ function createStreamFromChunks(format, chunks) {
if (chunk) {
new Uint8Array(destination).set(new Uint8Array(chunk));
- return PromiseHelper.fromResult(chunk.byteLength);
+ return Promise.resolve(chunk.byteLength);
} else {
- return PromiseHelper.fromResult(0);
+ return Promise.resolve(0);
}
}
};
@@ -174,9 +173,7 @@ test('should stop when abort is called after all buffer queued', async () => {
test('should stop when abort is called before first buffer is queued', async () => {
const audioContext = createMockAudioContext();
const abortController = new AbortController();
- const read = jest.fn(() => ({
- on() {}
- }));
+ const read = jest.fn(() => new Promise(() => {}));
const playPromise = playCognitiveServicesStream(
audioContext,
diff --git a/packages/testharness/package-lock.json b/packages/testharness/package-lock.json
index 95f1892c6b..1da61d2684 100644
--- a/packages/testharness/package-lock.json
+++ b/packages/testharness/package-lock.json
@@ -2085,19 +2085,19 @@
"integrity": "sha512-ZVA9k326Nwrj3Cj9jlh3wGFutC2ZornPNARZwsNYqQYgN0EsV2d53w5RN/co65Ohn4sUAUtb1rSUAOD6XN9idA=="
},
"agent-base": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.1.tgz",
- "integrity": "sha512-01q25QQDwLSsyfhrKbn8yuur+JNw0H+0Y4JiGIKd3z9aYk/w/2kxD/Upc+t2ZBBSUNff50VjPsSW2YxM8QYKVg==",
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz",
+ "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==",
"requires": {
"debug": "4"
},
"dependencies": {
"debug": {
- "version": "4.1.1",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
- "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
"requires": {
- "ms": "^2.1.1"
+ "ms": "2.1.2"
}
},
"ms": {
@@ -2258,11 +2258,11 @@
},
"dependencies": {
"debug": {
- "version": "4.1.1",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
- "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
+ "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==",
"requires": {
- "ms": "^2.1.1"
+ "ms": "2.1.2"
}
},
"ms": {
@@ -3845,17 +3845,17 @@
}
},
"debug": {
- "version": "3.2.6",
- "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz",
- "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==",
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
"requires": {
"ms": "^2.1.1"
}
},
"ms": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
- "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
}
}
},
@@ -4511,17 +4511,18 @@
}
},
"microsoft-cognitiveservices-speech-sdk": {
- "version": "1.13.1",
- "resolved": "https://registry.npmjs.org/microsoft-cognitiveservices-speech-sdk/-/microsoft-cognitiveservices-speech-sdk-1.13.1.tgz",
- "integrity": "sha512-8Jis9HFYwWxaEpxwnFF6kEBZmEvGxfl3VUUdBXObJ0/WzkbvdTsyORRnHxztdSehiuS1BCq+XpmdpzI67KgZ9g==",
+ "version": "1.15.1",
+ "resolved": "https://registry.npmjs.org/microsoft-cognitiveservices-speech-sdk/-/microsoft-cognitiveservices-speech-sdk-1.15.1.tgz",
+ "integrity": "sha512-tSocYB0o5f8vKI9wJmykpdJA5kOcQCOuQbbUj6nZ9CD9UBu5HBsBoqOmNmIWrjkX+s/g+A8+bYLD0bWHJgoXZg==",
"requires": {
- "agent-base": "^6.0.0",
- "asn1.js-rfc2560": "^5.0.0",
+ "agent-base": "^6.0.2",
+ "asn1.js-rfc2560": "^5.0.1",
"asn1.js-rfc5280": "^3.0.0",
"async-disk-cache": "^2.1.0",
"https-proxy-agent": "^3.0.1",
"simple-lru-cache": "0.0.2",
- "ws": "^7.2.0",
+ "uuid": "^8.3.2",
+ "ws": "^7.4.3",
"xmlhttprequest-ts": "^1.0.1"
}
},
@@ -6225,6 +6226,11 @@
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8="
},
+ "uuid": {
+ "version": "8.3.2",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz",
+ "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg=="
+ },
"v8-compile-cache": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.1.1.tgz",
@@ -6511,9 +6517,9 @@
"integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
},
"ws": {
- "version": "7.3.1",
- "resolved": "https://registry.npmjs.org/ws/-/ws-7.3.1.tgz",
- "integrity": "sha512-D3RuNkynyHmEJIpD2qrgVkc9DQ23OrN/moAwZX4L8DfvszsJxpjQuUq3LMx6HoYji9fbIOBY18XWBsAux1ZZUA=="
+ "version": "7.4.3",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-7.4.3.tgz",
+ "integrity": "sha512-hr6vCR76GsossIRsr8OLR9acVVm1jyfEWvhbNjtgPOrfvAlKzvyeg/P6r8RuDjRyrcQoPQT7K0DGEPc7Ae6jzA=="
},
"xmlhttprequest-ts": {
"version": "1.0.1",
diff --git a/packages/testharness/package.json b/packages/testharness/package.json
index 11f06618b6..24ed156641 100644
--- a/packages/testharness/package.json
+++ b/packages/testharness/package.json
@@ -46,7 +46,7 @@
"expect": "^25.5.0",
"lolex": "6.0.0",
"math-random": "^2.0.1",
- "microsoft-cognitiveservices-speech-sdk": "1.13.1",
+ "microsoft-cognitiveservices-speech-sdk": "1.15.1",
"p-defer": "^3.0.0",
"p-defer-es5": "^1.2.1",
"simple-update-in": "^2.2.0"
diff --git a/packages/testharness/src/speech/speechRecognition/createQueuedArrayBufferAudioSource.js b/packages/testharness/src/speech/speechRecognition/createQueuedArrayBufferAudioSource.js
index fb026150ed..520cfc8725 100644
--- a/packages/testharness/src/speech/speechRecognition/createQueuedArrayBufferAudioSource.js
+++ b/packages/testharness/src/speech/speechRecognition/createQueuedArrayBufferAudioSource.js
@@ -1,4 +1,4 @@
-import { AudioStreamFormat } from '../../external/microsoft-cognitiveservices-speech-sdk';
+import { AudioStreamFormat } from 'microsoft-cognitiveservices-speech-sdk';
import {
AudioSourceErrorEvent,
@@ -8,16 +8,16 @@ import {
AudioStreamNodeAttachedEvent,
AudioStreamNodeAttachingEvent,
AudioStreamNodeDetachedEvent
-} from '../../external/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/AudioSourceEvents';
+} from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/AudioSourceEvents';
-import { createNoDashGuid } from '../../external/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Guid';
-import { Events } from '../../external/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Events';
-import { EventSource } from '../../external/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/EventSource';
-import { PromiseHelper } from '../../external/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Promise';
-import { Stream } from '../../external/microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Stream';
+import { ChunkedArrayBufferStream } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/ChunkedArrayBufferStream';
+import { createNoDashGuid } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Guid';
+import { Events } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/Events';
+import { EventSource } from 'microsoft-cognitiveservices-speech-sdk/distrib/lib/src/common/EventSource';
const CHUNK_SIZE = 4096;
+// This is copied from MicAudioSource, but instead of retrieving from MediaStream, we dump the ArrayBuffer directly.
class QueuedArrayBufferAudioSource {
constructor(audioFormat, audioSourceId = createNoDashGuid()) {
this._audioFormat = audioFormat;
@@ -31,9 +31,17 @@ class QueuedArrayBufferAudioSource {
};
this._events = new EventSource();
+
+ this.attach = this.attach.bind(this);
+ this.detach = this.detach.bind(this);
+ this.id = this.id.bind(this);
+ this.listen = this.listen.bind(this);
+ this.push = this.push.bind(this);
+ this.turnOff = this.turnOff.bind(this);
+ this.turnOn = this.turnOn.bind(this);
}
- push = arrayBuffer => {
+ push(arrayBuffer) {
// 10 seconds of audio in bytes =
// sample rate (bytes/second) * 600 (seconds) + 44 (size of the wave header).
const maxSize = this._audioFormat.samplesPerSec * 600 + 44;
@@ -43,43 +51,46 @@ class QueuedArrayBufferAudioSource {
this.onEvent(new AudioSourceErrorEvent(errorMsg, ''));
- return PromiseHelper.fromError(errorMsg);
+ return Promise.reject(errorMsg);
}
this._queue.push(arrayBuffer);
- };
+ }
- turnOn = () => {
+ turnOn() {
this.onEvent(new AudioSourceInitializingEvent(this._id)); // no stream id
this.onEvent(new AudioSourceReadyEvent(this._id));
- return PromiseHelper.fromResult(true);
- };
+ return true;
+ }
- id = () => this._id;
+ id() {
+ return this._id;
+ }
- // Returns an IAudioSourceNode asynchronously.
- // Reference at node_modules/microsoft-cognitiveservices-speech-sdk/distrib/es2015/src/common/IAudioSource.d.ts
- attach = audioNodeId => {
+ async attach(audioNodeId) {
this.onEvent(new AudioStreamNodeAttachingEvent(this._id, audioNodeId));
- return this.upload(audioNodeId).onSuccessContinueWith(stream => {
- this.onEvent(new AudioStreamNodeAttachedEvent(this._id, audioNodeId));
+ const stream = await this.listen(audioNodeId);
+
+ this.onEvent(new AudioStreamNodeAttachedEvent(this._id, audioNodeId));
+
+ return {
+ detach: () => {
+ stream.readEnded();
+
+ delete this._streams[audioNodeId];
- return {
- detach: () => {
- delete this._streams[audioNodeId];
+ this.onEvent(new AudioStreamNodeDetachedEvent(this._id, audioNodeId));
- this.onEvent(new AudioStreamNodeDetachedEvent(this._id, audioNodeId));
- this.turnOff();
- },
- id: () => audioNodeId,
- read: stream.read.bind(stream)
- };
- });
- };
+ return this.turnOff();
+ },
+ id: () => audioNodeId,
+ read: () => stream.read()
+ };
+ }
- detach = audioNodeId => {
+ detach(audioNodeId) {
if (audioNodeId && this._streams[audioNodeId]) {
this._streams[audioNodeId].close();
@@ -87,44 +98,41 @@ class QueuedArrayBufferAudioSource {
this.onEvent(new AudioStreamNodeDetachedEvent(this._id, audioNodeId));
}
- };
+ }
- turnOff = () => {
+ turnOff() {
Object.values(this._streams).forEach(stream => stream && !stream.isClosed && stream.close());
this.onEvent(new AudioSourceOffEvent(this._id)); // no stream now
- return PromiseHelper.fromResult(true);
- };
+ return true;
+ }
- // Creates a new Stream object merge all chunks from _queue into a single IAudioStreamNode
- upload = audioNodeId => {
- return this.turnOn().onSuccessContinueWith(() => {
- const stream = new Stream(audioNodeId);
+ async listen(audioNodeId) {
+ await this.turnOn();
- this._streams[audioNodeId] = stream;
+ const stream = new ChunkedArrayBufferStream(this.format.avgBytesPerSec / 10, audioNodeId);
- const arrayBuffer = this._queue.shift();
+ this._streams[audioNodeId] = stream;
- const { byteLength } = arrayBuffer;
+ const arrayBuffer = this._queue.shift();
+ const { byteLength } = arrayBuffer;
- for (let i = 0; i < byteLength; i += CHUNK_SIZE) {
- stream.writeStreamChunk({
- buffer: arrayBuffer.slice(i, Math.min(i + CHUNK_SIZE, byteLength)),
- isEnd: false,
- timeReceived: Date.now()
- });
- }
+ for (let i = 0; i < byteLength; i += CHUNK_SIZE) {
+ stream.writeStreamChunk({
+ buffer: arrayBuffer.slice(i, Math.min(i + CHUNK_SIZE, byteLength)),
+ isEnd: false,
+ timeReceived: Date.now()
+ });
+ }
- // Stream will only close the internal stream writer.
- stream.close();
+ stream.close();
- return stream;
- });
- };
+ return stream;
+ }
get format() {
- return PromiseHelper.fromResult(this._audioFormat);
+ return this._audioFormat;
}
get events() {
@@ -132,7 +140,7 @@ class QueuedArrayBufferAudioSource {
}
get deviceInfo() {
- return PromiseHelper.fromResult({
+ return {
bitspersample: this._audioFormat.bitsPerSample,
channelcount: this._audioFormat.channels,
connectivity: 'Unknown',
@@ -140,7 +148,7 @@ class QueuedArrayBufferAudioSource {
model: 'File',
samplerate: this._audioFormat.samplesPerSec,
type: 'File'
- });
+ };
}
}
diff --git a/packages/testharness/src/speech/speechRecognition/fetchSpeechData.js b/packages/testharness/src/speech/speechRecognition/fetchSpeechData.js
index 8d37ac5328..76444a74a7 100644
--- a/packages/testharness/src/speech/speechRecognition/fetchSpeechData.js
+++ b/packages/testharness/src/speech/speechRecognition/fetchSpeechData.js
@@ -31,7 +31,7 @@ export default async function({
} else if (authorizationToken && subscriptionKey) {
throw new Error('Only "authorizationToken" or "subscriptionKey" should be set.');
} else if ((region && speechSynthesisHostname) || (!region && !speechSynthesisHostname)) {
- throw new Error('Only "region" or "speechSynthesisHostnamename" should be set.');
+ throw new Error('Only "region" or "speechSynthesisHostname" should be set.');
}
const ssml = isSSML(text) ? text : buildSSML({ lang, pitch, rate, text, voice, volume });