diff --git a/CHANGELOG.md b/CHANGELOG.md
index d66ba9947d..2c48ea14f9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -30,7 +30,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
### Added
- Resolves [#2539](https://github.com/Microsoft/BotFramework-WebChat/issues/2539), added React hooks for customization, by [@compulim](https://github.com/compulim), in the following PRs:
- - PR [#2540](https://github.com/microsoft/BotFramework-WebChat/pull/2540): `useActivities`, `useReferenceGrammarID`, `useSendBoxDictationStarted`
+ - PR [#2540](https://github.com/microsoft/BotFramework-WebChat/pull/2540): `useActivities`, `useReferenceGrammarID`, `useSendBoxSpeechInterimsVisible`
- PR [#2541](https://github.com/microsoft/BotFramework-WebChat/pull/2541): `useStyleOptions`, `useStyleSet`
- PR [#2542](https://github.com/microsoft/BotFramework-WebChat/pull/2542): `useLanguage`, `useLocalize`, `useLocalizeDate`
- PR [#2543](https://github.com/microsoft/BotFramework-WebChat/pull/2543): `useAdaptiveCardsHostConfig`, `useAdaptiveCardsPackage`, `useRenderMarkdownAsHTML`
@@ -42,6 +42,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- PR [#2551](https://github.com/microsoft/BotFramework-WebChat/pull/2551): `useLastTypingAt`, `useSendTypingIndicator`, `useTypingIndicator`
- PR [#2552](https://github.com/microsoft/BotFramework-WebChat/pull/2552): `useFocusSendBox`, `useScrollToEnd`, `useSendBoxValue`, `useSubmitSendBox`, `useTextBoxSubmit`, `useTextBoxValue`
- PR [#2553](https://github.com/microsoft/BotFramework-WebChat/pull/2553): `useDictateInterims`, `useDictateState`, `useGrammars`, `useMarkActivityAsSpoken`, `useMicrophoneButton`, `useShouldSpeakIncomingActivity`, `useStartDictate`, `useStopDictate`, `useVoiceSelector`, `useWebSpeechPonyfill`
+ - PR [#2554](https://github.com/microsoft/BotFramework-WebChat/pull/2554): `useRenderActivity`, `useRenderAttachment`
- Bring your own Adaptive Cards package by specifying `adaptiveCardsPackage` prop, by [@compulim](https://github.com/compulim) in PR [#2543](https://github.com/microsoft/BotFramework-WebChat/pull/2543)
- Fixes [#2597](https://github.com/microsoft/BotFramework-WebChat/issues/2597). Modify `watch` script to `start` and add `tableflip` script for throwing `node_modules`, by [@corinagum](https://github.com/corinagum) in PR [#2598](https://github.com/microsoft/BotFramework-WebChat/pull/2598)
- Adds Arabic Language Support, by [@midineo](https://github.com/midineo), in PR [#2593](https://github.com/microsoft/BotFramework-WebChat/pull/2593)
@@ -55,12 +56,12 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Fixes [#2512](https://github.com/microsoft/BotFramework-WebChat/issues/2512). Adds check to ensure Adaptive Card's content is an Object, by [@tdurnford](https://github.com/tdurnford) in PR [#2590](https://github.com/microsoft/BotFramework-WebChat/pull/2590)
- Fixes [#1780](https://github.com/microsoft/BotFramework-WebChat/issues/1780), [#2277](https://github.com/microsoft/BotFramework-WebChat/issues/2277), and [#2285](https://github.com/microsoft/BotFramework-WebChat/issues/2285). Make Suggested Actions accessible, Fix Markdown card in carousel being read multiple times, and label widgets of Connectivity Status and Suggested Actions containers, by [@corinagum](https://github.com/corinagum) in PR [#2613](https://github.com/microsoft/BotFramework-WebChat/pull/2613)
- Fixes [#2608](https://github.com/microsoft/BotFramework-WebChat/issues/2608). Focus will return to sendbox after clicking New Messages or a Suggested Actions button, by [@corinagum](https://github.com/corinagum) in PR [#2628](https://github.com/microsoft/BotFramework-WebChat/pull/2628)
-- `component`: Fixes [#2331](https://github.com/microsoft/BotFramework-WebChat/issues/2331). Updated timer to use React Hooks, by [@spyip](https://github.com/spyip) in PR [#2546](https://github.com/microsoft/BotFramework-WebChat/pull/2546)
- Resolves [#2597](https://github.com/microsoft/BotFramework-WebChat/issues/2597). Modify `watch` script to `start` and add `tableflip` script for throwing `node_modules`, by [@corinagum](https://github.com/corinagum) in PR [#2598](https://github.com/microsoft/BotFramework-WebChat/pull/2598)
-- Adds `suggestedActionLayout` to `defaultStyleOptions`, by [@spyip](https://github.com/spyip), in PR [#2596](https://github.com/microsoft/BotFramework-WebChat/pull/2596)
+- Resolves [#1835](https://github.com/microsoft/BotFramework-WebChat/issues/1835). Adds `suggestedActionLayout` to `defaultStyleOptions`, by [@spyip](https://github.com/spyip), in PR [#2596](https://github.com/microsoft/BotFramework-WebChat/pull/2596)
- Resolves [#2331](https://github.com/microsoft/BotFramework-WebChat/issues/2331). Updated timer to use React Hooks, by [@spyip](https://github.com/spyip) in PR [#2546](https://github.com/microsoft/BotFramework-WebChat/pull/2546)
-- Resolves [#2620](https://github.com/microsoft/BotFramework-WebChat/issues/2620), update Chinese localization files, by [@spyip](https://github.com/spyip) in PR [#2631](https://github.com/microsoft/BotFramework-WebChat/pull/2631)
+- Resolves [#2620](https://github.com/microsoft/BotFramework-WebChat/issues/2620). Adds Chinese localization files, by [@spyip](https://github.com/spyip) in PR [#2631](https://github.com/microsoft/BotFramework-WebChat/pull/2631)
- Fixes [#2639](https://github.com/microsoft/BotFramework-WebChat/issues/2639). Fix passed in prop time from string to boolean, by [@corinagum](https://github.com/corinagum) in PR [#2640](https://github.com/microsoft/BotFramework-WebChat/pull/2640)
+- `component`: Updated timer to use functional component, by [@spyip](https://github.com/spyip) in PR [#2546](https://github.com/microsoft/BotFramework-WebChat/pull/2546)
### Changed
diff --git a/HOOKS.md b/HOOKS.md
index 4089c1aba1..754606f322 100644
--- a/HOOKS.md
+++ b/HOOKS.md
@@ -320,17 +320,18 @@ This value is not controllable and is passed to Web Chat from the Direct Line ch
## `useRenderActivity`
```js
-useRenderActivity(): ({
- activity: Activity,
+useRenderActivity(
renderAttachment: ({
activity: Activity,
attachment: Attachment
- }) => React.Element,
+ }) => React.Element
+): ({
+ activity: Activity,
timestampClassName: string
}) => React.Element
```
-This function is for rendering an activity inside a React element. The caller will need to pass `activity`, `timestampClassName`, and a render function for the attachment. This function is a composition of `activityRendererMiddleware`, which is passed as a prop.
+This function is for rendering an activity and its attachments inside a React element. Because of the parent-child relationship, the caller will need to pass a render function for the attachment in order to create a render function for the activity. When rendering the activity, the caller will need to pass `activity` and `timestampClassName`. This function is a composition of `activityRendererMiddleware`, which is passed as a prop.
## `useRenderAttachment`
@@ -596,15 +597,15 @@ This value can be partly controllable through Web Chat props.
These are hooks that are specific for the send box.
-- [`useSendBoxDictationStarted`](#usesendboxdictationstarted)
+- [`useSendBoxSpeechInterimsVisible`](#usesendboxspeechinterimsvisible)
-### `useSendBoxDictationStarted`
+### `useSendBoxSpeechInterimsVisible`
```js
-useSendBoxDictationStarted(): [boolean]
+useSendBoxSpeechInterimsVisible(): [boolean]
```
-This function will return whether speech-to-text detection has been started or not.
+This function will return whether the send box should show speech interims.
## `TextBox`
diff --git a/__tests__/__image_snapshots__/chrome-docker/use-text-box-js-calling-submit-should-scroll-to-end-1-snap.png b/__tests__/__image_snapshots__/chrome-docker/use-text-box-js-calling-submit-should-scroll-to-end-1-snap.png
new file mode 100644
index 0000000000..9554f29539
Binary files /dev/null and b/__tests__/__image_snapshots__/chrome-docker/use-text-box-js-calling-submit-should-scroll-to-end-1-snap.png differ
diff --git a/__tests__/__image_snapshots__/chrome-docker/use-text-box-js-calling-submit-should-scroll-to-end-2-snap.png b/__tests__/__image_snapshots__/chrome-docker/use-text-box-js-calling-submit-should-scroll-to-end-2-snap.png
new file mode 100644
index 0000000000..f336aabc59
Binary files /dev/null and b/__tests__/__image_snapshots__/chrome-docker/use-text-box-js-calling-submit-should-scroll-to-end-2-snap.png differ
diff --git a/__tests__/hooks/useMicrophoneButton.js b/__tests__/hooks/useMicrophoneButton.js
index 85d0287771..8f750aeb78 100644
--- a/__tests__/hooks/useMicrophoneButton.js
+++ b/__tests__/hooks/useMicrophoneButton.js
@@ -9,20 +9,27 @@ import uiConnected from '../setup/conditions/uiConnected';
jest.setTimeout(timeouts.test);
test('microphoneButtonClick should toggle recording', async () => {
- // TODO: [P1] Test is temporarily disabled until the hook is implemented
- // const { driver, pageObjects } = await setupWebDriver({
- // props: {
- // webSpeechPonyfillFactory: () => window.WebSpeechMock
- // }
- // });
- // await driver.wait(uiConnected(), timeouts.directLine);
- // await pageObjects.runHook('useMicrophoneButtonClick', [], microphoneButtonClick => microphoneButtonClick());
- // await driver.wait(speechRecognitionStartCalled(), timeouts.ui);
- // await expect(
- // pageObjects.runHook('useMicrophoneButtonDisabled', [], microphoneButtonDisabled => microphoneButtonDisabled[0])
- // ).resolves.toBeTruthy();
- // await pageObjects.putSpeechRecognitionResult('recognizing', 'Hello');
- // await expect(pageObjects.isDictating()).resolves.toBeTruthy();
- // await pageObjects.runHook('useMicrophoneButtonClick', [], microphoneButtonClick => microphoneButtonClick());
- // await expect(pageObjects.isDictating()).resolves.toBeFalsy();
+ const { driver, pageObjects } = await setupWebDriver({
+ props: {
+ webSpeechPonyfillFactory: () => window.WebSpeechMock
+ }
+ });
+
+ await driver.wait(uiConnected(), timeouts.directLine);
+
+ await pageObjects.runHook('useMicrophoneButtonClick', [], microphoneButtonClick => microphoneButtonClick());
+
+ await driver.wait(speechRecognitionStartCalled(), timeouts.ui);
+
+ await expect(
+ pageObjects.runHook('useMicrophoneButtonDisabled', [], microphoneButtonDisabled => microphoneButtonDisabled[0])
+ ).resolves.toBeTruthy();
+
+ await pageObjects.putSpeechRecognitionResult('recognizing', 'Hello');
+
+ await expect(pageObjects.isDictating()).resolves.toBeTruthy();
+
+ await pageObjects.runHook('useMicrophoneButtonClick', [], microphoneButtonClick => microphoneButtonClick());
+
+ await expect(pageObjects.isDictating()).resolves.toBeFalsy();
});
diff --git a/__tests__/hooks/useSendBoxSpeechInterimsVisible.js b/__tests__/hooks/useSendBoxSpeechInterimsVisible.js
new file mode 100644
index 0000000000..cc1feb4969
--- /dev/null
+++ b/__tests__/hooks/useSendBoxSpeechInterimsVisible.js
@@ -0,0 +1,92 @@
+import { timeouts } from '../constants.json';
+
+import negate from '../setup/conditions/negate';
+import speechRecognitionStartCalled from '../setup/conditions/speechRecognitionStartCalled';
+import speechSynthesisUtterancePended from '../setup/conditions/speechSynthesisUtterancePended';
+import uiConnected from '../setup/conditions/uiConnected';
+
+// selenium-webdriver API doc:
+// https://seleniumhq.github.io/selenium/docs/api/javascript/module/selenium-webdriver/index_exports_WebDriver.html
+
+jest.setTimeout(timeouts.test);
+
+test('sendBoxSpeechInterimsVisible should return whether speech interims should be shown', async () => {
+ const { driver, pageObjects } = await setupWebDriver({
+ props: {
+ webSpeechPonyfillFactory: () => window.WebSpeechMock
+ }
+ });
+
+ await driver.wait(uiConnected(), timeouts.directLine);
+
+ await expect(
+ pageObjects.runHook(
+ 'useSendBoxSpeechInterimsVisible',
+ [],
+ sendBoxSpeechInterimsVisible => sendBoxSpeechInterimsVisible[0]
+ )
+ ).resolves.toMatchInlineSnapshot(`false`);
+
+ await pageObjects.clickMicrophoneButton();
+
+ await driver.wait(speechRecognitionStartCalled(), timeouts.ui);
+
+ await expect(
+ pageObjects.runHook(
+ 'useSendBoxSpeechInterimsVisible',
+ [],
+ sendBoxSpeechInterimsVisible => sendBoxSpeechInterimsVisible[0]
+ )
+ ).resolves.toMatchInlineSnapshot(`true`);
+
+ await pageObjects.putSpeechRecognitionResult('recognizing', 'Hello');
+
+ await expect(
+ pageObjects.runHook(
+ 'useSendBoxSpeechInterimsVisible',
+ [],
+ sendBoxSpeechInterimsVisible => sendBoxSpeechInterimsVisible[0]
+ )
+ ).resolves.toMatchInlineSnapshot(`true`);
+});
+
+test('sendBoxSpeechInterimsVisible should return false when synthesizing', async () => {
+ const { driver, pageObjects } = await setupWebDriver({
+ props: {
+ webSpeechPonyfillFactory: () => window.WebSpeechMock
+ }
+ });
+
+ await driver.wait(uiConnected(), timeouts.directLine);
+ await pageObjects.sendMessageViaMicrophone('Hello, World!');
+ await expect(pageObjects.startSpeechSynthesize());
+
+ await expect(
+ pageObjects.runHook(
+ 'useSendBoxSpeechInterimsVisible',
+ [],
+ sendBoxSpeechInterimsVisible => sendBoxSpeechInterimsVisible[0]
+ )
+ ).resolves.toMatchInlineSnapshot(`false`);
+
+ await driver.wait(speechSynthesisUtterancePended(), timeouts.ui);
+
+ await pageObjects.clickMicrophoneButton();
+
+ await driver.wait(negate(speechSynthesisUtterancePended()), timeouts.ui);
+
+ await expect(
+ pageObjects.runHook(
+ 'useSendBoxSpeechInterimsVisible',
+ [],
+ sendBoxSpeechInterimsVisible => sendBoxSpeechInterimsVisible[0]
+ )
+ ).resolves.toMatchInlineSnapshot(`true`);
+});
+
+test('setter should be undefined', async () => {
+ const { pageObjects } = await setupWebDriver();
+  const [_, setSendBoxSpeechInterimsVisible] = await pageObjects.runHook('useSendBoxSpeechInterimsVisible');
+
+  expect(setSendBoxSpeechInterimsVisible).toBeUndefined();
+});
diff --git a/__tests__/hooks/useStartDictate.js b/__tests__/hooks/useStartDictate.js
index d03340c312..8fb4eca99e 100644
--- a/__tests__/hooks/useStartDictate.js
+++ b/__tests__/hooks/useStartDictate.js
@@ -1,6 +1,5 @@
import { timeouts } from '../constants.json';
-import isDictating from '../setup/pageObjects/isDictating';
import uiConnected from '../setup/conditions/uiConnected';
// selenium-webdriver API doc:
@@ -18,5 +17,11 @@ test('calling startDictate should start dictate', async () => {
await driver.wait(uiConnected(), timeouts.directLine);
await pageObjects.runHook('useStartDictate', [], startDictate => startDictate());
+ // The engine is starting, but not fully started yet.
await expect(pageObjects.isDictating()).resolves.toBeFalsy();
+
+ await pageObjects.putSpeechRecognitionResult('recognizing', 'Hello, World!');
+
+ // The engine has started, and recognition is ongoing and is not stopping.
+ await expect(pageObjects.isDictating()).resolves.toBeTruthy();
});
diff --git a/__tests__/hooks/useTextBox.js b/__tests__/hooks/useTextBox.js
index 2873052478..d67fe8d42a 100644
--- a/__tests__/hooks/useTextBox.js
+++ b/__tests__/hooks/useTextBox.js
@@ -9,21 +9,31 @@ import uiConnected from '../setup/conditions/uiConnected';
jest.setTimeout(timeouts.test);
-// TODO: [P1] Test is temporarily disable until fully implemented
test('calling submit should scroll to end', async () => {
- // const { driver, pageObjects } = await setupWebDriver();
- // await driver.wait(uiConnected(), timeouts.directLine);
- // await pageObjects.typeOnSendBox('help');
- // await expect(pageObjects.runHook('useTextBoxValue', [], textBoxValue => textBoxValue[0])).resolves.toBe('help');
- // await pageObjects.clickSendButton();
- // await driver.wait(minNumActivitiesShown(2), timeouts.directLine);
- // await driver.wait(scrollToBottomCompleted(), timeouts.scrollToBottom);
- // await driver.executeScript(() => {
- // document.querySelector('[role="log"] > *').scrollTop = 0;
- // });
- // expect(await driver.takeScreenshot()).toMatchImageSnapshot(imageSnapshotOptions);
- // await pageObjects.runHook('useTextBoxValue', [], textBoxValue => textBoxValue[1]('Hello, World!'));
- // await pageObjects.runHook('useTextBoxSubmit', [], textBoxSubmit => textBoxSubmit());
- // await driver.wait(scrollToBottomCompleted(), timeouts.scrollToBottom);
- // expect(await driver.takeScreenshot()).toMatchImageSnapshot(imageSnapshotOptions);
+ const { driver, pageObjects } = await setupWebDriver();
+
+ await driver.wait(uiConnected(), timeouts.directLine);
+
+ await pageObjects.typeOnSendBox('help');
+
+ await expect(pageObjects.runHook('useTextBoxValue', [], textBoxValue => textBoxValue[0])).resolves.toBe('help');
+
+ await pageObjects.clickSendButton();
+
+ await driver.wait(minNumActivitiesShown(2), timeouts.directLine);
+ await driver.wait(scrollToBottomCompleted(), timeouts.scrollToBottom);
+
+ await driver.executeScript(() => {
+ document.querySelector('[role="log"] > *').scrollTop = 0;
+ });
+
+ expect(await driver.takeScreenshot()).toMatchImageSnapshot(imageSnapshotOptions);
+
+ await pageObjects.runHook('useTextBoxValue', [], textBoxValue => textBoxValue[1]('Hello, World!'));
+ await pageObjects.runHook('useTextBoxSubmit', [], textBoxSubmit => textBoxSubmit());
+
+ await driver.wait(minNumActivitiesShown(4), timeouts.directLine);
+ await driver.wait(scrollToBottomCompleted(), timeouts.scrollToBottom);
+
+ expect(await driver.takeScreenshot()).toMatchImageSnapshot(imageSnapshotOptions);
});
diff --git a/__tests__/hooks/useVoiceSelector.js b/__tests__/hooks/useVoiceSelector.js
index dfc07ba405..af4087e05d 100644
--- a/__tests__/hooks/useVoiceSelector.js
+++ b/__tests__/hooks/useVoiceSelector.js
@@ -34,12 +34,12 @@ test('calling voiceSelector should use selectVoice from props', async () => {
])
)
).resolves.toMatchInlineSnapshot(`
- Object {
- "default": false,
- "lang": "zh-YUE",
- "localService": true,
- "name": "Mock Voice (zh-YUE)",
- "voiceURI": "mock://web-speech/voice/zh-YUE",
- }
- `);
+ Object {
+ "default": false,
+ "lang": "zh-YUE",
+ "localService": true,
+ "name": "Mock Voice (zh-YUE)",
+ "voiceURI": "mock://web-speech/voice/zh-YUE",
+ }
+ `);
});
diff --git a/packages/bundle/src/adaptiveCards/Attachment/AdaptiveCardRenderer.js b/packages/bundle/src/adaptiveCards/Attachment/AdaptiveCardRenderer.js
index ddbf106a7f..303cf98420 100644
--- a/packages/bundle/src/adaptiveCards/Attachment/AdaptiveCardRenderer.js
+++ b/packages/bundle/src/adaptiveCards/Attachment/AdaptiveCardRenderer.js
@@ -3,7 +3,7 @@
import PropTypes from 'prop-types';
import React, { useCallback, useLayoutEffect, useRef, useState } from 'react';
-import { Components, connectToWebChat, getTabIndex, hooks } from 'botframework-webchat-component';
+import { Components, getTabIndex, hooks } from 'botframework-webchat-component';
import useAdaptiveCardsHostConfig from '../hooks/useAdaptiveCardsHostConfig';
import useAdaptiveCardsPackage from '../hooks/useAdaptiveCardsPackage';
@@ -226,6 +226,4 @@ AdaptiveCardRenderer.defaultProps = {
tapAction: undefined
};
-export default connectToWebChat(({ tapAction }) => ({
- tapAction
-}))(AdaptiveCardRenderer);
+export default AdaptiveCardRenderer;
diff --git a/packages/bundle/src/adaptiveCards/Attachment/AnimationCardAttachment.js b/packages/bundle/src/adaptiveCards/Attachment/AnimationCardAttachment.js
index f8d2e784b8..14e570f3ec 100644
--- a/packages/bundle/src/adaptiveCards/Attachment/AnimationCardAttachment.js
+++ b/packages/bundle/src/adaptiveCards/Attachment/AnimationCardAttachment.js
@@ -9,12 +9,7 @@ import CommonCard from './CommonCard';
const { ImageContent, VideoContent } = Components;
const { useStyleSet } = hooks;
-const AnimationCardAttachment = ({
- adaptiveCardHostConfig,
- adaptiveCards,
- attachment,
- attachment: { content: { media = [] } } = {}
-}) => {
+const AnimationCardAttachment = ({ attachment, attachment: { content: { media = [] } } = {} }) => {
const [{ animationCardAttachment: animationCardAttachmentStyleSet }] = useStyleSet();
return (
@@ -27,18 +22,12 @@ const AnimationCardAttachment = ({
))}
-
- {dictateState === STARTING &&
-